diff --git a/README.rst b/README.rst
index e04faa8a93ca65fe156f37a9207817ad78a506e1..d8da9ec74cb4b988f9ffa25569ba970d05ef06d6 100644
--- a/README.rst
+++ b/README.rst
@@ -3,19 +3,19 @@
 .. Thu 22 May 2014 15:39:03 CEST
 
 .. image:: http://img.shields.io/badge/docs-stable-yellow.png
-   :target: http://pythonhosted.org/bob.learn.misc/index.html
+   :target: http://pythonhosted.org/bob.learn.em/index.html
 .. image:: http://img.shields.io/badge/docs-latest-orange.png
-   :target: https://www.idiap.ch/software/bob/docs/latest/bioidiap/bob.learn.misc/master/index.html
-.. image:: https://travis-ci.org/bioidiap/bob.learn.misc.svg?branch=master
-   :target: https://travis-ci.org/bioidiap/bob.learn.misc
-.. image:: https://coveralls.io/repos/bioidiap/bob.learn.misc/badge.png
-   :target: https://coveralls.io/r/bioidiap/bob.learn.misc
+   :target: https://www.idiap.ch/software/bob/docs/latest/bioidiap/bob.learn.em/master/index.html
+.. image:: https://travis-ci.org/bioidiap/bob.learn.em.svg?branch=master
+   :target: https://travis-ci.org/bioidiap/bob.learn.em
+.. image:: https://coveralls.io/repos/bioidiap/bob.learn.em/badge.png
+   :target: https://coveralls.io/r/bioidiap/bob.learn.em
 .. image:: https://img.shields.io/badge/github-master-0000c0.png
-   :target: https://github.com/bioidiap/bob.learn.misc/tree/master
-.. image:: http://img.shields.io/pypi/v/bob.learn.misc.png
-   :target: https://pypi.python.org/pypi/bob.learn.misc
-.. image:: http://img.shields.io/pypi/dm/bob.learn.misc.png
-   :target: https://pypi.python.org/pypi/bob.learn.misc
+   :target: https://github.com/bioidiap/bob.learn.em/tree/master
+.. image:: http://img.shields.io/pypi/v/bob.learn.em.png
+   :target: https://pypi.python.org/pypi/bob.learn.em
+.. image:: http://img.shields.io/pypi/dm/bob.learn.em.png
+   :target: https://pypi.python.org/pypi/bob.learn.em
 
 ===========================================
  Miscelaneous Machines and Trainers in Bob
@@ -32,7 +32,7 @@ Please make sure that you have read the `Dependencies <https://github.com/idiap/
 
 Documentation
 -------------
-For further documentation on this package, please read the `Stable Version <http://pythonhosted.org/bob.learn.misc/index.html>`_ or the `Latest Version <https://www.idiap.ch/software/bob/docs/latest/bioidiap/bob.learn.misc/master/index.html>`_ of the documentation.
+For further documentation on this package, please read the `Stable Version <http://pythonhosted.org/bob.learn.em/index.html>`_ or the `Latest Version <https://www.idiap.ch/software/bob/docs/latest/bioidiap/bob.learn.em/master/index.html>`_ of the documentation.
 For a list of tutorials on this or the other packages ob Bob_, or information on submitting issues, asking questions and starting discussions, please visit its website.
 
 .. _bob: https://www.idiap.ch/software/bob
diff --git a/bob/learn/misc/MAP_gmm_trainer.cpp b/bob/learn/misc/MAP_gmm_trainer.cpp
deleted file mode 100644
index f3a3f4c5a14b07a7622e9b5bda2c3e46db11db38..0000000000000000000000000000000000000000
--- a/bob/learn/misc/MAP_gmm_trainer.cpp
+++ /dev/null
@@ -1,431 +0,0 @@
-/**
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- * @date Web 23 Jan 16:42:00 2015
- *
- * @brief Python API for bob::learn::em
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "main.h"
-
-/******************************************************************/
-/************ Constructor Section *********************************/
-/******************************************************************/
-
-static inline bool f(PyObject* o){return o != 0 && PyObject_IsTrue(o) > 0;}  /* converts PyObject to bool and returns false if object is NULL */
-
-static auto MAP_GMMTrainer_doc = bob::extension::ClassDoc(
-  BOB_EXT_MODULE_PREFIX ".MAP_GMMTrainer",
-  "This class implements the maximum a posteriori M-step of the expectation-maximisation algorithm for a GMM Machine. The prior parameters are encoded in the form of a GMM (e.g. a universal background model). The EM algorithm thus performs GMM adaptation."
-).add_constructor(
-  bob::extension::FunctionDoc(
-    "__init__",
-    "Creates a MAP_GMMTrainer",
-    "",
-    true
-  )
-
-  .add_prototype("prior_gmm,relevance_factor, update_means, [update_variances], [update_weights], [mean_var_update_responsibilities_threshold]","")
-  .add_prototype("prior_gmm,alpha, update_means, [update_variances], [update_weights], [mean_var_update_responsibilities_threshold]","")
-  .add_prototype("other","")
-  .add_prototype("","")
-
-  .add_parameter("prior_gmm", ":py:class:`bob.learn.misc.GMMMachine`", "The prior GMM to be adapted (Universal Backgroud Model UBM).")
-  .add_parameter("reynolds_adaptation", "bool", "Will use the Reynolds adaptation procedure? See Eq (14) from [Reynolds2000]_")
-  .add_parameter("relevance_factor", "double", "If set the reynolds_adaptation parameters, will apply the Reynolds Adaptation procedure. See Eq (14) from [Reynolds2000]_")
-  .add_parameter("alpha", "double", "Set directly the alpha parameter (Eq (14) from [Reynolds2000]_), ignoring zeroth order statistics as a weighting factor.")
-
-  .add_parameter("update_means", "bool", "Update means on each iteration")
-  .add_parameter("update_variances", "bool", "Update variances on each iteration")
-  .add_parameter("update_weights", "bool", "Update weights on each iteration")
-  .add_parameter("mean_var_update_responsibilities_threshold", "float", "Threshold over the responsibilities of the Gaussians Equations 9.24, 9.25 of Bishop, `Pattern recognition and machine learning`, 2006 require a division by the responsibilities, which might be equal to zero because of numerical issue. This threshold is used to avoid such divisions.")
-
-  .add_parameter("other", ":py:class:`bob.learn.misc.MAP_GMMTrainer`", "A MAP_GMMTrainer object to be copied.")
-);
-
-
-static int PyBobLearnMiscMAPGMMTrainer_init_copy(PyBobLearnMiscMAPGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = MAP_GMMTrainer_doc.kwlist(2);
-  PyBobLearnMiscMAPGMMTrainerObject* o;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscMAPGMMTrainer_Type, &o)){
-    MAP_GMMTrainer_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::MAP_GMMTrainer(*o->cxx));
-  return 0;
-}
-
-
-static int PyBobLearnMiscMAPGMMTrainer_init_base_trainer(PyBobLearnMiscMAPGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist1 = MAP_GMMTrainer_doc.kwlist(0);
-  char** kwlist2 = MAP_GMMTrainer_doc.kwlist(1);
-  
-  PyBobLearnMiscGMMMachineObject* gmm_machine;
-  bool reynolds_adaptation   = false;
-  double alpha = 0.5;
-  double relevance_factor = 4.0;
-  double aux = 0;
-
-  PyObject* update_means     = 0;
-  PyObject* update_variances = 0;
-  PyObject* update_weights   = 0;
-  double mean_var_update_responsibilities_threshold = std::numeric_limits<double>::epsilon();
-
-  PyObject* keyword_relevance_factor = Py_BuildValue("s", kwlist1[1]);
-  PyObject* keyword_alpha            = Py_BuildValue("s", kwlist2[1]);
-
-  //Here we have to select which keyword argument to read  
-  if (kwargs && PyDict_Contains(kwargs, keyword_relevance_factor) && (PyArg_ParseTupleAndKeywords(args, kwargs, "O!dO!|O!O!d", kwlist1, 
-                                                                      &PyBobLearnMiscGMMMachine_Type, &gmm_machine,
-                                                                      &aux,
-                                                                      &PyBool_Type, &update_means, 
-                                                                      &PyBool_Type, &update_variances, 
-                                                                      &PyBool_Type, &update_weights, 
-                                                                      &mean_var_update_responsibilities_threshold)))
-    reynolds_adaptation = true;    
-  else if (kwargs && PyDict_Contains(kwargs, keyword_alpha) && (PyArg_ParseTupleAndKeywords(args, kwargs, "O!dO!|O!O!d", kwlist2, 
-                                                                 &PyBobLearnMiscGMMMachine_Type, &gmm_machine,
-                                                                 &aux,
-                                                                 &PyBool_Type, &update_means, 
-                                                                 &PyBool_Type, &update_variances, 
-                                                                 &PyBool_Type, &update_weights, 
-                                                                 &mean_var_update_responsibilities_threshold)))
-    reynolds_adaptation = false;
-  else{
-    PyErr_Format(PyExc_RuntimeError, "%s. The second argument must be a keyword argument.", Py_TYPE(self)->tp_name);
-    MAP_GMMTrainer_doc.print_usage();
-    return -1;
-  }
-
-  if (reynolds_adaptation)
-    relevance_factor = aux;
-  else
-    alpha = aux;
-  
-  
-  self->cxx.reset(new bob::learn::misc::MAP_GMMTrainer(f(update_means), f(update_variances), f(update_weights), 
-                                                       mean_var_update_responsibilities_threshold, 
-                                                       reynolds_adaptation,relevance_factor, alpha, gmm_machine->cxx));
-  return 0;
-
-}
-
-
-
-static int PyBobLearnMiscMAPGMMTrainer_init(PyBobLearnMiscMAPGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // If the constructor input is GMMBaseTrainer object
-  if(PyBobLearnMiscMAPGMMTrainer_Check(args))
-    return PyBobLearnMiscMAPGMMTrainer_init_copy(self, args, kwargs);
-  else{
-    return PyBobLearnMiscMAPGMMTrainer_init_base_trainer(self, args, kwargs);
-  }
-
-  BOB_CATCH_MEMBER("cannot create MAP_GMMTrainer", 0)
-  return 0;
-}
-
-
-static void PyBobLearnMiscMAPGMMTrainer_delete(PyBobLearnMiscMAPGMMTrainerObject* self) {
-  self->cxx.reset();
-  Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-
-int PyBobLearnMiscMAPGMMTrainer_Check(PyObject* o) {
-  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnMiscMAPGMMTrainer_Type));
-}
-
-
-static PyObject* PyBobLearnMiscMAPGMMTrainer_RichCompare(PyBobLearnMiscMAPGMMTrainerObject* self, PyObject* other, int op) {
-  BOB_TRY
-
-  if (!PyBobLearnMiscMAPGMMTrainer_Check(other)) {
-    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
-    return 0;
-  }
-  auto other_ = reinterpret_cast<PyBobLearnMiscMAPGMMTrainerObject*>(other);
-  switch (op) {
-    case Py_EQ:
-      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
-    case Py_NE:
-      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
-    default:
-      Py_INCREF(Py_NotImplemented);
-      return Py_NotImplemented;
-  }
-  BOB_CATCH_MEMBER("cannot compare MAP_GMMTrainer objects", 0)
-}
-
-
-/******************************************************************/
-/************ Variables Section ***********************************/
-/******************************************************************/
-
-/***** relevance_factor *****/
-static auto relevance_factor = bob::extension::VariableDoc(
-  "relevance_factor",
-  "double",
-  "If set the reynolds_adaptation parameters, will apply the Reynolds Adaptation Factor. See Eq (14) from [Reynolds2000]_",
-  ""
-);
-PyObject* PyBobLearnMiscMAPGMMTrainer_getRelevanceFactor(PyBobLearnMiscMAPGMMTrainerObject* self, void*){
-  BOB_TRY
-  return Py_BuildValue("d",self->cxx->getRelevanceFactor());
-  BOB_CATCH_MEMBER("relevance_factor could not be read", 0)
-}
-int PyBobLearnMiscMAPGMMTrainer_setRelevanceFactor(PyBobLearnMiscMAPGMMTrainerObject* self, PyObject* value, void*){
-  BOB_TRY
-  
-  if(!PyNumber_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a double", Py_TYPE(self)->tp_name, relevance_factor.name());
-    return -1;
-  }
-  
-  self->cxx->setRelevanceFactor(PyFloat_AS_DOUBLE(value));
-  return 0;
-  BOB_CATCH_MEMBER("relevance_factor could not be set", 0)
-}
-
-
-/***** alpha *****/
-static auto alpha = bob::extension::VariableDoc(
-  "alpha",
-  "double",
-  "Set directly the alpha parameter (Eq (14) from [Reynolds2000]_), ignoring zeroth order statistics as a weighting factor.",
-  ""
-);
-PyObject* PyBobLearnMiscMAPGMMTrainer_getAlpha(PyBobLearnMiscMAPGMMTrainerObject* self, void*){
-  BOB_TRY
-  return Py_BuildValue("d",self->cxx->getAlpha());
-  BOB_CATCH_MEMBER("alpha could not be read", 0)
-}
-int PyBobLearnMiscMAPGMMTrainer_setAlpha(PyBobLearnMiscMAPGMMTrainerObject* self, PyObject* value, void*){
-  BOB_TRY
-  
-  if(!PyNumber_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a double", Py_TYPE(self)->tp_name, alpha.name());
-    return -1;
-  }
-  
-  self->cxx->setAlpha(PyFloat_AS_DOUBLE(value));
-  return 0;
-  BOB_CATCH_MEMBER("alpha could not be set", 0)
-}
-
-
-
-static PyGetSetDef PyBobLearnMiscMAPGMMTrainer_getseters[] = { 
-  {
-    alpha.name(),
-    (getter)PyBobLearnMiscMAPGMMTrainer_getAlpha,
-    (setter)PyBobLearnMiscMAPGMMTrainer_setAlpha,
-    alpha.doc(),
-    0
-  },
-  {
-    relevance_factor.name(),
-    (getter)PyBobLearnMiscMAPGMMTrainer_getRelevanceFactor,
-    (setter)PyBobLearnMiscMAPGMMTrainer_setRelevanceFactor,
-    relevance_factor.doc(),
-    0
-  },
-
-  {0}  // Sentinel
-};
-
-
-/******************************************************************/
-/************ Functions Section ***********************************/
-/******************************************************************/
-
-/*** initialize ***/
-static auto initialize = bob::extension::FunctionDoc(
-  "initialize",
-  "Initialization before the EM steps",
-  "",
-  true
-)
-.add_prototype("gmm_machine")
-.add_parameter("gmm_machine", ":py:class:`bob.learn.misc.GMMMachine`", "GMMMachine Object");
-static PyObject* PyBobLearnMiscMAPGMMTrainer_initialize(PyBobLearnMiscMAPGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = initialize.kwlist(0);
-
-  PyBobLearnMiscGMMMachineObject* gmm_machine = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscGMMMachine_Type, &gmm_machine)){
-    PyErr_Format(PyExc_RuntimeError, "%s.%s. Was not possible to read :py:class:`bob.learn.misc.GMMMachine`", Py_TYPE(self)->tp_name, initialize.name());
-    Py_RETURN_NONE;
-  }
-  self->cxx->initialize(*gmm_machine->cxx);
-  BOB_CATCH_MEMBER("cannot perform the initialize method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** eStep ***/
-static auto eStep = bob::extension::FunctionDoc(
-  "eStep",
-  "Calculates and saves statistics across the dataset,"
-  "and saves these as m_ss. ",
-
-  "Calculates the average log likelihood of the observations given the GMM,"
-  "and returns this in average_log_likelihood."
-  "The statistics, m_ss, will be used in the mStep() that follows.",
-
-  true
-)
-.add_prototype("gmm_machine,data")
-.add_parameter("gmm_machine", ":py:class:`bob.learn.misc.GMMMachine`", "GMMMachine Object")
-.add_parameter("data", "array_like <float, 2D>", "Input data");
-static PyObject* PyBobLearnMiscMAPGMMTrainer_eStep(PyBobLearnMiscMAPGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = eStep.kwlist(0);
-
-  PyBobLearnMiscGMMMachineObject* gmm_machine;
-  PyBlitzArrayObject* data = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnMiscGMMMachine_Type, &gmm_machine,
-                                                                 &PyBlitzArray_Converter, &data)) Py_RETURN_NONE;
-  auto data_ = make_safe(data);
-
-  self->cxx->eStep(*gmm_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
-
-  BOB_CATCH_MEMBER("cannot perform the eStep method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** mStep ***/
-static auto mStep = bob::extension::FunctionDoc(
-  "mStep",
-
-   "Performs a maximum a posteriori (MAP) update of the GMM:"  
-   "* parameters using the accumulated statistics in :py:class:`bob.learn.misc.GMMBaseTrainer.m_ss` and the" 
-   "* parameters of the prior model",
-  "",
-  true
-)
-.add_prototype("gmm_machine")
-.add_parameter("gmm_machine", ":py:class:`bob.learn.misc.GMMMachine`", "GMMMachine Object");
-static PyObject* PyBobLearnMiscMAPGMMTrainer_mStep(PyBobLearnMiscMAPGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = mStep.kwlist(0);
-
-  PyBobLearnMiscGMMMachineObject* gmm_machine;
-  
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscGMMMachine_Type, &gmm_machine)) Py_RETURN_NONE;
-
-  self->cxx->mStep(*gmm_machine->cxx);
-
-  BOB_CATCH_MEMBER("cannot perform the mStep method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** computeLikelihood ***/
-static auto compute_likelihood = bob::extension::FunctionDoc(
-  "compute_likelihood",
-  "This functions returns the average min (Square Euclidean) distance (average distance to the closest mean)",
-  0,
-  true
-)
-.add_prototype("gmm_machine")
-.add_parameter("gmm_machine", ":py:class:`bob.learn.misc.GMMMachine`", "GMMMachine Object");
-static PyObject* PyBobLearnMiscMAPGMMTrainer_compute_likelihood(PyBobLearnMiscMAPGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = compute_likelihood.kwlist(0);
-
-  PyBobLearnMiscGMMMachineObject* gmm_machine;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscGMMMachine_Type, &gmm_machine)) Py_RETURN_NONE;
-
-  double value = self->cxx->computeLikelihood(*gmm_machine->cxx);
-  return Py_BuildValue("d", value);
-
-  BOB_CATCH_MEMBER("cannot perform the computeLikelihood method", 0)
-}
-
-
-
-static PyMethodDef PyBobLearnMiscMAPGMMTrainer_methods[] = {
-  {
-    initialize.name(),
-    (PyCFunction)PyBobLearnMiscMAPGMMTrainer_initialize,
-    METH_VARARGS|METH_KEYWORDS,
-    initialize.doc()
-  },
-  {
-    eStep.name(),
-    (PyCFunction)PyBobLearnMiscMAPGMMTrainer_eStep,
-    METH_VARARGS|METH_KEYWORDS,
-    eStep.doc()
-  },
-  {
-    mStep.name(),
-    (PyCFunction)PyBobLearnMiscMAPGMMTrainer_mStep,
-    METH_VARARGS|METH_KEYWORDS,
-    mStep.doc()
-  },
-  {
-    compute_likelihood.name(),
-    (PyCFunction)PyBobLearnMiscMAPGMMTrainer_compute_likelihood,
-    METH_VARARGS|METH_KEYWORDS,
-    compute_likelihood.doc()
-  },
-
-  {0} /* Sentinel */
-};
-
-
-/******************************************************************/
-/************ Module Section **************************************/
-/******************************************************************/
-
-// Define the Gaussian type struct; will be initialized later
-PyTypeObject PyBobLearnMiscMAPGMMTrainer_Type = {
-  PyVarObject_HEAD_INIT(0,0)
-  0
-};
-
-bool init_BobLearnMiscMAPGMMTrainer(PyObject* module)
-{
-  // initialize the type struct
-  PyBobLearnMiscMAPGMMTrainer_Type.tp_name      = MAP_GMMTrainer_doc.name();
-  PyBobLearnMiscMAPGMMTrainer_Type.tp_basicsize = sizeof(PyBobLearnMiscMAPGMMTrainerObject);
-  PyBobLearnMiscMAPGMMTrainer_Type.tp_flags     = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;//Enable the class inheritance
-  PyBobLearnMiscMAPGMMTrainer_Type.tp_doc       = MAP_GMMTrainer_doc.doc();
-
-  // set the functions
-  PyBobLearnMiscMAPGMMTrainer_Type.tp_new          = PyType_GenericNew;
-  PyBobLearnMiscMAPGMMTrainer_Type.tp_init         = reinterpret_cast<initproc>(PyBobLearnMiscMAPGMMTrainer_init);
-  PyBobLearnMiscMAPGMMTrainer_Type.tp_dealloc      = reinterpret_cast<destructor>(PyBobLearnMiscMAPGMMTrainer_delete);
-  PyBobLearnMiscMAPGMMTrainer_Type.tp_richcompare  = reinterpret_cast<richcmpfunc>(PyBobLearnMiscMAPGMMTrainer_RichCompare);
-  PyBobLearnMiscMAPGMMTrainer_Type.tp_methods      = PyBobLearnMiscMAPGMMTrainer_methods;
-  PyBobLearnMiscMAPGMMTrainer_Type.tp_getset       = PyBobLearnMiscMAPGMMTrainer_getseters;
-  PyBobLearnMiscMAPGMMTrainer_Type.tp_call         = reinterpret_cast<ternaryfunc>(PyBobLearnMiscMAPGMMTrainer_compute_likelihood);
-
-
-  // check that everything is fine
-  if (PyType_Ready(&PyBobLearnMiscMAPGMMTrainer_Type) < 0) return false;
-
-  // add the type to the module
-  Py_INCREF(&PyBobLearnMiscMAPGMMTrainer_Type);
-  return PyModule_AddObject(module, "_MAP_GMMTrainer", (PyObject*)&PyBobLearnMiscMAPGMMTrainer_Type) >= 0;
-}
-
diff --git a/bob/learn/misc/ML_gmm_trainer.cpp b/bob/learn/misc/ML_gmm_trainer.cpp
deleted file mode 100644
index ff726092b0567ca19b94bf15394a9cd2452941a5..0000000000000000000000000000000000000000
--- a/bob/learn/misc/ML_gmm_trainer.cpp
+++ /dev/null
@@ -1,335 +0,0 @@
-/**
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- * @date Web 22 Jan 16:45:00 2015
- *
- * @brief Python API for bob::learn::em
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "main.h"
-
-/******************************************************************/
-/************ Constructor Section *********************************/
-/******************************************************************/
-
-static inline bool f(PyObject* o){return o != 0 && PyObject_IsTrue(o) > 0;}  /* converts PyObject to bool and returns false if object is NULL */
-
-static auto ML_GMMTrainer_doc = bob::extension::ClassDoc(
-  BOB_EXT_MODULE_PREFIX ".ML_GMMTrainer",
-  "This class implements the maximum likelihood M-step of the expectation-maximisation algorithm for a GMM Machine."
-).add_constructor(
-  bob::extension::FunctionDoc(
-    "__init__",
-    "Creates a ML_GMMTrainer",
-    "",
-    true
-  )
-  .add_prototype("update_means, [update_variances], [update_weights], [mean_var_update_responsibilities_threshold]","")
-  .add_prototype("other","")
-  .add_prototype("","")
-
-  .add_parameter("update_means", "bool", "Update means on each iteration")
-  .add_parameter("update_variances", "bool", "Update variances on each iteration")
-  .add_parameter("update_weights", "bool", "Update weights on each iteration")
-  .add_parameter("mean_var_update_responsibilities_threshold", "float", "Threshold over the responsibilities of the Gaussians Equations 9.24, 9.25 of Bishop, `Pattern recognition and machine learning`, 2006 require a division by the responsibilities, which might be equal to zero because of numerical issue. This threshold is used to avoid such divisions.")
-
-
-  .add_parameter("other", ":py:class:`bob.learn.misc.ML_GMMTrainer`", "A ML_GMMTrainer object to be copied.")
-);
-
-
-static int PyBobLearnMiscMLGMMTrainer_init_copy(PyBobLearnMiscMLGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = ML_GMMTrainer_doc.kwlist(1);
-  PyBobLearnMiscMLGMMTrainerObject* o;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscMLGMMTrainer_Type, &o)){
-    ML_GMMTrainer_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::ML_GMMTrainer(*o->cxx));
-  return 0;
-}
-
-
-static int PyBobLearnMiscMLGMMTrainer_init_base_trainer(PyBobLearnMiscMLGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = ML_GMMTrainer_doc.kwlist(0);
-  
-  PyObject* update_means     = 0;
-  PyObject* update_variances = 0;
-  PyObject* update_weights   = 0;
-  double mean_var_update_responsibilities_threshold = std::numeric_limits<double>::epsilon();
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!|O!O!d", kwlist, 
-                                   &PyBool_Type, &update_means, 
-                                   &PyBool_Type, &update_variances, 
-                                   &PyBool_Type, &update_weights, 
-                                   &mean_var_update_responsibilities_threshold)){
-    ML_GMMTrainer_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::ML_GMMTrainer(f(update_means), f(update_variances), f(update_weights), 
-                                                       mean_var_update_responsibilities_threshold));
-  return 0;
-}
-
-
-
-static int PyBobLearnMiscMLGMMTrainer_init(PyBobLearnMiscMLGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  //Reading the input argument
-  PyObject* arg = 0;
-  if (PyTuple_Size(args))
-    arg = PyTuple_GET_ITEM(args, 0);
-  else {
-    PyObject* tmp = PyDict_Values(kwargs);
-    auto tmp_ = make_safe(tmp);
-    arg = PyList_GET_ITEM(tmp, 0);
-  }
-
-  // If the constructor input is GMMBaseTrainer object
-  if (PyBobLearnMiscMLGMMTrainer_Check(arg))
-    return PyBobLearnMiscMLGMMTrainer_init_copy(self, args, kwargs);
-  else
-    return PyBobLearnMiscMLGMMTrainer_init_base_trainer(self, args, kwargs);
-
-
-
-  BOB_CATCH_MEMBER("cannot create GMMBaseTrainer_init_bool", 0)
-  return 0;
-}
-
-
-static void PyBobLearnMiscMLGMMTrainer_delete(PyBobLearnMiscMLGMMTrainerObject* self) {
-  self->cxx.reset();
-  Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-
-int PyBobLearnMiscMLGMMTrainer_Check(PyObject* o) {
-  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnMiscMLGMMTrainer_Type));
-}
-
-
-static PyObject* PyBobLearnMiscMLGMMTrainer_RichCompare(PyBobLearnMiscMLGMMTrainerObject* self, PyObject* other, int op) {
-  BOB_TRY
-
-  if (!PyBobLearnMiscMLGMMTrainer_Check(other)) {
-    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
-    return 0;
-  }
-  auto other_ = reinterpret_cast<PyBobLearnMiscMLGMMTrainerObject*>(other);
-  switch (op) {
-    case Py_EQ:
-      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
-    case Py_NE:
-      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
-    default:
-      Py_INCREF(Py_NotImplemented);
-      return Py_NotImplemented;
-  }
-  BOB_CATCH_MEMBER("cannot compare ML_GMMTrainer objects", 0)
-}
-
-
-/******************************************************************/
-/************ Variables Section ***********************************/
-/******************************************************************/
-
-static PyGetSetDef PyBobLearnMiscMLGMMTrainer_getseters[] = { 
-  {0}  // Sentinel
-};
-
-
-/******************************************************************/
-/************ Functions Section ***********************************/
-/******************************************************************/
-
-/*** initialize ***/
-static auto initialize = bob::extension::FunctionDoc(
-  "initialize",
-  "Initialization before the EM steps",
-  "",
-  true
-)
-.add_prototype("gmm_machine")
-.add_parameter("gmm_machine", ":py:class:`bob.learn.misc.GMMMachine`", "GMMMachine Object");
-static PyObject* PyBobLearnMiscMLGMMTrainer_initialize(PyBobLearnMiscMLGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = initialize.kwlist(0);
-
-  PyBobLearnMiscGMMMachineObject* gmm_machine = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscGMMMachine_Type, &gmm_machine)){
-    PyErr_Format(PyExc_RuntimeError, "%s.%s. Was not possible to read :py:class:`bob.learn.misc.GMMMachine`", Py_TYPE(self)->tp_name, initialize.name());
-    Py_RETURN_NONE;
-  }
-  self->cxx->initialize(*gmm_machine->cxx);
-  BOB_CATCH_MEMBER("cannot perform the initialize method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** eStep ***/
-static auto eStep = bob::extension::FunctionDoc(
-  "eStep",
-  "Calculates and saves statistics across the dataset,"
-  "and saves these as m_ss. ",
-
-  "Calculates the average log likelihood of the observations given the GMM,"
-  "and returns this in average_log_likelihood."
-  "The statistics, m_ss, will be used in the mStep() that follows.",
-
-  true
-)
-.add_prototype("gmm_machine,data")
-.add_parameter("gmm_machine", ":py:class:`bob.learn.misc.GMMMachine`", "GMMMachine Object")
-.add_parameter("data", "array_like <float, 2D>", "Input data");
-static PyObject* PyBobLearnMiscMLGMMTrainer_eStep(PyBobLearnMiscMLGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = eStep.kwlist(0);
-
-  PyBobLearnMiscGMMMachineObject* gmm_machine;
-  PyBlitzArrayObject* data = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnMiscGMMMachine_Type, &gmm_machine,
-                                                                 &PyBlitzArray_Converter, &data)) Py_RETURN_NONE;
-  auto data_ = make_safe(data);
-
-  self->cxx->eStep(*gmm_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
-
-  BOB_CATCH_MEMBER("cannot perform the eStep method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** mStep ***/
-static auto mStep = bob::extension::FunctionDoc(
-  "mStep",
-  "Performs a maximum likelihood (ML) update of the GMM parameters "
-  "using the accumulated statistics in :py:class:`bob.learn.misc.GMMBaseTrainer.m_ss`",
-
-  "See Section 9.2.2 of Bishop, \"Pattern recognition and machine learning\", 2006",
-
-  true
-)
-.add_prototype("gmm_machine")
-.add_parameter("gmm_machine", ":py:class:`bob.learn.misc.GMMMachine`", "GMMMachine Object");
-static PyObject* PyBobLearnMiscMLGMMTrainer_mStep(PyBobLearnMiscMLGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = mStep.kwlist(0);
-
-  PyBobLearnMiscGMMMachineObject* gmm_machine;
-  
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscGMMMachine_Type, &gmm_machine)) Py_RETURN_NONE;
-
-  self->cxx->mStep(*gmm_machine->cxx);
-
-  BOB_CATCH_MEMBER("cannot perform the mStep method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** computeLikelihood ***/
-static auto compute_likelihood = bob::extension::FunctionDoc(
-  "compute_likelihood",
-  "This functions returns the average min (Square Euclidean) distance (average distance to the closest mean)",
-  0,
-  true
-)
-.add_prototype("gmm_machine")
-.add_parameter("gmm_machine", ":py:class:`bob.learn.misc.GMMMachine`", "GMMMachine Object");
-static PyObject* PyBobLearnMiscMLGMMTrainer_compute_likelihood(PyBobLearnMiscMLGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = compute_likelihood.kwlist(0);
-
-  PyBobLearnMiscGMMMachineObject* gmm_machine;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscGMMMachine_Type, &gmm_machine)) Py_RETURN_NONE;
-
-  double value = self->cxx->computeLikelihood(*gmm_machine->cxx);
-  return Py_BuildValue("d", value);
-
-  BOB_CATCH_MEMBER("cannot perform the computeLikelihood method", 0)
-}
-
-
-
-static PyMethodDef PyBobLearnMiscMLGMMTrainer_methods[] = {
-  {
-    initialize.name(),
-    (PyCFunction)PyBobLearnMiscMLGMMTrainer_initialize,
-    METH_VARARGS|METH_KEYWORDS,
-    initialize.doc()
-  },
-  {
-    eStep.name(),
-    (PyCFunction)PyBobLearnMiscMLGMMTrainer_eStep,
-    METH_VARARGS|METH_KEYWORDS,
-    eStep.doc()
-  },
-  {
-    mStep.name(),
-    (PyCFunction)PyBobLearnMiscMLGMMTrainer_mStep,
-    METH_VARARGS|METH_KEYWORDS,
-    mStep.doc()
-  },
-  {
-    compute_likelihood.name(),
-    (PyCFunction)PyBobLearnMiscMLGMMTrainer_compute_likelihood,
-    METH_VARARGS|METH_KEYWORDS,
-    compute_likelihood.doc()
-  },
-  {0} /* Sentinel */
-};
-
-
-/******************************************************************/
-/************ Module Section **************************************/
-/******************************************************************/
-
-// Define the Gaussian type struct; will be initialized later
-PyTypeObject PyBobLearnMiscMLGMMTrainer_Type = {
-  PyVarObject_HEAD_INIT(0,0)
-  0
-};
-
-bool init_BobLearnMiscMLGMMTrainer(PyObject* module)
-{
-  // initialize the type struct
-  PyBobLearnMiscMLGMMTrainer_Type.tp_name      = ML_GMMTrainer_doc.name();
-  PyBobLearnMiscMLGMMTrainer_Type.tp_basicsize = sizeof(PyBobLearnMiscMLGMMTrainerObject);
-  PyBobLearnMiscMLGMMTrainer_Type.tp_flags     = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;//Enable the class inheritance
-  PyBobLearnMiscMLGMMTrainer_Type.tp_doc       = ML_GMMTrainer_doc.doc();
-
-  // set the functions
-  PyBobLearnMiscMLGMMTrainer_Type.tp_new          = PyType_GenericNew;
-  PyBobLearnMiscMLGMMTrainer_Type.tp_init         = reinterpret_cast<initproc>(PyBobLearnMiscMLGMMTrainer_init);
-  PyBobLearnMiscMLGMMTrainer_Type.tp_dealloc      = reinterpret_cast<destructor>(PyBobLearnMiscMLGMMTrainer_delete);
-  PyBobLearnMiscMLGMMTrainer_Type.tp_richcompare  = reinterpret_cast<richcmpfunc>(PyBobLearnMiscMLGMMTrainer_RichCompare);
-  PyBobLearnMiscMLGMMTrainer_Type.tp_methods      = PyBobLearnMiscMLGMMTrainer_methods;
-  PyBobLearnMiscMLGMMTrainer_Type.tp_getset       = PyBobLearnMiscMLGMMTrainer_getseters;
-  PyBobLearnMiscMLGMMTrainer_Type.tp_call         = reinterpret_cast<ternaryfunc>(PyBobLearnMiscMLGMMTrainer_compute_likelihood);
-
-
-  // check that everything is fine
-  if (PyType_Ready(&PyBobLearnMiscMLGMMTrainer_Type) < 0) return false;
-
-  // add the type to the module
-  Py_INCREF(&PyBobLearnMiscMLGMMTrainer_Type);
-  return PyModule_AddObject(module, "_ML_GMMTrainer", (PyObject*)&PyBobLearnMiscMLGMMTrainer_Type) >= 0;
-}
-
diff --git a/bob/learn/misc/__MAP_gmm_trainer__.py b/bob/learn/misc/__MAP_gmm_trainer__.py
deleted file mode 100644
index 4258b089c68d7bd3c639da304485cf23e5f6b810..0000000000000000000000000000000000000000
--- a/bob/learn/misc/__MAP_gmm_trainer__.py
+++ /dev/null
@@ -1,99 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
-# Mon Jan 23 18:31:10 2015
-#
-# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
-
-from ._library import _MAP_GMMTrainer
-import numpy
-
-# define the class
-class MAP_GMMTrainer(_MAP_GMMTrainer):
-
-  def __init__(self, prior_gmm, update_means=True, update_variances=False, update_weights=False, convergence_threshold=0.001, max_iterations=10, converge_by_likelihood=True, **kwargs):
-    """
-    :py:class:`bob.learn.misc.MAP_GMMTrainer` constructor
-
-    Keyword Parameters:
-      update_means
-
-      update_variances
-
-      update_weights
-
-      prior_gmm
-        A :py:class:`bob.learn.misc.GMMMachine` to be adapted
-      convergence_threshold
-        Convergence threshold
-      max_iterations
-        Number of maximum iterations
-      converge_by_likelihood
-        Tells whether we compute log_likelihood as a convergence criteria, or not 
-      alpha
-        Set directly the alpha parameter (Eq (14) from [Reynolds2000]_), ignoring zeroth order statistics as a weighting factor.
-      relevance_factor
-        If set the :py:class:`bob.learn.misc.MAP_GMMTrainer.reynolds_adaptation` parameters, will apply the Reynolds Adaptation procedure. See Eq (14) from [Reynolds2000]_  
-    """
-
-    if kwargs.get('alpha')!=None:
-      alpha = kwargs.get('alpha')
-      _MAP_GMMTrainer.__init__(self, prior_gmm,alpha=alpha, update_means=update_means, update_variances=update_variances,update_weights=update_weights)
-    else:
-      relevance_factor = kwargs.get('relevance_factor')
-      _MAP_GMMTrainer.__init__(self, prior_gmm, relevance_factor=relevance_factor, update_means=update_means, update_variances=update_variances,update_weights=update_weights)
-    
-    self.convergence_threshold  = convergence_threshold
-    self.max_iterations         = max_iterations
-    self.converge_by_likelihood = converge_by_likelihood
-
-
- 
-
-
-  def train(self, gmm_machine, data):
-    """
-    Train the :py:class:bob.learn.misc.GMMMachine using data
-
-    Keyword Parameters:
-      gmm_machine
-        The :py:class:bob.learn.misc.GMMMachine class
-      data
-        The data to be trained
-    """
-
-    #Initialization
-    self.initialize(gmm_machine);
-
-    #Do the Expectation-Maximization algorithm
-    average_output_previous = 0
-    average_output = -numpy.inf;
-
-
-    #eStep
-    self.eStep(gmm_machine, data);
-
-    if(self.converge_by_likelihood):
-      average_output = self.compute_likelihood(gmm_machine);    
-
-    for i in range(self.max_iterations):
-      #saves average output from last iteration
-      average_output_previous = average_output;
-
-      #mStep
-      self.mStep(gmm_machine);
-
-      #eStep
-      self.eStep(gmm_machine, data);
-
-      #Computes log likelihood if required
-      if(self.converge_by_likelihood):
-        average_output = self.compute_likelihood(gmm_machine);
-
-        #Terminates if converged (and likelihood computation is set)
-        if abs((average_output_previous - average_output)/average_output_previous) <= self.convergence_threshold:
-          break
-
-
-# copy the documentation from the base class
-__doc__ = _MAP_GMMTrainer.__doc__
diff --git a/bob/learn/misc/__ML_gmm_trainer__.py b/bob/learn/misc/__ML_gmm_trainer__.py
deleted file mode 100644
index 93a3c6cdd8ee57c88631d54f3560e228dd1c61db..0000000000000000000000000000000000000000
--- a/bob/learn/misc/__ML_gmm_trainer__.py
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
-# Mon Jan 22 18:29:10 2015
-#
-# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
-
-from ._library import _ML_GMMTrainer
-import numpy
-
-# define the class
-class ML_GMMTrainer(_ML_GMMTrainer):
-
-  def __init__(self, update_means=True, update_variances=False, update_weights=False, convergence_threshold=0.001, max_iterations=10, converge_by_likelihood=True):
-    """
-    :py:class:bob.learn.misc.ML_GMMTrainer constructor
-
-    Keyword Parameters:
-      update_means
-
-      update_variances
-
-      update_weights
- 
-      convergence_threshold
-        Convergence threshold
-      max_iterations
-        Number of maximum iterations
-      converge_by_likelihood
-        Tells whether we compute log_likelihood as a convergence criteria, or not 
-        
-    """
-
-    _ML_GMMTrainer.__init__(self, update_means=update_means, update_variances=update_variances, update_weights=update_weights)
-    self.convergence_threshold  = convergence_threshold
-    self.max_iterations         = max_iterations
-    self.converge_by_likelihood = converge_by_likelihood
-
-
-  def train(self, gmm_machine, data):
-    """
-    Train the :py:class:bob.learn.misc.GMMMachine using data
-
-    Keyword Parameters:
-      gmm_machine
-        The :py:class:bob.learn.misc.GMMMachine class
-      data
-        The data to be trained
-    """
-
-    #Initialization
-    self.initialize(gmm_machine);
-
-    #Do the Expectation-Maximization algorithm
-    average_output_previous = 0
-    average_output = -numpy.inf;
-
-
-    #eStep
-    self.eStep(gmm_machine, data);
-
-    if(self.converge_by_likelihood):
-      average_output = self.compute_likelihood(gmm_machine);    
-
-    for i in range(self.max_iterations):
-      #saves average output from last iteration
-      average_output_previous = average_output;
-
-      #mStep
-      self.mStep(gmm_machine);
-
-      #eStep
-      self.eStep(gmm_machine, data);
-
-      #Computes log likelihood if required
-      if(self.converge_by_likelihood):
-        average_output = self.compute_likelihood(gmm_machine);
-
-        #Terminates if converged (and likelihood computation is set)
-        if abs((average_output_previous - average_output)/average_output_previous) <= self.convergence_threshold:
-          break
-
-
-# copy the documentation from the base class
-__doc__ = _ML_GMMTrainer.__doc__
diff --git a/bob/learn/misc/__empca_trainer__.py b/bob/learn/misc/__empca_trainer__.py
deleted file mode 100644
index 28f4c9dcf1a07f36251afef4fdcab8182825e0ff..0000000000000000000000000000000000000000
--- a/bob/learn/misc/__empca_trainer__.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
-# Wed Fev 04 13:35:10 2015 +0200
-#
-# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
-
-from ._library import _EMPCATrainer
-import numpy
-
-# define the class
-class EMPCATrainer (_EMPCATrainer):
-
-  def __init__(self, convergence_threshold=0.001, max_iterations=10, compute_likelihood=True):
-    """
-    :py:class:`bob.learn.misc.EMPCATrainer` constructor
-
-    Keyword Parameters:
-      convergence_threshold
-        Convergence threshold
-      max_iterations
-        Number of maximum iterations
-      compute_likelihood
-        
-    """
-
-    _EMPCATrainer.__init__(self,convergence_threshold)
-    self._max_iterations        = max_iterations
-    self._compute_likelihood    = compute_likelihood
-
-
-  def train(self, linear_machine, data):
-    """
-    Train the :py:class:bob.learn.misc.LinearMachine using data
-
-    Keyword Parameters:
-      linear_machine
-        The :py:class:bob.learn.misc.LinearMachine class
-      data
-        The data to be trained
-    """
-
-    #Initialization
-    self.initialize(linear_machine, data);
-      
-    #Do the Expectation-Maximization algorithm
-    average_output_previous = 0
-    average_output = -numpy.inf;
-
-    #eStep
-    self.eStep(linear_machine, data);
-
-    if(self._compute_likelihood):
-      average_output = self.compute_likelihood(linear_machine);
-    
-    for i in range(self._max_iterations):
-
-      #saves average output from last iteration
-      average_output_previous = average_output;
-
-      #mStep
-      self.mStep(linear_machine);
-
-      #eStep
-      self.eStep(linear_machine, data);
-
-      #Computes log likelihood if required
-      if(self._compute_likelihood):
-        average_output = self.compute_likelihood(linear_machine);
-
-        #Terminates if converged (and likelihood computation is set)
-        if abs((average_output_previous - average_output)/average_output_previous) <= self._convergence_threshold:
-          break
-
-
-# copy the documentation from the base class
-__doc__ = _EMPCATrainer.__doc__
diff --git a/bob/learn/misc/__init__.py b/bob/learn/misc/__init__.py
deleted file mode 100644
index 4f39609df8e4a73433f3fda1aba17dcbcad3635b..0000000000000000000000000000000000000000
--- a/bob/learn/misc/__init__.py
+++ /dev/null
@@ -1,58 +0,0 @@
-# import Libraries of other lib packages
-import bob.io.base
-import bob.math
-import bob.learn.linear
-
-# import our own Library
-import bob.extension
-bob.extension.load_bob_library('bob.learn.misc', __file__)
-
-#from ._old_library import *
-from ._library import *
-from . import version
-from .version import module as __version__
-from .__kmeans_trainer__ import *
-from .__ML_gmm_trainer__ import *
-from .__MAP_gmm_trainer__ import *
-from .__jfa_trainer__ import *
-from .__isv_trainer__ import *
-from .__ivector_trainer__ import *
-from .__plda_trainer__ import *
-
-
-def ztnorm_same_value(vect_a, vect_b):
-  """Computes the matrix of boolean D for the ZT-norm, which indicates where
-     the client ids of the T-Norm models and Z-Norm samples match.
-
-     vect_a An (ordered) list of client_id corresponding to the T-Norm models
-     vect_b An (ordered) list of client_id corresponding to the Z-Norm impostor samples
-  """
-  import numpy
-  sameMatrix = numpy.ndarray((len(vect_a), len(vect_b)), 'bool')
-  for j in range(len(vect_a)):
-    for i in range(len(vect_b)):
-      sameMatrix[j, i] = (vect_a[j] == vect_b[i])
-  return sameMatrix
-
-
-def get_config():
-  """Returns a string containing the configuration information.
-  """
-
-  import pkg_resources
-  from .version import externals
-
-  packages = pkg_resources.require(__name__)
-  this = packages[0]
-  deps = packages[1:]
-
-  retval =  "%s: %s (%s)\n" % (this.key, this.version, this.location)
-  retval += "  - c/c++ dependencies:\n"
-  for k in sorted(externals): retval += "    - %s: %s\n" % (k, externals[k])
-  retval += "  - python dependencies:\n"
-  for d in deps: retval += "    - %s: %s (%s)\n" % (d.key, d.version, d.location)
-
-  return retval.strip()
-
-# gets sphinx autodoc done right - don't remove it
-__all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/bob/learn/misc/__isv_trainer__.py b/bob/learn/misc/__isv_trainer__.py
deleted file mode 100644
index 98a5457e3c4a24338881369e7e215d61515b1a01..0000000000000000000000000000000000000000
--- a/bob/learn/misc/__isv_trainer__.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
-# Mon Fev 02 21:40:10 2015 +0200
-#
-# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
-
-from ._library import _ISVTrainer
-import numpy
-
-# define the class
-class ISVTrainer (_ISVTrainer):
-
-  def __init__(self, max_iterations=10, relevance_factor=4.):
-    """
-    :py:class:`bob.learn.misc.ISVTrainer` constructor
-
-    Keyword Parameters:
-      max_iterations
-        Number of maximum iterations
-    """
-    _ISVTrainer.__init__(self, relevance_factor)
-    self._max_iterations         = max_iterations
-
-
-  def train(self, isv_base, data):
-    """
-    Train the :py:class:`bob.learn.misc.ISVBase` using data
-
-    Keyword Parameters:
-      jfa_base
-        The `:py:class:bob.learn.misc.ISVBase` class
-      data
-        The data to be trained
-    """
-
-    #Initialization
-    self.initialize(isv_base, data);
-      
-    for i in range(self._max_iterations):
-      #eStep
-      self.eStep(isv_base, data);
-      #mStep
-      self.mStep(isv_base);
-
-
-
-# copy the documentation from the base class
-__doc__ = _ISVTrainer.__doc__
diff --git a/bob/learn/misc/__ivector_trainer__.py b/bob/learn/misc/__ivector_trainer__.py
deleted file mode 100644
index a53a2d452eab0772390fdc4063ba3055690cdca1..0000000000000000000000000000000000000000
--- a/bob/learn/misc/__ivector_trainer__.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
-# Tue Fev 03 13:20:10 2015 +0200
-#
-# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
-
-from ._library import _IVectorTrainer
-import numpy
-
-# define the class
-class IVectorTrainer (_IVectorTrainer):
-
-  def __init__(self, max_iterations=10, update_sigma=False):
-    """
-    :py:class:`bob.learn.misc.IVectorTrainer` constructor
-
-    Keyword Parameters:
-      max_iterations
-        Number of maximum iterations
-      update_sigma
-        
-    """
-    _IVectorTrainer.__init__(self, update_sigma)
-    self._max_iterations         = max_iterations
-
-
-  def train(self, ivector_machine, data):
-    """
-    Train the :py:class:`bob.learn.misc.IVectorMachine` using data
-
-    Keyword Parameters:
-      ivector_machine
-        The `:py:class:bob.learn.misc.IVectorMachine` class
-      data
-        The data to be trained
-    """
-
-    #Initialization
-    self.initialize(ivector_machine, data);
-      
-    for i in range(self._max_iterations):
-      #eStep
-      self.eStep(ivector_machine, data);
-      #mStep
-      self.mStep(ivector_machine);
-
-
-
-# copy the documentation from the base class
-__doc__ = _IVectorTrainer.__doc__
diff --git a/bob/learn/misc/__jfa_trainer__.py b/bob/learn/misc/__jfa_trainer__.py
deleted file mode 100644
index ad803ada4fa8b7e543d12d0b741160837a63719a..0000000000000000000000000000000000000000
--- a/bob/learn/misc/__jfa_trainer__.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
-# Sun Fev 01 21:10:10 2015 +0200
-#
-# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
-
-from ._library import _JFATrainer
-import numpy
-
-# define the class
-class JFATrainer (_JFATrainer):
-
-  def __init__(self, max_iterations=10):
-    """
-    :py:class:`bob.learn.misc.JFATrainer` constructor
-
-    Keyword Parameters:
-      max_iterations
-        Number of maximum iterations
-    """
-
-    _JFATrainer.__init__(self)
-    self._max_iterations         = max_iterations
-
-
-  def train_loop(self, jfa_base, data):
-    """
-    Train the :py:class:`bob.learn.misc.JFABase` using data
-
-    Keyword Parameters:
-      jfa_base
-        The `:py:class:bob.learn.misc.JFABase` class
-      data
-        The data to be trained
-    """
-    #V Subspace
-    for i in range(self._max_iterations):
-      self.e_step1(jfa_base, data)
-      self.m_step1(jfa_base, data)
-    self.finalize1(jfa_base, data)
-
-    #U subspace
-    for i in range(self._max_iterations):
-      self.e_step2(jfa_base, data)
-      self.m_step2(jfa_base, data)
-    self.finalize2(jfa_base, data)
-
-    # d subspace
-    for i in range(self._max_iterations):
-      self.e_step3(jfa_base, data)
-      self.m_step3(jfa_base, data)
-    self.finalize3(jfa_base, data)
-
-
-  def train(self, jfa_base, data):
-    """
-    Train the :py:class:`bob.learn.misc.JFABase` using data
-
-    Keyword Parameters:
-      jfa_base
-        The `:py:class:bob.learn.misc.JFABase` class
-      data
-        The data to be trained
-    """
-    self.initialize(jfa_base, data)
-    self.train_loop(jfa_base, data)
-
-
-# copy the documentation from the base class
-__doc__ = _JFATrainer.__doc__
diff --git a/bob/learn/misc/__kmeans_trainer__.py b/bob/learn/misc/__kmeans_trainer__.py
deleted file mode 100644
index afe4949366f936138d11ada07f87c0c3355b3a87..0000000000000000000000000000000000000000
--- a/bob/learn/misc/__kmeans_trainer__.py
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
-# Mon Jan 19 11:35:10 2015 +0200
-#
-# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
-
-from ._library import _KMeansTrainer
-import numpy
-
-# define the class
-class KMeansTrainer (_KMeansTrainer):
-
-  def __init__(self, initialization_method="RANDOM", convergence_threshold=0.001, max_iterations=10, converge_by_average_min_distance=True):
-    """
-    :py:class:`bob.learn.misc.KMeansTrainer` constructor
-
-    Keyword Parameters:
-      initialization_method
-        The initialization method to generate the initial means
-      convergence_threshold
-        Convergence threshold
-      max_iterations
-        Number of maximum iterations
-      converge_by_average_min_distance
-        Tells whether we compute the average min (square Euclidean) distance, as a convergence criteria, or not 
-        
-    """
-
-    _KMeansTrainer.__init__(self, initialization_method="RANDOM", )
-    self._convergence_threshold = convergence_threshold
-    self._max_iterations         = max_iterations
-    self._converge_by_average_min_distance = converge_by_average_min_distance
-
-
-  def train(self, kmeans_machine, data):
-    """
-    Train the :py:class:bob.learn.misc.KMeansMachine using data
-
-    Keyword Parameters:
-      kmeans_machine
-        The :py:class:bob.learn.misc.KMeansMachine class
-      data
-        The data to be trained
-    """
-
-    #Initialization
-    self.initialize(kmeans_machine, data);
-      
-    #Do the Expectation-Maximization algorithm
-    average_output_previous = 0
-    average_output = -numpy.inf;
-
-    #eStep
-    self.eStep(kmeans_machine, data);
-
-    if(self._converge_by_average_min_distance):
-      average_output = self.compute_likelihood(kmeans_machine);
-    
-    for i in range(self._max_iterations):
-
-      #saves average output from last iteration
-      average_output_previous = average_output;
-
-      #mStep
-      self.mStep(kmeans_machine);
-
-      #eStep
-      self.eStep(kmeans_machine, data);
-
-      #Computes log likelihood if required
-      if(self._converge_by_average_min_distance):
-        average_output = self.compute_likelihood(kmeans_machine);
-
-        #Terminates if converged (and likelihood computation is set)
-        if abs((average_output_previous - average_output)/average_output_previous) <= self._convergence_threshold:
-          break
-
-
-# copy the documentation from the base class
-__doc__ = _KMeansTrainer.__doc__
diff --git a/bob/learn/misc/__plda_trainer__.py b/bob/learn/misc/__plda_trainer__.py
deleted file mode 100644
index 0fc0ee3599e21979f5e1d3693c5bad3ab2a2dd2c..0000000000000000000000000000000000000000
--- a/bob/learn/misc/__plda_trainer__.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
-# Mon Fev 02 21:40:10 2015 +0200
-#
-# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
-
-from ._library import _PLDATrainer
-import numpy
-
-# define the class
-class PLDATrainer (_PLDATrainer):
-
-  def __init__(self, max_iterations=10, use_sum_second_order=False):
-    """
-    :py:class:`bob.learn.misc.PLDATrainer` constructor
-
-    Keyword Parameters:
-      max_iterations
-        Number of maximum iterations
-    """
-    _PLDATrainer.__init__(self, use_sum_second_order)
-    self._max_iterations         = max_iterations
-
-
-  def train(self, plda_base, data):
-    """
-    Train the :py:class:`bob.learn.misc.PLDABase` using data
-
-    Keyword Parameters:
-      jfa_base
-        The `:py:class:bob.learn.misc.PLDABase` class
-      data
-        The data to be trained
-    """
-
-    #Initialization
-    self.initialize(plda_base, data);
-      
-    for i in range(self._max_iterations):
-      #eStep
-      self.e_step(plda_base, data);
-      #mStep
-      self.m_step(plda_base, data);
-    self.finalize(plda_base, data);
-
-
-
-# copy the documentation from the base class
-__doc__ = _PLDATrainer.__doc__
diff --git a/bob/learn/misc/cpp/BICMachine.cpp b/bob/learn/misc/cpp/BICMachine.cpp
deleted file mode 100644
index 4bf047f658c401549b1ee6247a58c49c1acefda2..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/BICMachine.cpp
+++ /dev/null
@@ -1,348 +0,0 @@
-/**
- * @date Tue Jun  5 16:54:27 CEST 2012
- * @author Manuel Guenther <Manuel.Guenther@idiap.ch>
- *
- * A machine that implements the liner projection of input to the output using
- * weights, biases and sums:
- * output = sum(inputs * weights) + bias
- * It is possible to setup the machine to previously normalize the input taking
- * into consideration some input bias and division factor. It is also possible
- * to set it up to have an activation function.
- * A linear classifier. See C. M. Bishop, "Pattern Recognition and Machine
- * Learning", chapter 4
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <bob.learn.misc/BICMachine.h>
-#include <bob.math/linear.h>
-#include <bob.core/assert.h>
-#include <bob.core/check.h>
-
-/**
- * Initializes an empty BIC Machine
- *
- * @param use_DFFS  Add the Distance From Feature Space during score computation?
- */
-bob::learn::misc::BICMachine::BICMachine(bool use_DFFS)
-:
-  m_project_data(use_DFFS),
-  m_use_DFFS(use_DFFS)
-{}
-
-/**
- * Assigns the other BICMachine to this, i.e., makes a deep copy of the given machine.
- *
- * @param  other  The other BICMachine to get a shallow copy of
- * @return a reference to *this
- */
-bob::learn::misc::BICMachine::BICMachine(const BICMachine& other)
-:
-  m_project_data(other.m_project_data),
-  m_use_DFFS(other.m_use_DFFS)
-{
-  if (m_project_data){
-    setBIC(false, other.m_mu_I, other.m_lambda_I, other.m_Phi_I, other.m_rho_I, true);
-    setBIC(true , other.m_mu_E, other.m_lambda_E, other.m_Phi_E, other.m_rho_E, true);
-  } else {
-    setIEC(false, other.m_mu_I, other.m_lambda_I, true);
-    setIEC(true , other.m_mu_E, other.m_lambda_E, true);
-  }
-}
-
-/**
- * Assigns the other BICMachine to this, i.e., makes a deep copy of the given BICMachine
- *
- * @param  other  The other BICMachine to get a deep copy of
- * @return a reference to *this
- */
-bob::learn::misc::BICMachine& bob::learn::misc::BICMachine::operator=(const BICMachine& other)
-{
-  if (this != &other)
-  {
-    if (other.m_project_data){
-      m_use_DFFS = other.m_use_DFFS;
-      setBIC(false, other.m_mu_I, other.m_lambda_I, other.m_Phi_I, other.m_rho_I, true);
-      setBIC(true , other.m_mu_E, other.m_lambda_E, other.m_Phi_E, other.m_rho_E, true);
-    } else {
-      m_use_DFFS = false;
-      setIEC(false, other.m_mu_I, other.m_lambda_I, true);
-      setIEC(true , other.m_mu_E, other.m_lambda_E, true);
-    }
-  }
-  return *this;
-}
-
-/**
- * Compares if this machine and the given one are identical
- *
- * @param  other  The BICMachine to compare with
- * @return true if both machines are identical, i.e., have exactly the same parameters, otherwise false
- */
-bool bob::learn::misc::BICMachine::operator==(const BICMachine& other) const
-{
-  return (m_project_data == other.m_project_data &&
-          (!m_project_data || m_use_DFFS == other.m_use_DFFS) &&
-          bob::core::array::isEqual(m_mu_I, other.m_mu_I) &&
-          bob::core::array::isEqual(m_mu_E, other.m_mu_E) &&
-          bob::core::array::isEqual(m_lambda_I, other.m_lambda_I) &&
-          bob::core::array::isEqual(m_lambda_E, other.m_lambda_E) &&
-          (!m_project_data ||
-              (bob::core::array::isEqual(m_Phi_I, other.m_Phi_I) &&
-               bob::core::array::isEqual(m_Phi_E, other.m_Phi_E) &&
-               (!m_use_DFFS || (m_rho_I == other.m_rho_I && m_rho_E == other.m_rho_E)))));
-}
-
-/**
- * Checks if this machine and the given one are different
- *
- * @param  other  The BICMachine to compare with
- * @return false if both machines are identical, i.e., have exactly the same parameters, otherwise true
- */
-bool bob::learn::misc::BICMachine::operator!=(const BICMachine& other) const
-{
-  return !(this->operator==(other));
-}
-
-/**
- * Compares the given machine with this for similarity
- *
- * @param  other  The BICMachine to compare with
- * @param  r_epsilon  The largest value any parameter might relatively differ between the two machines
- * @param  a_epsilon  The largest value any parameter might absolutely differ between the two machines
-
- * @return true if both machines are approximately equal, otherwise false
- */
-bool bob::learn::misc::BICMachine::is_similar_to(const BICMachine& other,
-  const double r_epsilon, const double a_epsilon) const
-{
-  if (m_project_data){
-    // compare data
-    if (not bob::core::array::hasSameShape(m_Phi_I, other.m_Phi_I)) return false;
-    if (not bob::core::array::hasSameShape(m_Phi_E, other.m_Phi_E)) return false;
-    // check that the projection matrices are close,
-    // but allow that eigen vectors might have opposite directions
-    // (i.e., they are either identical -> difference is 0, or opposite -> sum is zero)
-    for (int i = m_Phi_I.extent(1); i--;){
-      const blitz::Array<double,1>& sub1 = m_Phi_I(blitz::Range::all(), i);
-      const blitz::Array<double,1>& sub2 = other.m_Phi_I(blitz::Range::all(), i);
-      blitz::Array<double,1> sub2_negative(-sub2);
-      if (!bob::core::array::isClose(sub1, sub2, r_epsilon, a_epsilon) && !bob::core::array::isClose(sub1, sub2_negative, r_epsilon, a_epsilon)) return false;
-    }
-    for (int i = m_Phi_E.shape()[1]; i--;){
-      const blitz::Array<double,1>& sub1 = m_Phi_E(blitz::Range::all(), i);
-      const blitz::Array<double,1>& sub2 = other.m_Phi_E(blitz::Range::all(), i);
-      blitz::Array<double,1> sub2_negative(-sub2);
-      if (!bob::core::array::isClose(sub1, sub2, r_epsilon, a_epsilon) && !bob::core::array::isClose(sub1, sub2_negative, r_epsilon, a_epsilon)) return false;
-    }
-  }
-
-  return (m_project_data == other.m_project_data &&
-          (!m_project_data || m_use_DFFS == other.m_use_DFFS) &&
-          bob::core::array::isClose(m_mu_I, other.m_mu_I, r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(m_mu_E, other.m_mu_E, r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(m_lambda_I, other.m_lambda_I, r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(m_lambda_E, other.m_lambda_E, r_epsilon, a_epsilon) &&
-          (!m_project_data ||
-               (!m_use_DFFS || (bob::core::isClose(m_rho_I, other.m_rho_I, r_epsilon, a_epsilon) &&
-                                bob::core::isClose(m_rho_E, other.m_rho_E, r_epsilon, a_epsilon)))));
-}
-
-
-
-void bob::learn::misc::BICMachine::initialize(bool clazz, int input_length, int projected_length){
-  blitz::Array<double,1>& diff = clazz ? m_diff_E : m_diff_I;
-  blitz::Array<double,1>& proj = clazz ? m_proj_E : m_proj_I;
-  diff.resize(input_length);
-  proj.resize(projected_length);
-}
-
-/**
- * Sets the parameters of the given class that are required for computing the IEC scores (Guenther, Wuertz)
- *
- * @param  clazz   false for the intrapersonal class, true for the extrapersonal one.
- * @param  mean    The mean vector of the training data
- * @param  variances  The variances of the training data
- * @param  copy_data  If true, makes a deep copy of the matrices, otherwise it just references it (the default)
- */
-void bob::learn::misc::BICMachine::setIEC(
-    bool clazz,
-    const blitz::Array<double,1>& mean,
-    const blitz::Array<double,1>& variances,
-    bool copy_data
-){
-  m_project_data = false;
-  // select the right matrices to write
-  blitz::Array<double,1>& mu = clazz ? m_mu_E : m_mu_I;
-  blitz::Array<double,1>& lambda = clazz ? m_lambda_E : m_lambda_I;
-
-  // copy mean and variances
-  if (copy_data){
-    mu.resize(mean.shape());
-    mu = mean;
-    lambda.resize(variances.shape());
-    lambda = variances;
-  } else {
-    mu.reference(mean);
-    lambda.reference(variances);
-  }
-}
-
-/**
- * Sets the parameters of the given class that are required for computing the BIC scores (Teixeira)
- *
- * @param  clazz   false for the intrapersonal class, true for the extrapersonal one.
- * @param  mean    The mean vector of the training data
- * @param  variances  The eigenvalues of the training data
- * @param  projection  The PCA projection matrix
- * @param  rho     The residual eigenvalues, used for DFFS calculation
- * @param  copy_data  If true, makes a deep copy of the matrices, otherwise it just references it (the default)
- */
-void bob::learn::misc::BICMachine::setBIC(
-    bool clazz,
-    const blitz::Array<double,1>& mean,
-    const blitz::Array<double,1>& variances,
-    const blitz::Array<double,2>& projection,
-    const double rho,
-    bool copy_data
-){
-  m_project_data = true;
-  // select the right matrices to write
-  blitz::Array<double,1>& mu = clazz ? m_mu_E : m_mu_I;
-  blitz::Array<double,1>& lambda = clazz ? m_lambda_E : m_lambda_I;
-  blitz::Array<double,2>& Phi = clazz ? m_Phi_E : m_Phi_I;
-  double& rho_ = clazz ? m_rho_E : m_rho_I;
-
-  // copy information
-  if (copy_data){
-    mu.resize(mean.shape());
-    mu = mean;
-    lambda.resize(variances.shape());
-    lambda = variances;
-    Phi.resize(projection.shape());
-    Phi = projection;
-  } else {
-    mu.reference(mean);
-    lambda.reference(variances);
-    Phi.reference(projection);
-  }
-  rho_ = rho;
-
-  // check that rho has a reasonable value (if it is used)
-  if (m_use_DFFS && rho_ < 1e-12) throw std::runtime_error("The given average eigenvalue (rho) is too close to zero");
-
-  // initialize temporaries
-  initialize(clazz, Phi.shape()[0], Phi.shape()[1]);
-}
-
-/**
- * Set or unset the usage of the Distance From Feature Space
- *
- * @param use_DFFS The new value of use_DFFS
- */
-void bob::learn::misc::BICMachine::use_DFFS(bool use_DFFS){
-  m_use_DFFS = use_DFFS;
-  if (m_project_data && m_use_DFFS && (m_rho_E < 1e-12 || m_rho_I < 1e-12)) std::runtime_error("The average eigenvalue (rho) is too close to zero, so using DFFS will not work");
-}
-
-/**
- * Loads the BICMachine from the given hdf5 file.
- *
- * @param  config  The hdf5 file containing the required information.
- */
-void bob::learn::misc::BICMachine::load(bob::io::base::HDF5File& config){
-  //reads all data directly into the member variables
-  m_project_data = config.read<bool>("project_data");
-  m_mu_I.reference(config.readArray<double,1>("intra_mean"));
-  m_lambda_I.reference(config.readArray<double,1>("intra_variance"));
-  if (m_project_data){
-    m_use_DFFS = config.read<bool>("use_DFFS");
-    m_Phi_I.reference(config.readArray<double,2>("intra_subspace"));
-    initialize(false, m_Phi_I.shape()[0], m_Phi_I.shape()[1]);
-    m_rho_I = config.read<double>("intra_rho");
-  }
-
-  m_mu_E.reference(config.readArray<double,1>("extra_mean"));
-  m_lambda_E.reference(config.readArray<double,1>("extra_variance"));
-  if (m_project_data){
-    m_Phi_E.reference(config.readArray<double,2>("extra_subspace"));
-    initialize(true, m_Phi_E.shape()[0], m_Phi_E.shape()[1]);
-    m_rho_E = config.read<double>("extra_rho");
-  }
-  // check that rho has reasonable values
-  if (m_project_data && m_use_DFFS && (m_rho_E < 1e-12 || m_rho_I < 1e-12)) throw std::runtime_error("The loaded average eigenvalue (rho) is too close to zero");
-
-}
-
-/**
- * Saves the parameters of the BICMachine to the given hdf5 file.
- *
- * @param  config  The hdf5 file to write the configuration into.
- */
-void bob::learn::misc::BICMachine::save(bob::io::base::HDF5File& config) const{
-  config.set("project_data", m_project_data);
-  config.setArray("intra_mean", m_mu_I);
-  config.setArray("intra_variance", m_lambda_I);
-  if (m_project_data){
-    config.set("use_DFFS", m_use_DFFS);
-    config.setArray("intra_subspace", m_Phi_I);
-    config.set("intra_rho", m_rho_I);
-  }
-
-  config.setArray("extra_mean", m_mu_E);
-  config.setArray("extra_variance", m_lambda_E);
-  if (m_project_data){
-    config.setArray("extra_subspace", m_Phi_E);
-    config.set("extra_rho", m_rho_E);
-  }
-}
-
-/**
- * Computes the BIC or IEC score for the given input vector.
- * The score itself is the log-likelihood score of the given input vector belonging to the intrapersonal class.
- * No sanity checks of input and output are performed.
- *
- * @param  input  A vector (of difference values) to compute the BIC or IEC score for.
- * @param  output The one-element array that will contain the score afterwards.
- */
-void bob::learn::misc::BICMachine::forward_(const blitz::Array<double,1>& input, double& output) const{
-  if (m_project_data){
-    // subtract mean
-    m_diff_I = input - m_mu_I;
-    m_diff_E = input - m_mu_E;
-    // project data to intrapersonal and extrapersonal subspace
-    bob::math::prod(m_diff_I, m_Phi_I, m_proj_I);
-    bob::math::prod(m_diff_E, m_Phi_E, m_proj_E);
-
-    // compute Mahalanobis distance
-    output = blitz::sum(blitz::pow2(m_proj_E) / m_lambda_E) - blitz::sum(blitz::pow2(m_proj_I) / m_lambda_I);
-
-    // add the DFFS?
-    if (m_use_DFFS){
-      output += blitz::sum(blitz::pow2(m_diff_E) - blitz::pow2(m_proj_E)) / m_rho_E;
-      output -= blitz::sum(blitz::pow2(m_diff_I) - blitz::pow2(m_proj_I)) / m_rho_I;
-    }
-    output /= (m_proj_E.extent(0) + m_proj_I.extent(0));
-  } else {
-    // forward without projection
-    output = blitz::mean( blitz::pow2(input - m_mu_E) / m_lambda_E
-                        - blitz::pow2(input - m_mu_I) / m_lambda_I);
-  }
-}
-
-/**
- * Computes the BIC or IEC score for the given input vector.
- * The score itself is the log-likelihood score of the given input vector belonging to the intrapersonal class.
- * Sanity checks of input and output shape are performed.
- *
- * @param  input  A vector (of difference values) to compute the BIC or IEC score for.
- * @param  output The one-element array that will contain the score afterwards.
- */
-void bob::learn::misc::BICMachine::forward(const blitz::Array<double,1>& input, double& output) const{
-  // perform some checks
-  bob::core::array::assertSameShape(input, m_mu_E);
-
-  // call the actual method
-  forward_(input, output);
-}
-
diff --git a/bob/learn/misc/cpp/BICTrainer.cpp b/bob/learn/misc/cpp/BICTrainer.cpp
deleted file mode 100644
index 475fad9d2ce7138a87ad05def1c1aed11c4557c6..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/BICTrainer.cpp
+++ /dev/null
@@ -1,94 +0,0 @@
-/**
- * @date Wed Jun  6 10:29:09 CEST 2012
- * @author Manuel Guenther <Manuel.Guenther@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <bob.learn.misc/BICTrainer.h>
-#include <bob.learn.linear/pca.h>
-#include <bob.learn.linear/machine.h>
-
-static double sqr(const double& x){
-  return x*x;
-}
-
-/**
- * This function trains one of the classes of the given machine with the given data.
- * It computes either BIC projection matrices, or IEC mean and variance.
- *
- * @param  clazz    false for the intrapersonal class, true for the extrapersonal one.
- * @param  machine  The machine to be trained.
- * @param  differences  A set of (intra/extra)-personal difference vectors that should be trained.
- */
-void bob::learn::misc::BICTrainer::train_single(bool clazz, bob::learn::misc::BICMachine& machine, const blitz::Array<double,2>& differences) const {
-  int subspace_dim = clazz ? m_M_E : m_M_I;
-  int input_dim = differences.extent(1);
-  int data_count = differences.extent(0);
-  blitz::Range a = blitz::Range::all();
-
-  if (subspace_dim){
-    // train the class using BIC
-
-    // Compute PCA on the given dataset
-    bob::learn::linear::PCATrainer trainer;
-    const int n_eigs = trainer.output_size(differences);
-    bob::learn::linear::Machine pca(input_dim, n_eigs);
-    blitz::Array<double,1> variances(n_eigs);
-    trainer.train(pca, variances, differences);
-
-    // compute rho
-    double rho = 0.;
-    int non_zero_eigenvalues = std::min(input_dim, data_count-1);
-    // assert that the number of kept eigenvalues is not chosen to big
-    if (subspace_dim >= non_zero_eigenvalues)
-      throw std::runtime_error((boost::format("The chosen subspace dimension %d is larger than the theoretical number of nonzero eigenvalues %d")%subspace_dim%non_zero_eigenvalues).str());
-    // compute the average of the reminding eigenvalues
-    for (int i = subspace_dim; i < non_zero_eigenvalues; ++i){
-      rho += variances(i);
-    }
-    rho /= non_zero_eigenvalues - subspace_dim;
-
-    // limit dimensionalities
-    pca.resize(input_dim, subspace_dim);
-    variances.resizeAndPreserve(subspace_dim);
-
-    // check that all variances are meaningful
-    for (int i = 0; i < subspace_dim; ++i){
-      if (variances(i) < 1e-12)
-        throw std::runtime_error((boost::format("The chosen subspace dimension is %d, but the %dth eigenvalue is already to small")%subspace_dim%i).str());
-    }
-
-    // initialize the machine
-    blitz::Array<double, 2> projection = pca.getWeights();
-    blitz::Array<double, 1> mean = pca.getInputSubtraction();
-    machine.setBIC(clazz, mean, variances, projection, rho);
-  } else {
-    // train the class using IEC
-    // => compute mean and variance only
-    blitz::Array<double,1> mean(input_dim), variance(input_dim);
-
-    // compute mean and variance
-    mean = 0.;
-    variance = 0.;
-    for (int n = data_count; n--;){
-      const blitz::Array<double,1>& diff = differences(n,a);
-      assert(diff.shape()[0] == input_dim);
-      for (int i = input_dim; i--;){
-        mean(i) += diff(i);
-        variance(i) += sqr(diff(i));
-      }
-    }
-    // normalize mean and variances
-    for (int i = 0; i < input_dim; ++i){
-      // intrapersonal
-      variance(i) = (variance(i) - sqr(mean(i)) / data_count) / (data_count - 1.);
-      mean(i) /= data_count;
-      if (variance(i) < 1e-12)
-        throw std::runtime_error((boost::format("The variance of the %dth dimension is too small. Check your data!")%i).str());
-    }
-
-    // set the results to the machine
-    machine.setIEC(clazz, mean, variance);
-  }
-}
diff --git a/bob/learn/misc/cpp/EMPCATrainer.cpp b/bob/learn/misc/cpp/EMPCATrainer.cpp
deleted file mode 100644
index 60dffc7389344fbd16b65666adbc0c1d9f333650..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/EMPCATrainer.cpp
+++ /dev/null
@@ -1,418 +0,0 @@
-/**
- * @date Tue Oct 11 12:18:23 2011 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <vector>
-#include <algorithm>
-#include <boost/random.hpp>
-#include <cmath>
-
-#include <bob.learn.misc/EMPCATrainer.h>
-#include <bob.core/array_copy.h>
-#include <bob.core/check.h>
-#include <bob.math/linear.h>
-#include <bob.math/det.h>
-#include <bob.math/inv.h>
-#include <bob.math/stats.h>
-
-bob::learn::misc::EMPCATrainer::EMPCATrainer(bool compute_likelihood):
-  m_compute_likelihood(compute_likelihood),
-  m_rng(new boost::mt19937()),
-  m_S(0,0),
-  m_z_first_order(0,0), m_z_second_order(0,0,0),
-  m_inW(0,0), m_invM(0,0), m_sigma2(0), m_f_log2pi(0),
-  m_tmp_dxf(0,0), m_tmp_d(0), m_tmp_f(0),
-  m_tmp_dxd_1(0,0), m_tmp_dxd_2(0,0),
-  m_tmp_fxd_1(0,0), m_tmp_fxd_2(0,0),
-  m_tmp_fxf_1(0,0), m_tmp_fxf_2(0,0)
-{
-}
-
-bob::learn::misc::EMPCATrainer::EMPCATrainer(const bob::learn::misc::EMPCATrainer& other):
-  m_compute_likelihood(other.m_compute_likelihood),
-  m_rng(other.m_rng),
-  m_S(bob::core::array::ccopy(other.m_S)),
-  m_z_first_order(bob::core::array::ccopy(other.m_z_first_order)),
-  m_z_second_order(bob::core::array::ccopy(other.m_z_second_order)),
-  m_inW(bob::core::array::ccopy(other.m_inW)),
-  m_invM(bob::core::array::ccopy(other.m_invM)),
-  m_sigma2(other.m_sigma2), m_f_log2pi(other.m_f_log2pi),
-  m_tmp_dxf(bob::core::array::ccopy(other.m_tmp_dxf)),
-  m_tmp_d(bob::core::array::ccopy(other.m_tmp_d)),
-  m_tmp_f(bob::core::array::ccopy(other.m_tmp_f)),
-  m_tmp_dxd_1(bob::core::array::ccopy(other.m_tmp_dxd_1)),
-  m_tmp_dxd_2(bob::core::array::ccopy(other.m_tmp_dxd_2)),
-  m_tmp_fxd_1(bob::core::array::ccopy(other.m_tmp_fxd_1)),
-  m_tmp_fxd_2(bob::core::array::ccopy(other.m_tmp_fxd_2)),
-  m_tmp_fxf_1(bob::core::array::ccopy(other.m_tmp_fxf_1)),
-  m_tmp_fxf_2(bob::core::array::ccopy(other.m_tmp_fxf_2))
-{
-}
-
-bob::learn::misc::EMPCATrainer::~EMPCATrainer()
-{
-}
-
-bob::learn::misc::EMPCATrainer& bob::learn::misc::EMPCATrainer::operator=
-  (const bob::learn::misc::EMPCATrainer& other)
-{
-  if (this != &other)
-  {
-    m_rng                   = other.m_rng;
-	m_compute_likelihood    = other.m_compute_likelihood;
-    m_S = bob::core::array::ccopy(other.m_S);
-    m_z_first_order = bob::core::array::ccopy(other.m_z_first_order);
-    m_z_second_order = bob::core::array::ccopy(other.m_z_second_order);
-    m_inW = bob::core::array::ccopy(other.m_inW);
-    m_invM = bob::core::array::ccopy(other.m_invM);
-    m_sigma2 = other.m_sigma2;
-    m_f_log2pi = other.m_f_log2pi;
-    m_tmp_dxf = bob::core::array::ccopy(other.m_tmp_dxf);
-    m_tmp_d = bob::core::array::ccopy(other.m_tmp_d);
-    m_tmp_f = bob::core::array::ccopy(other.m_tmp_f);
-    m_tmp_dxd_1 = bob::core::array::ccopy(other.m_tmp_dxd_1);
-    m_tmp_dxd_2 = bob::core::array::ccopy(other.m_tmp_dxd_2);
-    m_tmp_fxd_1 = bob::core::array::ccopy(other.m_tmp_fxd_1);
-    m_tmp_fxd_2 = bob::core::array::ccopy(other.m_tmp_fxd_2);
-    m_tmp_fxf_1 = bob::core::array::ccopy(other.m_tmp_fxf_1);
-    m_tmp_fxf_2 = bob::core::array::ccopy(other.m_tmp_fxf_2);
-  }
-  return *this;
-}
-
-bool bob::learn::misc::EMPCATrainer::operator==
-  (const bob::learn::misc::EMPCATrainer &other) const
-{
-  return m_compute_likelihood == other.m_compute_likelihood &&
-        m_rng                   == other.m_rng &&
-        bob::core::array::isEqual(m_S, other.m_S) &&
-        bob::core::array::isEqual(m_z_first_order, other.m_z_first_order) &&
-        bob::core::array::isEqual(m_z_second_order, other.m_z_second_order) &&
-        bob::core::array::isEqual(m_inW, other.m_inW) &&
-        bob::core::array::isEqual(m_invM, other.m_invM) &&
-        m_sigma2 == other.m_sigma2 &&
-        m_f_log2pi == other.m_f_log2pi;
-}
-
-bool bob::learn::misc::EMPCATrainer::operator!=
-  (const bob::learn::misc::EMPCATrainer &other) const
-{
-  return !(this->operator==(other));
-}
-
-bool bob::learn::misc::EMPCATrainer::is_similar_to
-  (const bob::learn::misc::EMPCATrainer &other, const double r_epsilon,
-   const double a_epsilon) const
-{
-  return m_compute_likelihood == other.m_compute_likelihood &&
-         m_rng                == other.m_rng &&
-         bob::core::array::isClose(m_S, other.m_S, r_epsilon, a_epsilon) &&
-         bob::core::array::isClose(m_z_first_order, other.m_z_first_order, r_epsilon, a_epsilon) &&
-         bob::core::array::isClose(m_z_second_order, other.m_z_second_order, r_epsilon, a_epsilon) &&
-         bob::core::array::isClose(m_inW, other.m_inW, r_epsilon, a_epsilon) &&
-         bob::core::array::isClose(m_invM, other.m_invM, r_epsilon, a_epsilon) &&
-         bob::core::isClose(m_sigma2, other.m_sigma2, r_epsilon, a_epsilon) &&
-         bob::core::isClose(m_f_log2pi, other.m_f_log2pi, r_epsilon, a_epsilon);
-}
-
-void bob::learn::misc::EMPCATrainer::initialize(bob::learn::linear::Machine& machine,
-  const blitz::Array<double,2>& ar)
-{
-  // reinitializes array members and checks dimensionality
-  initMembers(machine, ar);
-
-  // computes the mean and the covariance if required
-  computeMeanVariance(machine, ar);
-
-  // Random initialization of W and sigma2
-  initRandomWSigma2(machine);
-
-  // Computes the product m_inW = W^T.W
-  computeWtW(machine);
-  // Computes inverse(M), where M = Wt * W + sigma2 * Id
-  computeInvM();
-}
-
-
-void bob::learn::misc::EMPCATrainer::initMembers(
-  const bob::learn::linear::Machine& machine,
-  const blitz::Array<double,2>& ar)
-{
-  // Gets dimensions
-  const size_t n_samples = ar.extent(0);
-  const size_t n_features = ar.extent(1);
-
-  // Checks that the dimensions are matching
-  const size_t n_inputs = machine.inputSize();
-  const size_t n_outputs = machine.outputSize();
-
-  // Checks that the dimensions are matching
-  if (n_inputs != n_features) {
-    boost::format m("number of inputs (%u) does not match the number of features (%u)");
-    m % n_inputs % n_features;
-    throw std::runtime_error(m.str());
-  }
-
-  // Covariance matrix S is only required to compute the log likelihood
-  if (m_compute_likelihood)
-    m_S.resize(n_features,n_features);
-  else
-    m_S.resize(0,0);
-  m_z_first_order.resize(n_samples, n_outputs);
-  m_z_second_order.resize(n_samples, n_outputs, n_outputs);
-  m_inW.resize(n_outputs, n_outputs);
-  m_invM.resize(n_outputs, n_outputs);
-  m_sigma2 = 0.;
-  m_f_log2pi = n_features * log(2*M_PI);
-
-  // Cache
-  m_tmp_dxf.resize(n_outputs, n_features);
-  m_tmp_d.resize(n_outputs);
-  m_tmp_f.resize(n_features);
-  m_tmp_dxd_1.resize(n_outputs, n_outputs);
-  m_tmp_dxd_2.resize(n_outputs, n_outputs);
-  m_tmp_fxd_1.resize(n_features, n_outputs);
-  m_tmp_fxd_2.resize(n_features, n_outputs);
-  // The following large cache matrices are only required to compute the
-  // log likelihood.
-  if (m_compute_likelihood)
-  {
-    m_tmp_fxf_1.resize(n_features, n_features);
-    m_tmp_fxf_2.resize(n_features, n_features);
-  }
-  else
-  {
-    m_tmp_fxf_1.resize(0,0);
-    m_tmp_fxf_2.resize(0,0);
-  }
-}
-
-void bob::learn::misc::EMPCATrainer::computeMeanVariance(bob::learn::linear::Machine& machine,
-  const blitz::Array<double,2>& ar)
-{
-  size_t n_samples = ar.extent(0);
-  blitz::Array<double,1> mu = machine.updateInputSubtraction();
-  blitz::Range all = blitz::Range::all();
-  if (m_compute_likelihood)
-  {
-    // Mean and scatter computation
-    bob::math::scatter(ar, m_S, mu);
-    // divides scatter by N-1
-    m_S /= static_cast<double>(n_samples-1);
-  }
-  else
-  {
-    // computes the mean and updates mu
-    mu = 0.;
-    for (size_t i=0; i<n_samples; ++i)
-      mu += ar(i,all);
-    mu /= static_cast<double>(n_samples);
-  }
-}
-
-void bob::learn::misc::EMPCATrainer::initRandomWSigma2(bob::learn::linear::Machine& machine)
-{
-  // Initializes the random number generator
-  boost::uniform_01<> range01;
-  boost::variate_generator<boost::mt19937&, boost::uniform_01<> > die(*m_rng, range01);
-
-  // W initialization (TODO: add method in core)
-  blitz::Array<double,2> W = machine.updateWeights();
-  double ratio = 2.; /// Follows matlab implementation using a ratio of 2
-  for (int i=0; i<W.extent(0); ++i)
-    for (int j=0; j<W.extent(1); ++j)
-      W(i,j) = die() * ratio;
-  // sigma2 initialization
-  m_sigma2 = die() * ratio;
-}
-
-void bob::learn::misc::EMPCATrainer::computeWtW(bob::learn::linear::Machine& machine)
-{
-  const blitz::Array<double,2> W = machine.getWeights();
-  const blitz::Array<double,2> Wt = W.transpose(1,0);
-  bob::math::prod(Wt, W, m_inW);
-}
-
-void bob::learn::misc::EMPCATrainer::computeInvM()
-{
-  // Compute inverse(M), where M = W^T * W + sigma2 * Id
-  bob::math::eye(m_tmp_dxd_1); // m_tmp_dxd_1 = Id
-  m_tmp_dxd_1 *= m_sigma2; // m_tmp_dxd_1 = sigma2 * Id
-  m_tmp_dxd_1 += m_inW; // m_tmp_dxd_1 = M = W^T * W + sigma2 * Id
-  bob::math::inv(m_tmp_dxd_1, m_invM); // m_invM = inv(M)
-}
-
-
-
-void bob::learn::misc::EMPCATrainer::eStep(bob::learn::linear::Machine& machine, const blitz::Array<double,2>& ar)
-{
-  // Gets mu and W from the machine
-  const blitz::Array<double,1>& mu = machine.getInputSubtraction();
-  const blitz::Array<double,2>& W = machine.getWeights();
-  const blitz::Array<double,2> Wt = W.transpose(1,0); // W^T
-
-  // Computes the statistics
-  blitz::Range a = blitz::Range::all();
-  for(int i=0; i<ar.extent(0); ++i)
-  {
-    /// 1/ First order statistics: \f$z_first_order_i = inv(M) W^T (t - \mu)\f$
-    // m_tmp_f = t (sample) - mu (normalized sample)
-    m_tmp_f = ar(i,a) - mu;
-    // m_tmp_dxf = inv(M) * W^T
-    bob::math::prod(m_invM, Wt, m_tmp_dxf);
-    blitz::Array<double,1> z_first_order_i = m_z_first_order(i,blitz::Range::all());
-    // z_first_order_i = inv(M) * W^T * (t - mu)
-    bob::math::prod(m_tmp_dxf, m_tmp_f, z_first_order_i);
-
-    /// 2/ Second order statistics:
-    ///     z_second_order_i = sigma2 * inv(M) + z_first_order_i * z_first_order_i^T
-    blitz::Array<double,2> z_second_order_i = m_z_second_order(i,blitz::Range::all(),blitz::Range::all());
-    // m_tmp_dxd = z_first_order_i * z_first_order_i^T
-    bob::math::prod(z_first_order_i, z_first_order_i, m_tmp_dxd_1); // outer product
-    // z_second_order_i = sigma2 * inv(M)
-    z_second_order_i = m_invM;
-    z_second_order_i *= m_sigma2;
-    // z_second_order_i = sigma2 * inv(M) + z_first_order_i * z_first_order_i^T
-    z_second_order_i += m_tmp_dxd_1;
-  }
-}
-
-void bob::learn::misc::EMPCATrainer::mStep(bob::learn::linear::Machine& machine, const blitz::Array<double,2>& ar)
-{
-  // 1/ New estimate of W
-  updateW(machine, ar);
-
-  // 2/ New estimate of sigma2
-  updateSigma2(machine, ar);
-
-  // Computes the new value of inverse(M), where M = Wt * W + sigma2 * Id
-  computeInvM();
-}
-
-void bob::learn::misc::EMPCATrainer::updateW(bob::learn::linear::Machine& machine, const blitz::Array<double,2>& ar) {
-  // Get the mean mu and the projection matrix W
-  const blitz::Array<double,1>& mu = machine.getInputSubtraction();
-  blitz::Array<double,2>& W = machine.updateWeights();
-  const blitz::Array<double,2> Wt = W.transpose(1,0); // W^T
-
-  // Compute W = sum{ (t_{i} - mu) z_first_order_i^T} * inv( sum{z_second_order_i} )
-  m_tmp_fxd_1 = 0.;
-  m_tmp_dxd_1 = 0.;
-  blitz::Range a = blitz::Range::all();
-  for(int i=0; i<ar.extent(0); ++i)
-  {
-    // m_tmp_f = t (sample) - mu (normalized sample)
-    m_tmp_f = ar(i,a) - mu;
-    // first order statistics of sample i
-    blitz::Array<double,1> z_first_order_i = m_z_first_order(i,blitz::Range::all());
-    // m_tmp_fxd_2 = (t - mu)*z_first_order_i
-    bob::math::prod(m_tmp_f, z_first_order_i, m_tmp_fxd_2);
-    m_tmp_fxd_1 += m_tmp_fxd_2;
-
-    // second order statistics of sample i
-    blitz::Array<double,2> z_second_order_i = m_z_second_order(i,blitz::Range::all(),blitz::Range::all());
-    m_tmp_dxd_1 += z_second_order_i;
-  }
-
-  // m_tmp_dxd_2 = inv( sum(E(x_i.x_i^T)) )
-  bob::math::inv(m_tmp_dxd_1, m_tmp_dxd_2);
-  // New estimates of W
-  bob::math::prod(m_tmp_fxd_1, m_tmp_dxd_2, W);
-  // Updates W'*W as well
-  bob::math::prod(Wt, W, m_inW);
-}
-
-void bob::learn::misc::EMPCATrainer::updateSigma2(bob::learn::linear::Machine& machine, const blitz::Array<double,2>& ar) {
-  // Get the mean mu and the projection matrix W
-  const blitz::Array<double,1>& mu = machine.getInputSubtraction();
-  const blitz::Array<double,2>& W = machine.getWeights();
-  const blitz::Array<double,2> Wt = W.transpose(1,0); // W^T
-
-  m_sigma2 = 0.;
-  blitz::Range a = blitz::Range::all();
-  for(int i=0; i<ar.extent(0); ++i)
-  {
-    // a. sigma2 += || t - mu ||^2
-    // m_tmp_f = t (sample) - mu (normalized sample)
-    m_tmp_f = ar(i,a) - mu;
-    // sigma2 += || t - mu ||^2
-    m_sigma2 += blitz::sum(blitz::pow2(m_tmp_f));
-
-    // b. sigma2 -= 2*E(x_i)^T*W^T*(t - mu)
-    // m_tmp_d = W^T*(t - mu)
-    bob::math::prod(Wt, m_tmp_f, m_tmp_d);
-    // first order of i
-    blitz::Array<double,1> z_first_order_i = m_z_first_order(i,blitz::Range::all());
-    // sigma2 -= 2*E(x_i)^T*W^T*(t - mu)
-    m_sigma2 -= 2*bob::math::dot(z_first_order_i, m_tmp_d);
-
-    // c. sigma2 += trace( E(x_i.x_i^T)*W^T*W )
-    // second order of i
-    blitz::Array<double,2> z_second_order_i = m_z_second_order(i,blitz::Range::all(),blitz::Range::all());
-    // m_tmp_dxd_1 = E(x_i.x_i^T)*W^T*W
-    bob::math::prod(z_second_order_i, m_inW, m_tmp_dxd_1);
-    // sigma2 += trace( E(x_i.x_i^T)*W^T*W )
-    m_sigma2 += bob::math::trace(m_tmp_dxd_1);
-  }
-  // Normalization factor
-  m_sigma2 /= (static_cast<double>(ar.extent(0)) * mu.extent(0));
-}
-
-double bob::learn::misc::EMPCATrainer::computeLikelihood(bob::learn::linear::Machine& machine)
-{
-  // Get W projection matrix
-  const blitz::Array<double,2>& W = machine.getWeights();
-  const blitz::Array<double,2> Wt = W.transpose(1,0); // W^T
-  const size_t n_features = m_S.extent(0);
-
-  // 1/ Compute det(C), where C = sigma2.I + W.W^T
-  //            det(C) = det(sigma2 * C / sigma2) = det(sigma2 * Id) * det(C / sigma2)
-  //    We are using Sylvester's determinant theorem to compute a dxd
-  //    determinant rather than a fxf one: det(I + A.B) = det(I + B.A)
-  //            det(C) = sigma2^n_features * det(I + W.W^T/sigma2)
-  //                   = sigma2^n_features * det(I + W^T.W/sigma2) (cf. Bishop Appendix C)
-  // detC = det( eye(n_features) * sigma2 )
-
-  // detC = sigma2^n_features
-  double detC = pow(m_sigma2, n_features);
-  // m_tmp_dxd_1 = Id
-  bob::math::eye(m_tmp_dxd_1);
-  // m_tmp_dxd_2 = W^T.W
-  bob::math::prod(Wt, W, m_tmp_dxd_2);
-  // m_tmp_dxd_2 = W^T.W / sigma2
-  m_tmp_dxd_2 /= m_sigma2;
-  // m_tmp_dxd_1 = Id + W^T.W / sigma2
-  m_tmp_dxd_1 += m_tmp_dxd_2;
-  // detC = sigma2^n_features * det(I + W^T.W/sigma2)
-  detC *= bob::math::det(m_tmp_dxd_1);
-
-  // 2/ Compute inv(C), where C = sigma2.I + W.W^T
-  //    We are using the following identity (Property C.7 of Bishop's book)
-  //      (A + B.D^-1.C)^-1 = A^-1 - A^-1.B(D+C.A^-1.B)^-1.C.A^-1
-  //    Hence, inv(C) = sigma2^-1 .(I - W.M^-1.W^T)
-
-  // Compute inverse(M), where M = Wt * W + sigma2 * Id
-  computeInvM();
-  // m_tmp_fxf_1 = I = eye(n_features)
-  bob::math::eye(m_tmp_fxf_1);
-  // m_tmp_fxd_1 = W * inv(M)
-  bob::math::prod(W, m_invM, m_tmp_fxd_1);
-  // m_tmp_fxf_2 = (W * inv(M) * Wt)
-  bob::math::prod(m_tmp_fxd_1, Wt, m_tmp_fxf_2);
-  // m_tmp_fxd_1 = inv(C) = (I - W.M^-1.W^T) / sigma2
-  m_tmp_fxf_1 -= m_tmp_fxf_2;
-  m_tmp_fxf_1 /= m_sigma2;
-
-  // 3/ Compute inv(C).S
-  bob::math::prod(m_tmp_fxf_1, m_S, m_tmp_fxf_2);
-
-  // 4/ Use previous values to compute the log likelihood:
-  // Log likelihood =  - N/2*{ d*ln(2*PI) + ln |detC| + tr(C^-1.S) }
-  double llh = - static_cast<double>(m_z_first_order.extent(0)) / 2. *
-    ( m_f_log2pi + log(fabs(detC)) + bob::math::trace(m_tmp_fxf_2) );
-
-  return llh;
-}
diff --git a/bob/learn/misc/cpp/FABase.cpp b/bob/learn/misc/cpp/FABase.cpp
deleted file mode 100644
index dab2aebe8d3392b63da56196f458f2a1f051f989..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/FABase.cpp
+++ /dev/null
@@ -1,307 +0,0 @@
-/**
- * @date Tue Jan 27 15:51:15 2015 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-
-#include <bob.learn.misc/FABase.h>
-#include <bob.core/array_copy.h>
-#include <bob.math/linear.h>
-#include <bob.math/inv.h>
-#include <limits>
-
-
-//////////////////// FABase ////////////////////
-bob::learn::misc::FABase::FABase():
-  m_ubm(boost::shared_ptr<bob::learn::misc::GMMMachine>()), m_ru(1), m_rv(1),
-  m_U(0,1), m_V(0,1), m_d(0)
-{}
-
-bob::learn::misc::FABase::FABase(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm,
-    const size_t ru, const size_t rv):
-  m_ubm(ubm), m_ru(ru), m_rv(rv),
-  m_U(getSupervectorLength(),ru), m_V(getSupervectorLength(),rv), m_d(getSupervectorLength())
-{
-  if (ru < 1) {
-    boost::format m("value for parameter `ru' (%lu) cannot be smaller than 1");
-    m % ru;
-    throw std::runtime_error(m.str());
-  }
-  if (rv < 1) {
-    boost::format m("value for parameter `rv' (%lu) cannot be smaller than 1");
-    m % ru;
-    throw std::runtime_error(m.str());
-  }
-  updateCache();
-}
-
-bob::learn::misc::FABase::FABase(const bob::learn::misc::FABase& other):
-  m_ubm(other.m_ubm), m_ru(other.m_ru), m_rv(other.m_rv),
-  m_U(bob::core::array::ccopy(other.m_U)),
-  m_V(bob::core::array::ccopy(other.m_V)),
-  m_d(bob::core::array::ccopy(other.m_d))
-{
-  updateCache();
-}
-
-bob::learn::misc::FABase::~FABase() {
-}
-
-bob::learn::misc::FABase& bob::learn::misc::FABase::operator=
-(const bob::learn::misc::FABase& other)
-{
-  if (this != &other)
-  {
-    m_ubm = other.m_ubm;
-    m_ru = other.m_ru;
-    m_rv = other.m_rv;
-    m_U.reference(bob::core::array::ccopy(other.m_U));
-    m_V.reference(bob::core::array::ccopy(other.m_V));
-    m_d.reference(bob::core::array::ccopy(other.m_d));
-
-    updateCache();
-  }
-  return *this;
-}
-
-bool bob::learn::misc::FABase::operator==(const bob::learn::misc::FABase& b) const
-{
-  return ( (((m_ubm && b.m_ubm) && *m_ubm == *(b.m_ubm)) || (!m_ubm && !b.m_ubm)) &&
-          m_ru == b.m_ru && m_rv == b.m_rv &&
-          bob::core::array::isEqual(m_U, b.m_U) &&
-          bob::core::array::isEqual(m_V, b.m_V) &&
-          bob::core::array::isEqual(m_d, b.m_d));
-}
-
-bool bob::learn::misc::FABase::operator!=(const bob::learn::misc::FABase& b) const
-{
-  return !(this->operator==(b));
-}
-
-bool bob::learn::misc::FABase::is_similar_to(const bob::learn::misc::FABase& b,
-    const double r_epsilon, const double a_epsilon) const
-{
-  // TODO: update is_similar_to of the GMMMachine with the 2 epsilon's
-  return (( ((m_ubm && b.m_ubm) && m_ubm->is_similar_to(*(b.m_ubm), a_epsilon)) ||
-            (!m_ubm && !b.m_ubm) ) &&
-          m_ru == b.m_ru && m_rv == b.m_rv &&
-          bob::core::array::isClose(m_U, b.m_U, r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(m_V, b.m_V, r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(m_d, b.m_d, r_epsilon, a_epsilon));
-}
-
-void bob::learn::misc::FABase::resize(const size_t ru, const size_t rv)
-{
-  if (ru < 1) {
-    boost::format m("value for parameter `ru' (%lu) cannot be smaller than 1");
-    m % ru;
-    throw std::runtime_error(m.str());
-  }
-  if (rv < 1) {
-    boost::format m("value for parameter `rv' (%lu) cannot be smaller than 1");
-    m % ru;
-    throw std::runtime_error(m.str());
-  }
-
-  m_ru = ru;
-  m_rv = rv;
-  m_U.resizeAndPreserve(m_U.extent(0), ru);
-  m_V.resizeAndPreserve(m_V.extent(0), rv);
-
-  updateCacheUbmUVD();
-}
-
-void bob::learn::misc::FABase::resize(const size_t ru, const size_t rv, const size_t cd)
-{
-  if (ru < 1) {
-    boost::format m("value for parameter `ru' (%lu) cannot be smaller than 1");
-    m % ru;
-    throw std::runtime_error(m.str());
-  }
-  if (rv < 1) {
-    boost::format m("value for parameter `rv' (%lu) cannot be smaller than 1");
-    m % ru;
-    throw std::runtime_error(m.str());
-  }
-
-  if (!m_ubm || (m_ubm && getSupervectorLength() == cd))
-  {
-    m_ru = ru;
-    m_rv = rv;
-    m_U.resizeAndPreserve(cd, ru);
-    m_V.resizeAndPreserve(cd, rv);
-    m_d.resizeAndPreserve(cd);
-
-    updateCacheUbmUVD();
-  }
-  else {
-    boost::format m("value for parameter `cd' (%lu) is not set to %lu");
-    m % cd % getSupervectorLength();
-    throw std::runtime_error(m.str());
-  }
-}
-
-void bob::learn::misc::FABase::setUbm(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm)
-{
-  m_ubm = ubm;
-  m_U.resizeAndPreserve(getSupervectorLength(), m_ru);
-  m_V.resizeAndPreserve(getSupervectorLength(), m_rv);
-  m_d.resizeAndPreserve(getSupervectorLength());
-
-  updateCache();
-}
-
-void bob::learn::misc::FABase::setU(const blitz::Array<double,2>& U)
-{
-  if(U.extent(0) != m_U.extent(0)) { //checks dimension
-    boost::format m("number of rows in parameter `U' (%d) does not match the expected size (%d)");
-    m % U.extent(0) % m_U.extent(0);
-    throw std::runtime_error(m.str());
-  }
-  if(U.extent(1) != m_U.extent(1)) { //checks dimension
-    boost::format m("number of columns in parameter `U' (%d) does not match the expected size (%d)");
-    m % U.extent(1) % m_U.extent(1);
-    throw std::runtime_error(m.str());
-  }
-  m_U.reference(bob::core::array::ccopy(U));
-
-  // update cache
-  updateCacheUbmUVD();
-}
-
-void bob::learn::misc::FABase::setV(const blitz::Array<double,2>& V)
-{
-  if(V.extent(0) != m_V.extent(0)) { //checks dimension
-    boost::format m("number of rows in parameter `V' (%d) does not match the expected size (%d)");
-    m % V.extent(0) % m_V.extent(0);
-    throw std::runtime_error(m.str());
-  }
-  if(V.extent(1) != m_V.extent(1)) { //checks dimension
-    boost::format m("number of columns in parameter `V' (%d) does not match the expected size (%d)");
-    m % V.extent(1) % m_V.extent(1);
-    throw std::runtime_error(m.str());
-  }
-  m_V.reference(bob::core::array::ccopy(V));
-}
-
-void bob::learn::misc::FABase::setD(const blitz::Array<double,1>& d)
-{
-  if(d.extent(0) != m_d.extent(0)) { //checks dimension
-    boost::format m("size of input vector `d' (%d) does not match the expected size (%d)");
-    m % d.extent(0) % m_d.extent(0);
-    throw std::runtime_error(m.str());
-  }
-  m_d.reference(bob::core::array::ccopy(d));
-}
-
-
-void bob::learn::misc::FABase::updateCache()
-{
-  updateCacheUbm();
-  updateCacheUbmUVD();
-  resizeTmp();
-}
-
-void bob::learn::misc::FABase::resizeTmp()
-{
-  m_tmp_IdPlusUSProdInv.resize(getDimRu(),getDimRu());
-  m_tmp_Fn_x.resize(getSupervectorLength());
-  m_tmp_ru.resize(getDimRu());
-  m_tmp_ruD.resize(getDimRu(), getNInputs());
-  m_tmp_ruru.resize(getDimRu(), getDimRu());
-}
-
-void bob::learn::misc::FABase::updateCacheUbm()
-{
-  // Put supervectors in cache
-  if (m_ubm)
-  {
-    m_cache_mean.resize(getSupervectorLength());
-    m_cache_sigma.resize(getSupervectorLength());
-    m_cache_mean  = m_ubm->getMeanSupervector();
-    m_cache_sigma = m_ubm->getVarianceSupervector();
-  }
-}
-
-void bob::learn::misc::FABase::updateCacheUbmUVD()
-{
-  // Compute and put  U^{T}.diag(sigma)^{-1} in cache
-  if (m_ubm)
-  {
-    blitz::firstIndex i;
-    blitz::secondIndex j;
-    m_cache_UtSigmaInv.resize(getDimRu(), getSupervectorLength());
-    m_cache_UtSigmaInv = m_U(j,i) / m_cache_sigma(j); // Ut * diag(sigma)^-1
-  }
-}
-
-void bob::learn::misc::FABase::computeIdPlusUSProdInv(const bob::learn::misc::GMMStats& gmm_stats,
-  blitz::Array<double,2>& output) const
-{
-  // Computes (Id + U^T.Sigma^-1.U.N_{i,h}.U)^-1 =
-  // (Id + sum_{c=1..C} N_{i,h}.U_{c}^T.Sigma_{c}^-1.U_{c})^-1
-
-  // Blitz compatibility: ugly fix (const_cast, as old blitz version does not
-  // provide a non-const version of transpose())
-  blitz::Array<double,2> Ut = const_cast<blitz::Array<double,2>&>(m_U).transpose(1,0);
-
-  blitz::firstIndex i;
-  blitz::secondIndex j;
-  blitz::Range rall = blitz::Range::all();
-
-  bob::math::eye(m_tmp_ruru); // m_tmp_ruru = Id
-  // Loop and add N_{i,h}.U_{c}^T.Sigma_{c}^-1.U_{c} to m_tmp_ruru at each iteration
-  const size_t dim_c = getNGaussians();
-  const size_t dim_d = getNInputs();
-  for(size_t c=0; c<dim_c; ++c) {
-    blitz::Range rc(c*dim_d,(c+1)*dim_d-1);
-    blitz::Array<double,2> Ut_c = Ut(rall,rc);
-    blitz::Array<double,1> sigma_c = m_cache_sigma(rc);
-    m_tmp_ruD = Ut_c(i,j) / sigma_c(j); // U_{c}^T.Sigma_{c}^-1
-    blitz::Array<double,2> U_c = m_U(rc,rall);
-    // Use m_cache_IdPlusUSProdInv as an intermediate array
-    bob::math::prod(m_tmp_ruD, U_c, output); // U_{c}^T.Sigma_{c}^-1.U_{c}
-    // Finally, add N_{i,h}.U_{c}^T.Sigma_{c}^-1.U_{c} to m_tmp_ruru
-    m_tmp_ruru += output * gmm_stats.n(c);
-  }
-  // Computes the inverse
-  bob::math::inv(m_tmp_ruru, output);
-}
-
-
-void bob::learn::misc::FABase::computeFn_x(const bob::learn::misc::GMMStats& gmm_stats,
-  blitz::Array<double,1>& output) const
-{
-  // Compute Fn_x = sum_{sessions h}(N*(o - m) (Normalised first order statistics)
-  blitz::Range rall = blitz::Range::all();
-  const size_t dim_c = getNGaussians();
-  const size_t dim_d = getNInputs();
-  for(size_t c=0; c<dim_c; ++c) {
-    blitz::Range rc(c*dim_d,(c+1)*dim_d-1);
-    blitz::Array<double,1> Fn_x_c = output(rc);
-    blitz::Array<double,1> mean_c = m_cache_mean(rc);
-    Fn_x_c = gmm_stats.sumPx(c,rall) - mean_c*gmm_stats.n(c);
-  }
-}
-
-void bob::learn::misc::FABase::estimateX(const blitz::Array<double,2>& IdPlusUSProdInv,
-  const blitz::Array<double,1>& Fn_x, blitz::Array<double,1>& x) const
-{
-  // m_tmp_ru = UtSigmaInv * Fn_x = Ut*diag(sigma)^-1 * N*(o - m)
-  bob::math::prod(m_cache_UtSigmaInv, Fn_x, m_tmp_ru);
-  // x = IdPlusUSProdInv * m_cache_UtSigmaInv * Fn_x
-  bob::math::prod(IdPlusUSProdInv, m_tmp_ru, x);
-}
-
-
-void bob::learn::misc::FABase::estimateX(const bob::learn::misc::GMMStats& gmm_stats, blitz::Array<double,1>& x) const
-{
-  if (!m_ubm) throw std::runtime_error("No UBM was set in the JFA machine.");
-  computeIdPlusUSProdInv(gmm_stats, m_tmp_IdPlusUSProdInv); // Computes first term
-  computeFn_x(gmm_stats, m_tmp_Fn_x); // Computes last term
-  estimateX(m_tmp_IdPlusUSProdInv, m_tmp_Fn_x, x); // Estimates the value of x
-}
-
diff --git a/bob/learn/misc/cpp/FABaseTrainer.cpp b/bob/learn/misc/cpp/FABaseTrainer.cpp
deleted file mode 100644
index 58278595615affb5998c1d5fd25b730297a25310..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/FABaseTrainer.cpp
+++ /dev/null
@@ -1,547 +0,0 @@
-/**
- * @date Sat Jan 31 17:16:17 2015 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- *
- * @brief FABaseTrainer functions
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <bob.learn.misc/FABaseTrainer.h>
-#include <bob.core/check.h>
-#include <bob.core/array_copy.h>
-#include <bob.core/array_random.h>
-#include <bob.math/inv.h>
-#include <bob.math/linear.h>
-#include <bob.core/check.h>
-#include <bob.core/array_repmat.h>
-#include <algorithm>
-
-
-bob::learn::misc::FABaseTrainer::FABaseTrainer():
-  m_Nid(0), m_dim_C(0), m_dim_D(0), m_dim_ru(0), m_dim_rv(0),
-  m_x(0), m_y(0), m_z(0), m_Nacc(0), m_Facc(0)
-{
-}
-
-bob::learn::misc::FABaseTrainer::FABaseTrainer(const bob::learn::misc::FABaseTrainer& other)
-{
-}
-
-bob::learn::misc::FABaseTrainer::~FABaseTrainer()
-{
-}
-
-void bob::learn::misc::FABaseTrainer::checkStatistics(
-  const bob::learn::misc::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
-{
-  for (size_t id=0; id<stats.size(); ++id) {
-    for (size_t s=0; s<stats[id].size(); ++s) {
-      if (stats[id][s]->sumPx.extent(0) != (int)m_dim_C) {
-        boost::format m("GMMStats C dimension parameter = %d is different than the expected value of %d");
-        m % stats[id][s]->sumPx.extent(0) % (int)m_dim_C;
-        throw std::runtime_error(m.str());
-      }
-      if (stats[id][s]->sumPx.extent(1) != (int)m_dim_D) {
-        boost::format m("GMMStats D dimension parameter = %d is different than the expected value of %d");
-        m % stats[id][s]->sumPx.extent(1) % (int)m_dim_D;
-        throw std::runtime_error(m.str());
-      }
-    }
-  }
-}
-
-
-void bob::learn::misc::FABaseTrainer::initUbmNidSumStatistics(
-  const bob::learn::misc::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
-{
-  m_Nid = stats.size();
-  boost::shared_ptr<bob::learn::misc::GMMMachine> ubm = m.getUbm();
-  // Put UBM in cache
-  m_dim_C = ubm->getNGaussians();
-  m_dim_D = ubm->getNInputs();
-  m_dim_ru = m.getDimRu();
-  m_dim_rv = m.getDimRv();
-  // Check statistics dimensionality
-  checkStatistics(m, stats);
-  // Precomputes the sum of the statistics for each client/identity
-  precomputeSumStatisticsN(stats);
-  precomputeSumStatisticsF(stats);
-  // Cache and working arrays
-  initCache();
-}
-
-void bob::learn::misc::FABaseTrainer::precomputeSumStatisticsN(
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
-{
-  m_Nacc.clear();
-  blitz::Array<double,1> Nsum(m_dim_C);
-  for (size_t id=0; id<stats.size(); ++id) {
-    Nsum = 0.;
-    for (size_t s=0; s<stats[id].size(); ++s) {
-      Nsum += stats[id][s]->n;
-    }
-    m_Nacc.push_back(bob::core::array::ccopy(Nsum));
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::precomputeSumStatisticsF(
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
-{
-  m_Facc.clear();
-  blitz::Array<double,1> Fsum(m_dim_C*m_dim_D);
-  for (size_t id=0; id<stats.size(); ++id) {
-    Fsum = 0.;
-    for (size_t s=0; s<stats[id].size(); ++s) {
-      for (size_t c=0; c<m_dim_C; ++c) {
-        blitz::Array<double,1> Fsum_c = Fsum(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1));
-        Fsum_c += stats[id][s]->sumPx(c,blitz::Range::all());
-      }
-    }
-    m_Facc.push_back(bob::core::array::ccopy(Fsum));
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::initializeXYZ(const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& vec)
-{
-  m_x.clear();
-  m_y.clear();
-  m_z.clear();
-
-  blitz::Array<double,1> z0(m_dim_C*m_dim_D);
-  z0 = 0;
-  blitz::Array<double,1> y0(m_dim_rv);
-  y0 = 0;
-  blitz::Array<double,2> x0(m_dim_ru,0);
-  x0 = 0;
-  for (size_t i=0; i<vec.size(); ++i)
-  {
-    m_z.push_back(bob::core::array::ccopy(z0));
-    m_y.push_back(bob::core::array::ccopy(y0));
-    x0.resize(m_dim_ru, vec[i].size());
-    x0 = 0;
-    m_x.push_back(bob::core::array::ccopy(x0));
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::resetXYZ()
-{
-  for (size_t i=0; i<m_x.size(); ++i)
-  {
-    m_x[i] = 0.;
-    m_y[i] = 0.;
-    m_z[i] = 0.;
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::initCache()
-{
-  const size_t dim_CD = m_dim_C*m_dim_D;
-  // U
-  m_cache_UtSigmaInv.resize(m_dim_ru, dim_CD);
-  m_cache_UProd.resize(m_dim_C, m_dim_ru, m_dim_ru);
-  m_cache_IdPlusUProd_ih.resize(m_dim_ru, m_dim_ru);
-  m_cache_Fn_x_ih.resize(dim_CD);
-  m_acc_U_A1.resize(m_dim_C, m_dim_ru, m_dim_ru);
-  m_acc_U_A2.resize(dim_CD, m_dim_ru);
-  // V
-  m_cache_VtSigmaInv.resize(m_dim_rv, dim_CD);
-  m_cache_VProd.resize(m_dim_C, m_dim_rv, m_dim_rv);
-  m_cache_IdPlusVProd_i.resize(m_dim_rv, m_dim_rv);
-  m_cache_Fn_y_i.resize(dim_CD);
-  m_acc_V_A1.resize(m_dim_C, m_dim_rv, m_dim_rv);
-  m_acc_V_A2.resize(dim_CD, m_dim_rv);
-  // D
-  m_cache_DtSigmaInv.resize(dim_CD);
-  m_cache_DProd.resize(dim_CD);
-  m_cache_IdPlusDProd_i.resize(dim_CD);
-  m_cache_Fn_z_i.resize(dim_CD);
-  m_acc_D_A1.resize(dim_CD);
-  m_acc_D_A2.resize(dim_CD);
-
-  // tmp
-  m_tmp_CD.resize(dim_CD);
-  m_tmp_CD_b.resize(dim_CD);
-
-  m_tmp_ru.resize(m_dim_ru);
-  m_tmp_ruD.resize(m_dim_ru, m_dim_D);
-  m_tmp_ruru.resize(m_dim_ru, m_dim_ru);
-
-  m_tmp_rv.resize(m_dim_rv);
-  m_tmp_rvD.resize(m_dim_rv, m_dim_D);
-  m_tmp_rvrv.resize(m_dim_rv, m_dim_rv);
-}
-
-
-
-//////////////////////////// V ///////////////////////////
-void bob::learn::misc::FABaseTrainer::computeVtSigmaInv(const bob::learn::misc::FABase& m)
-{
-  const blitz::Array<double,2>& V = m.getV();
-  // Blitz compatibility: ugly fix (const_cast, as old blitz version does not
-  // provide a non-const version of transpose())
-  const blitz::Array<double,2> Vt = const_cast<blitz::Array<double,2>&>(V).transpose(1,0);
-  const blitz::Array<double,1>& sigma = m.getUbmVariance();
-  blitz::firstIndex i;
-  blitz::secondIndex j;
-  m_cache_VtSigmaInv = Vt(i,j) / sigma(j); // Vt * diag(sigma)^-1
-}
-
-void bob::learn::misc::FABaseTrainer::computeVProd(const bob::learn::misc::FABase& m)
-{
-  const blitz::Array<double,2>& V = m.getV();
-  blitz::firstIndex i;
-  blitz::secondIndex j;
-  const blitz::Array<double,1>& sigma = m.getUbmVariance();
-  blitz::Range rall = blitz::Range::all();
-  for (size_t c=0; c<m_dim_C; ++c)
-  {
-    blitz::Array<double,2> VProd_c = m_cache_VProd(c, rall, rall);
-    blitz::Array<double,2> Vv_c = V(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1), rall);
-    blitz::Array<double,2> Vt_c = Vv_c.transpose(1,0);
-    blitz::Array<double,1> sigma_c = sigma(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1));
-    m_tmp_rvD = Vt_c(i,j) / sigma_c(j); // Vt_c * diag(sigma)^-1
-    bob::math::prod(m_tmp_rvD, Vv_c, VProd_c);
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::computeIdPlusVProd_i(const size_t id)
-{
-  const blitz::Array<double,1>& Ni = m_Nacc[id];
-  bob::math::eye(m_tmp_rvrv); // m_tmp_rvrv = I
-  blitz::Range rall = blitz::Range::all();
-  for (size_t c=0; c<m_dim_C; ++c) {
-    blitz::Array<double,2> VProd_c = m_cache_VProd(c, rall, rall);
-    m_tmp_rvrv += VProd_c * Ni(c);
-  }
-  bob::math::inv(m_tmp_rvrv, m_cache_IdPlusVProd_i); // m_cache_IdPlusVProd_i = ( I+Vt*diag(sigma)^-1*Ni*V)^-1
-}
-
-void bob::learn::misc::FABaseTrainer::computeFn_y_i(const bob::learn::misc::FABase& mb,
-  const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& stats, const size_t id)
-{
-  const blitz::Array<double,2>& U = mb.getU();
-  const blitz::Array<double,1>& d = mb.getD();
-  // Compute Fn_yi = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i} - U*x_{i,h}) (Normalised first order statistics)
-  const blitz::Array<double,1>& Fi = m_Facc[id];
-  const blitz::Array<double,1>& m = mb.getUbmMean();
-  const blitz::Array<double,1>& z = m_z[id];
-  bob::core::array::repelem(m_Nacc[id], m_tmp_CD);
-  m_cache_Fn_y_i = Fi - m_tmp_CD * (m + d * z); // Fn_yi = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i})
-  const blitz::Array<double,2>& X = m_x[id];
-  blitz::Range rall = blitz::Range::all();
-  for (int h=0; h<X.extent(1); ++h) // Loops over the sessions
-  {
-    blitz::Array<double,1> Xh = X(rall, h); // Xh = x_{i,h} (length: ru)
-    bob::math::prod(U, Xh, m_tmp_CD_b); // m_tmp_CD_b = U*x_{i,h}
-    const blitz::Array<double,1>& Nih = stats[h]->n;
-    bob::core::array::repelem(Nih, m_tmp_CD);
-    m_cache_Fn_y_i -= m_tmp_CD * m_tmp_CD_b; // N_{i,h} * U * x_{i,h}
-  }
-  // Fn_yi = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i} - U*x_{i,h})
-}
-
-void bob::learn::misc::FABaseTrainer::updateY_i(const size_t id)
-{
-  // Computes yi = Ayi * Cvs * Fn_yi
-  blitz::Array<double,1>& y = m_y[id];
-  // m_tmp_rv = m_cache_VtSigmaInv * m_cache_Fn_y_i = Vt*diag(sigma)^-1 * sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i} - U*x_{i,h})
-  bob::math::prod(m_cache_VtSigmaInv, m_cache_Fn_y_i, m_tmp_rv);
-  bob::math::prod(m_cache_IdPlusVProd_i, m_tmp_rv, y);
-}
-
-void bob::learn::misc::FABaseTrainer::updateY(const bob::learn::misc::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
-{
-  // Precomputation
-  computeVtSigmaInv(m);
-  computeVProd(m);
-  // Loops over all people
-  for (size_t id=0; id<stats.size(); ++id) {
-    computeIdPlusVProd_i(id);
-    computeFn_y_i(m, stats[id], id);
-    updateY_i(id);
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::computeAccumulatorsV(
-  const bob::learn::misc::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
-{
-  // Initializes the cache accumulator
-  m_acc_V_A1 = 0.;
-  m_acc_V_A2 = 0.;
-  // Loops over all people
-  blitz::firstIndex i;
-  blitz::secondIndex j;
-  blitz::Range rall = blitz::Range::all();
-  for (size_t id=0; id<stats.size(); ++id) {
-    computeIdPlusVProd_i(id);
-    computeFn_y_i(m, stats[id], id);
-
-    // Needs to return values to be accumulated for estimating V
-    const blitz::Array<double,1>& y = m_y[id];
-    m_tmp_rvrv = m_cache_IdPlusVProd_i;
-    m_tmp_rvrv += y(i) * y(j);
-    for (size_t c=0; c<m_dim_C; ++c)
-    {
-      blitz::Array<double,2> A1_y_c = m_acc_V_A1(c, rall, rall);
-      A1_y_c += m_tmp_rvrv * m_Nacc[id](c);
-    }
-    m_acc_V_A2 += m_cache_Fn_y_i(i) * y(j);
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::updateV(blitz::Array<double,2>& V)
-{
-  blitz::Range rall = blitz::Range::all();
-  for (size_t c=0; c<m_dim_C; ++c)
-  {
-    const blitz::Array<double,2> A1 = m_acc_V_A1(c, rall, rall);
-    bob::math::inv(A1, m_tmp_rvrv);
-    const blitz::Array<double,2> A2 = m_acc_V_A2(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1), rall);
-    blitz::Array<double,2> V_c = V(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1), rall);
-    bob::math::prod(A2, m_tmp_rvrv, V_c);
-  }
-}
-
-
-//////////////////////////// U ///////////////////////////
-void bob::learn::misc::FABaseTrainer::computeUtSigmaInv(const bob::learn::misc::FABase& m)
-{
-  const blitz::Array<double,2>& U = m.getU();
-  // Blitz compatibility: ugly fix (const_cast, as old blitz version does not
-  // provide a non-const version of transpose())
-  const blitz::Array<double,2> Ut = const_cast<blitz::Array<double,2>&>(U).transpose(1,0);
-  const blitz::Array<double,1>& sigma = m.getUbmVariance();
-  blitz::firstIndex i;
-  blitz::secondIndex j;
-  m_cache_UtSigmaInv = Ut(i,j) / sigma(j); // Ut * diag(sigma)^-1
-}
-
-void bob::learn::misc::FABaseTrainer::computeUProd(const bob::learn::misc::FABase& m)
-{
-  const blitz::Array<double,2>& U = m.getU();
-  blitz::firstIndex i;
-  blitz::secondIndex j;
-  const blitz::Array<double,1>& sigma = m.getUbmVariance();
-  for (size_t c=0; c<m_dim_C; ++c)
-  {
-    blitz::Array<double,2> UProd_c = m_cache_UProd(c, blitz::Range::all(), blitz::Range::all());
-    blitz::Array<double,2> Uu_c = U(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1), blitz::Range::all());
-    blitz::Array<double,2> Ut_c = Uu_c.transpose(1,0);
-    blitz::Array<double,1> sigma_c = sigma(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1));
-    m_tmp_ruD = Ut_c(i,j) / sigma_c(j); // Ut_c * diag(sigma)^-1
-    bob::math::prod(m_tmp_ruD, Uu_c, UProd_c);
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::computeIdPlusUProd_ih(
-  const boost::shared_ptr<bob::learn::misc::GMMStats>& stats)
-{
-  const blitz::Array<double,1>& Nih = stats->n;
-  bob::math::eye(m_tmp_ruru); // m_tmp_ruru = I
-  for (size_t c=0; c<m_dim_C; ++c) {
-    blitz::Array<double,2> UProd_c = m_cache_UProd(c,blitz::Range::all(),blitz::Range::all());
-    m_tmp_ruru += UProd_c * Nih(c);
-  }
-  bob::math::inv(m_tmp_ruru, m_cache_IdPlusUProd_ih); // m_cache_IdPlusUProd_ih = ( I+Ut*diag(sigma)^-1*Ni*U)^-1
-}
-
-void bob::learn::misc::FABaseTrainer::computeFn_x_ih(const bob::learn::misc::FABase& mb,
-  const boost::shared_ptr<bob::learn::misc::GMMStats>& stats, const size_t id)
-{
-  const blitz::Array<double,2>& V = mb.getV();
-  const blitz::Array<double,1>& d =  mb.getD();
-  // Compute Fn_x_ih = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i} - V*y_{i}) (Normalised first order statistics)
-  const blitz::Array<double,2>& Fih = stats->sumPx;
-  const blitz::Array<double,1>& m = mb.getUbmMean();
-  const blitz::Array<double,1>& z = m_z[id];
-  const blitz::Array<double,1>& Nih = stats->n;
-  bob::core::array::repelem(Nih, m_tmp_CD);
-  for (size_t c=0; c<m_dim_C; ++c) {
-    blitz::Array<double,1> Fn_x_ih_c = m_cache_Fn_x_ih(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1));
-    Fn_x_ih_c = Fih(c,blitz::Range::all());
-  }
-  m_cache_Fn_x_ih -= m_tmp_CD * (m + d * z); // Fn_x_ih = N_{i,h}*(o_{i,h} - m - D*z_{i})
-
-  const blitz::Array<double,1>& y = m_y[id];
-  bob::math::prod(V, y, m_tmp_CD_b);
-  m_cache_Fn_x_ih -= m_tmp_CD * m_tmp_CD_b;
-  // Fn_x_ih = N_{i,h}*(o_{i,h} - m - D*z_{i} - V*y_{i})
-}
-
-void bob::learn::misc::FABaseTrainer::updateX_ih(const size_t id, const size_t h)
-{
-  // Computes xih = Axih * Cus * Fn_x_ih
-  blitz::Array<double,1> x = m_x[id](blitz::Range::all(), h);
-  // m_tmp_ru = m_cache_UtSigmaInv * m_cache_Fn_x_ih = Ut*diag(sigma)^-1 * N_{i,h}*(o_{i,h} - m - D*z_{i} - V*y_{i})
-  bob::math::prod(m_cache_UtSigmaInv, m_cache_Fn_x_ih, m_tmp_ru);
-  bob::math::prod(m_cache_IdPlusUProd_ih, m_tmp_ru, x);
-}
-
-void bob::learn::misc::FABaseTrainer::updateX(const bob::learn::misc::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
-{
-  // Precomputation
-  computeUtSigmaInv(m);
-  computeUProd(m);
-  // Loops over all people
-  for (size_t id=0; id<stats.size(); ++id) {
-    int n_session_i = stats[id].size();
-    for (int s=0; s<n_session_i; ++s) {
-      computeIdPlusUProd_ih(stats[id][s]);
-      computeFn_x_ih(m, stats[id][s], id);
-      updateX_ih(id, s);
-    }
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::computeAccumulatorsU(
-  const bob::learn::misc::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
-{
-  // Initializes the cache accumulator
-  m_acc_U_A1 = 0.;
-  m_acc_U_A2 = 0.;
-  // Loops over all people
-  blitz::firstIndex i;
-  blitz::secondIndex j;
-  blitz::Range rall = blitz::Range::all();
-  for (size_t id=0; id<stats.size(); ++id) {
-    int n_session_i = stats[id].size();
-    for (int h=0; h<n_session_i; ++h) {
-      computeIdPlusUProd_ih(stats[id][h]);
-      computeFn_x_ih(m, stats[id][h], id);
-
-      // Needs to return values to be accumulated for estimating U
-      blitz::Array<double,1> x = m_x[id](rall, h);
-      m_tmp_ruru = m_cache_IdPlusUProd_ih;
-      m_tmp_ruru += x(i) * x(j);
-      for (int c=0; c<(int)m_dim_C; ++c)
-      {
-        blitz::Array<double,2> A1_x_c = m_acc_U_A1(c,rall,rall);
-        A1_x_c += m_tmp_ruru * stats[id][h]->n(c);
-      }
-      m_acc_U_A2 += m_cache_Fn_x_ih(i) * x(j);
-    }
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::updateU(blitz::Array<double,2>& U)
-{
-  for (size_t c=0; c<m_dim_C; ++c)
-  {
-    const blitz::Array<double,2> A1 = m_acc_U_A1(c,blitz::Range::all(),blitz::Range::all());
-    bob::math::inv(A1, m_tmp_ruru);
-    const blitz::Array<double,2> A2 = m_acc_U_A2(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1),blitz::Range::all());
-    blitz::Array<double,2> U_c = U(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1),blitz::Range::all());
-    bob::math::prod(A2, m_tmp_ruru, U_c);
-  }
-}
-
-
-//////////////////////////// D ///////////////////////////
-void bob::learn::misc::FABaseTrainer::computeDtSigmaInv(const bob::learn::misc::FABase& m)
-{
-  const blitz::Array<double,1>& d = m.getD();
-  const blitz::Array<double,1>& sigma = m.getUbmVariance();
-  m_cache_DtSigmaInv = d / sigma; // Dt * diag(sigma)^-1
-}
-
-void bob::learn::misc::FABaseTrainer::computeDProd(const bob::learn::misc::FABase& m)
-{
-  const blitz::Array<double,1>& d = m.getD();
-  const blitz::Array<double,1>& sigma = m.getUbmVariance();
-  m_cache_DProd = d / sigma * d; // Dt * diag(sigma)^-1 * D
-}
-
-void bob::learn::misc::FABaseTrainer::computeIdPlusDProd_i(const size_t id)
-{
-  const blitz::Array<double,1>& Ni = m_Nacc[id];
-  bob::core::array::repelem(Ni, m_tmp_CD); // m_tmp_CD = Ni 'repmat'
-  m_cache_IdPlusDProd_i = 1.; // m_cache_IdPlusDProd_i = Id
-  m_cache_IdPlusDProd_i += m_cache_DProd * m_tmp_CD; // m_cache_IdPlusDProd_i = I+Dt*diag(sigma)^-1*Ni*D
-  m_cache_IdPlusDProd_i = 1 / m_cache_IdPlusDProd_i; // m_cache_IdPlusVProd_i = (I+Dt*diag(sigma)^-1*Ni*D)^-1
-}
-
-void bob::learn::misc::FABaseTrainer::computeFn_z_i(
-  const bob::learn::misc::FABase& mb,
-  const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& stats, const size_t id)
-{
-  const blitz::Array<double,2>& U = mb.getU();
-  const blitz::Array<double,2>& V = mb.getV();
-  // Compute Fn_z_i = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - V*y_{i} - U*x_{i,h}) (Normalised first order statistics)
-  const blitz::Array<double,1>& Fi = m_Facc[id];
-  const blitz::Array<double,1>& m = mb.getUbmMean();
-  const blitz::Array<double,1>& y = m_y[id];
-  bob::core::array::repelem(m_Nacc[id], m_tmp_CD);
-  bob::math::prod(V, y, m_tmp_CD_b); // m_tmp_CD_b = V * y
-  m_cache_Fn_z_i = Fi - m_tmp_CD * (m + m_tmp_CD_b); // Fn_yi = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - V*y_{i})
-
-  const blitz::Array<double,2>& X = m_x[id];
-  blitz::Range rall = blitz::Range::all();
-  for (int h=0; h<X.extent(1); ++h) // Loops over the sessions
-  {
-    const blitz::Array<double,1>& Nh = stats[h]->n; // Nh = N_{i,h} (length: C)
-    bob::core::array::repelem(Nh, m_tmp_CD);
-    blitz::Array<double,1> Xh = X(rall, h); // Xh = x_{i,h} (length: ru)
-    bob::math::prod(U, Xh, m_tmp_CD_b);
-    m_cache_Fn_z_i -= m_tmp_CD * m_tmp_CD_b;
-  }
-  // Fn_z_i = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - V*y_{i} - U*x_{i,h})
-}
-
-void bob::learn::misc::FABaseTrainer::updateZ_i(const size_t id)
-{
-  // Computes zi = Azi * D^T.Sigma^-1 * Fn_zi
-  blitz::Array<double,1>& z = m_z[id];
-  // m_tmp_CD = m_cache_DtSigmaInv * m_cache_Fn_z_i = Dt*diag(sigma)^-1 * sum_{sessions h}(N_{i,h}*(o_{i,h} - m - V*y_{i} - U*x_{i,h})
-  z = m_cache_IdPlusDProd_i * m_cache_DtSigmaInv * m_cache_Fn_z_i;
-}
-
-void bob::learn::misc::FABaseTrainer::updateZ(const bob::learn::misc::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
-{
-  // Precomputation
-  computeDtSigmaInv(m);
-  computeDProd(m);
-  // Loops over all people
-  for (size_t id=0; id<m_Nid; ++id) {
-    computeIdPlusDProd_i(id);
-    computeFn_z_i(m, stats[id], id);
-    updateZ_i(id);
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::computeAccumulatorsD(
-  const bob::learn::misc::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
-{
-  // Initializes the cache accumulator
-  m_acc_D_A1 = 0.;
-  m_acc_D_A2 = 0.;
-  // Loops over all people
-  blitz::firstIndex i;
-  blitz::secondIndex j;
-  for (size_t id=0; id<stats.size(); ++id) {
-    computeIdPlusDProd_i(id);
-    computeFn_z_i(m, stats[id], id);
-
-    // Needs to return values to be accumulated for estimating D
-    blitz::Array<double,1> z = m_z[id];
-    bob::core::array::repelem(m_Nacc[id], m_tmp_CD);
-    m_acc_D_A1 += (m_cache_IdPlusDProd_i + z * z) * m_tmp_CD;
-    m_acc_D_A2 += m_cache_Fn_z_i * z;
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::updateD(blitz::Array<double,1>& d)
-{
-  d = m_acc_D_A2 / m_acc_D_A1;
-}
-
-
diff --git a/bob/learn/misc/cpp/GMMBaseTrainer.cpp b/bob/learn/misc/cpp/GMMBaseTrainer.cpp
deleted file mode 100644
index 8210f2bb4ce7aaf9b2319751673298ea311145d6..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/GMMBaseTrainer.cpp
+++ /dev/null
@@ -1,94 +0,0 @@
-/**
- * @date Tue May 10 11:35:58 2011 +0200
- * @author Francois Moulin <Francois.Moulin@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <bob.learn.misc/GMMBaseTrainer.h>
-#include <bob.core/assert.h>
-#include <bob.core/check.h>
-
-bob::learn::misc::GMMBaseTrainer::GMMBaseTrainer(const bool update_means,
-    const bool update_variances, const bool update_weights,
-    const double mean_var_update_responsibilities_threshold):
-  m_update_means(update_means), m_update_variances(update_variances),
-  m_update_weights(update_weights),
-  m_mean_var_update_responsibilities_threshold(mean_var_update_responsibilities_threshold)
-{}
-
-bob::learn::misc::GMMBaseTrainer::GMMBaseTrainer(const bob::learn::misc::GMMBaseTrainer& b):
-  m_update_means(b.m_update_means), m_update_variances(b.m_update_variances),
-  m_mean_var_update_responsibilities_threshold(b.m_mean_var_update_responsibilities_threshold)
-{}
-
-bob::learn::misc::GMMBaseTrainer::~GMMBaseTrainer()
-{}
-
-void bob::learn::misc::GMMBaseTrainer::initialize(bob::learn::misc::GMMMachine& gmm)
-{
-  // Allocate memory for the sufficient statistics and initialise
-  m_ss.resize(gmm.getNGaussians(),gmm.getNInputs());
-}
-
-void bob::learn::misc::GMMBaseTrainer::eStep(bob::learn::misc::GMMMachine& gmm,
-  const blitz::Array<double,2>& data)
-{
-  m_ss.init();
-  // Calculate the sufficient statistics and save in m_ss
-  gmm.accStatistics(data, m_ss);
-}
-
-double bob::learn::misc::GMMBaseTrainer::computeLikelihood(bob::learn::misc::GMMMachine& gmm)
-{
-  return m_ss.log_likelihood / m_ss.T;
-}
-
-
-bob::learn::misc::GMMBaseTrainer& bob::learn::misc::GMMBaseTrainer::operator=
-  (const bob::learn::misc::GMMBaseTrainer &other)
-{
-  if (this != &other)
-  {
-    m_ss = other.m_ss;
-    m_update_means = other.m_update_means;
-    m_update_variances = other.m_update_variances;
-    m_update_weights = other.m_update_weights;
-    m_mean_var_update_responsibilities_threshold = other.m_mean_var_update_responsibilities_threshold;
-  }
-  return *this;
-}
-
-bool bob::learn::misc::GMMBaseTrainer::operator==
-  (const bob::learn::misc::GMMBaseTrainer &other) const
-{
-  return m_ss == other.m_ss &&
-         m_update_means == other.m_update_means &&
-         m_update_variances == other.m_update_variances &&
-         m_update_weights == other.m_update_weights &&
-         m_mean_var_update_responsibilities_threshold == other.m_mean_var_update_responsibilities_threshold;
-}
-
-bool bob::learn::misc::GMMBaseTrainer::operator!=
-  (const bob::learn::misc::GMMBaseTrainer &other) const
-{
-  return !(this->operator==(other));
-}
-
-bool bob::learn::misc::GMMBaseTrainer::is_similar_to
-  (const bob::learn::misc::GMMBaseTrainer &other, const double r_epsilon,
-   const double a_epsilon) const
-{
-  return m_ss == other.m_ss &&
-         m_update_means == other.m_update_means &&
-         m_update_variances == other.m_update_variances &&
-         m_update_weights == other.m_update_weights &&
-         bob::core::isClose(m_mean_var_update_responsibilities_threshold,
-          other.m_mean_var_update_responsibilities_threshold, r_epsilon, a_epsilon);
-}
-
-void bob::learn::misc::GMMBaseTrainer::setGMMStats(const bob::learn::misc::GMMStats& stats)
-{
-  bob::core::array::assertSameShape(m_ss.sumPx, stats.sumPx);
-  m_ss = stats;
-}
diff --git a/bob/learn/misc/cpp/GMMMachine.cpp b/bob/learn/misc/cpp/GMMMachine.cpp
deleted file mode 100644
index 6261d42d6ffe7eae63dc262e3a210ca972678720..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/GMMMachine.cpp
+++ /dev/null
@@ -1,436 +0,0 @@
-/**
- * @date Tue May 10 11:35:58 2011 +0200
- * @author Francois Moulin <Francois.Moulin@idiap.ch>
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <bob.learn.misc/GMMMachine.h>
-#include <bob.core/assert.h>
-#include <bob.math/log.h>
-
-bob::learn::misc::GMMMachine::GMMMachine(): m_gaussians(0) {
-  resize(0,0);
-}
-
-bob::learn::misc::GMMMachine::GMMMachine(const size_t n_gaussians, const size_t n_inputs):
-  m_gaussians(0)
-{
-  resize(n_gaussians,n_inputs);
-}
-
-bob::learn::misc::GMMMachine::GMMMachine(bob::io::base::HDF5File& config):
-  m_gaussians(0)
-{
-  load(config);
-}
-
-bob::learn::misc::GMMMachine::GMMMachine(const GMMMachine& other)  
-{
-  copy(other);
-}
-
-bob::learn::misc::GMMMachine& bob::learn::misc::GMMMachine::operator=(const bob::learn::misc::GMMMachine &other) {
-  // protect against invalid self-assignment
-  if (this != &other)
-    copy(other);
-
-  // by convention, always return *this
-  return *this;
-}
-
-bool bob::learn::misc::GMMMachine::operator==(const bob::learn::misc::GMMMachine& b) const
-{
-  if (m_n_gaussians != b.m_n_gaussians || m_n_inputs != b.m_n_inputs ||
-      !bob::core::array::isEqual(m_weights, b.m_weights))
-    return false;
-
-  for(size_t i=0; i<m_n_gaussians; ++i) {
-    if(!(*(m_gaussians[i]) == *(b.m_gaussians[i])))
-      return false;
-  }
-
-  return true;
-}
-
-bool bob::learn::misc::GMMMachine::operator!=(const bob::learn::misc::GMMMachine& b) const {
-  return !(this->operator==(b));
-}
-
-bool bob::learn::misc::GMMMachine::is_similar_to(const bob::learn::misc::GMMMachine& b,
-  const double r_epsilon, const double a_epsilon) const
-{
-  if (m_n_gaussians != b.m_n_gaussians || m_n_inputs != b.m_n_inputs ||
-      !bob::core::array::isClose(m_weights, b.m_weights, r_epsilon, a_epsilon))
-    return false;
-
-  for (size_t i = 0; i < m_n_gaussians; ++i)
-    if (!m_gaussians[i]->is_similar_to(*b.m_gaussians[i], r_epsilon, a_epsilon))
-      return false;
-
-  return true;
-}
-
-void bob::learn::misc::GMMMachine::copy(const GMMMachine& other) {
-  m_n_gaussians = other.m_n_gaussians;
-  m_n_inputs = other.m_n_inputs;
-
-  // Initialise weights
-  m_weights.resize(m_n_gaussians);
-  m_weights = other.m_weights;
-
-  // Initialise Gaussians
-  m_gaussians.clear();
-  for(size_t i=0; i<m_n_gaussians; ++i) {
-    boost::shared_ptr<bob::learn::misc::Gaussian> g(new bob::learn::misc::Gaussian(*(other.m_gaussians[i])));
-    m_gaussians.push_back(g);
-  }
-
-  // Initialise cache
-  initCache();
-}
-
-bob::learn::misc::GMMMachine::~GMMMachine() { }
-
-
-/////////////////////
-// Setters 
-////////////////////
-
-void bob::learn::misc::GMMMachine::setWeights(const blitz::Array<double,1> &weights) {
-  bob::core::array::assertSameShape(weights, m_weights);
-  m_weights = weights;
-  recomputeLogWeights();
-}
-
-void bob::learn::misc::GMMMachine::recomputeLogWeights() const
-{
-  m_cache_log_weights = blitz::log(m_weights);
-}
-
-void bob::learn::misc::GMMMachine::setMeans(const blitz::Array<double,2> &means) {
-  bob::core::array::assertSameDimensionLength(means.extent(0), m_n_gaussians);
-  bob::core::array::assertSameDimensionLength(means.extent(1), m_n_inputs);
-  for(size_t i=0; i<m_n_gaussians; ++i)
-    m_gaussians[i]->updateMean() = means(i,blitz::Range::all());
-  m_cache_supervector = false;
-}
-
-void bob::learn::misc::GMMMachine::setMeanSupervector(const blitz::Array<double,1> &mean_supervector) {
-  bob::core::array::assertSameDimensionLength(mean_supervector.extent(0), m_n_gaussians*m_n_inputs);
-  for(size_t i=0; i<m_n_gaussians; ++i)
-    m_gaussians[i]->updateMean() = mean_supervector(blitz::Range(i*m_n_inputs, (i+1)*m_n_inputs-1));
-  m_cache_supervector = false;
-}
-
-
-void bob::learn::misc::GMMMachine::setVariances(const blitz::Array<double, 2 >& variances) {
-  bob::core::array::assertSameDimensionLength(variances.extent(0), m_n_gaussians);
-  bob::core::array::assertSameDimensionLength(variances.extent(1), m_n_inputs);
-  for(size_t i=0; i<m_n_gaussians; ++i) {
-    m_gaussians[i]->updateVariance() = variances(i,blitz::Range::all());
-    m_gaussians[i]->applyVarianceThresholds();
-  }
-  m_cache_supervector = false;
-}
-
-void bob::learn::misc::GMMMachine::setVarianceSupervector(const blitz::Array<double,1> &variance_supervector) {
-  bob::core::array::assertSameDimensionLength(variance_supervector.extent(0), m_n_gaussians*m_n_inputs);
-  for(size_t i=0; i<m_n_gaussians; ++i) {
-    m_gaussians[i]->updateVariance() = variance_supervector(blitz::Range(i*m_n_inputs, (i+1)*m_n_inputs-1));
-    m_gaussians[i]->applyVarianceThresholds();
-  }
-  m_cache_supervector = false;
-}
-
-void bob::learn::misc::GMMMachine::setVarianceThresholds(const double value) {
-  for(size_t i=0; i<m_n_gaussians; ++i)
-    m_gaussians[i]->setVarianceThresholds(value);
-  m_cache_supervector = false;
-}
-
-void bob::learn::misc::GMMMachine::setVarianceThresholds(blitz::Array<double, 1> variance_thresholds) {
-  bob::core::array::assertSameDimensionLength(variance_thresholds.extent(0), m_n_inputs);
-  for(size_t i=0; i<m_n_gaussians; ++i)
-    m_gaussians[i]->setVarianceThresholds(variance_thresholds);
-  m_cache_supervector = false;
-}
-
-void bob::learn::misc::GMMMachine::setVarianceThresholds(const blitz::Array<double, 2>& variance_thresholds) {
-  bob::core::array::assertSameDimensionLength(variance_thresholds.extent(0), m_n_gaussians);
-  bob::core::array::assertSameDimensionLength(variance_thresholds.extent(1), m_n_inputs);
-  for(size_t i=0; i<m_n_gaussians; ++i)
-    m_gaussians[i]->setVarianceThresholds(variance_thresholds(i,blitz::Range::all()));
-  m_cache_supervector = false;
-}
-
-/////////////////////
-// Getters 
-////////////////////
-
-const blitz::Array<double,2> bob::learn::misc::GMMMachine::getMeans() const {
-
-  blitz::Array<double,2> means(m_n_gaussians,m_n_inputs);  
-  for(size_t i=0; i<m_n_gaussians; ++i)
-    means(i,blitz::Range::all()) = m_gaussians[i]->getMean();
-    
-  return means;
-}
-
-const blitz::Array<double,2> bob::learn::misc::GMMMachine::getVariances() const{
-  
-  blitz::Array<double,2> variances(m_n_gaussians,m_n_inputs);
-  for(size_t i=0; i<m_n_gaussians; ++i)
-    variances(i,blitz::Range::all()) = m_gaussians[i]->getVariance();
-
-  return variances;
-}
-
-
-const blitz::Array<double,2>  bob::learn::misc::GMMMachine::getVarianceThresholds() const {
-  //bob::core::array::assertSameDimensionLength(variance_thresholds.extent(0), m_n_gaussians);
-  //bob::core::array::assertSameDimensionLength(variance_thresholds.extent(1), m_n_inputs);
-  blitz::Array<double, 2> variance_thresholds(m_n_gaussians, m_n_inputs);
-  for(size_t i=0; i<m_n_gaussians; ++i)
-    variance_thresholds(i,blitz::Range::all()) = m_gaussians[i]->getVarianceThresholds();
-
-  return variance_thresholds;
-}
-
-
-/////////////////////
-// Methods
-////////////////////
-
-
-void bob::learn::misc::GMMMachine::resize(const size_t n_gaussians, const size_t n_inputs) {
-  m_n_gaussians = n_gaussians;
-  m_n_inputs = n_inputs;
-
-  // Initialise weights
-  m_weights.resize(m_n_gaussians);
-  m_weights = 1.0 / m_n_gaussians;
-
-  // Initialise Gaussians
-  m_gaussians.clear();
-  for(size_t i=0; i<m_n_gaussians; ++i)
-    m_gaussians.push_back(boost::shared_ptr<bob::learn::misc::Gaussian>(new bob::learn::misc::Gaussian(n_inputs)));
-
-  // Initialise cache arrays
-  initCache();
-}
-
-double bob::learn::misc::GMMMachine::logLikelihood(const blitz::Array<double, 1> &x,
-  blitz::Array<double,1> &log_weighted_gaussian_likelihoods) const
-{
-  // Check dimension
-  bob::core::array::assertSameDimensionLength(log_weighted_gaussian_likelihoods.extent(0), m_n_gaussians);
-  bob::core::array::assertSameDimensionLength(x.extent(0), m_n_inputs);
-  return logLikelihood_(x,log_weighted_gaussian_likelihoods);
-}
-
-double bob::learn::misc::GMMMachine::logLikelihood_(const blitz::Array<double, 1> &x,
-  blitz::Array<double,1> &log_weighted_gaussian_likelihoods) const
-{
-  // Initialise variables
-  double log_likelihood = bob::math::Log::LogZero;
-
-  // Accumulate the weighted log likelihoods from each Gaussian
-  for(size_t i=0; i<m_n_gaussians; ++i) {
-    double l = m_cache_log_weights(i) + m_gaussians[i]->logLikelihood_(x);
-    log_weighted_gaussian_likelihoods(i) = l;
-    log_likelihood = bob::math::Log::logAdd(log_likelihood, l);
-  }
-
-  // Return log(p(x|GMMMachine))
-  return log_likelihood;
-}
-
-double bob::learn::misc::GMMMachine::logLikelihood(const blitz::Array<double, 1> &x) const {
-  // Check dimension
-  bob::core::array::assertSameDimensionLength(x.extent(0), m_n_inputs);
-  // Call the other logLikelihood_ (overloaded) function
-  // (log_weighted_gaussian_likelihoods will be discarded)
-  return logLikelihood_(x,m_cache_log_weighted_gaussian_likelihoods);
-}
-
-double bob::learn::misc::GMMMachine::logLikelihood_(const blitz::Array<double, 1> &x) const {
-  // Call the other logLikelihood (overloaded) function
-  // (log_weighted_gaussian_likelihoods will be discarded)
-  return logLikelihood_(x,m_cache_log_weighted_gaussian_likelihoods);
-}
-
-void bob::learn::misc::GMMMachine::accStatistics(const blitz::Array<double,2>& input,
-    bob::learn::misc::GMMStats& stats) const {
-  // iterate over data
-  blitz::Range a = blitz::Range::all();
-  for(int i=0; i<input.extent(0); ++i) {
-    // Get example
-    blitz::Array<double,1> x(input(i,a));
-    // Accumulate statistics
-    accStatistics(x,stats);
-  }
-}
-
-void bob::learn::misc::GMMMachine::accStatistics_(const blitz::Array<double,2>& input, bob::learn::misc::GMMStats& stats) const {
-  // iterate over data
-  blitz::Range a = blitz::Range::all();
-  for(int i=0; i<input.extent(0); ++i) {
-    // Get example
-    blitz::Array<double,1> x(input(i, a));
-    // Accumulate statistics
-    accStatistics_(x,stats);
-  }
-}
-
-void bob::learn::misc::GMMMachine::accStatistics(const blitz::Array<double, 1>& x, bob::learn::misc::GMMStats& stats) const {
-  // check GMMStats size
-  bob::core::array::assertSameDimensionLength(stats.sumPx.extent(0), m_n_gaussians);
-  bob::core::array::assertSameDimensionLength(stats.sumPx.extent(1), m_n_inputs);
-
-  // Calculate Gaussian and GMM likelihoods
-  // - m_cache_log_weighted_gaussian_likelihoods(i) = log(weight_i*p(x|gaussian_i))
-  // - log_likelihood = log(sum_i(weight_i*p(x|gaussian_i)))
-  double log_likelihood = logLikelihood(x, m_cache_log_weighted_gaussian_likelihoods);
-
-  accStatisticsInternal(x, stats, log_likelihood);
-}
-
-void bob::learn::misc::GMMMachine::accStatistics_(const blitz::Array<double, 1>& x, bob::learn::misc::GMMStats& stats) const {
-  // Calculate Gaussian and GMM likelihoods
-  // - m_cache_log_weighted_gaussian_likelihoods(i) = log(weight_i*p(x|gaussian_i))
-  // - log_likelihood = log(sum_i(weight_i*p(x|gaussian_i)))
-  double log_likelihood = logLikelihood_(x, m_cache_log_weighted_gaussian_likelihoods);
-
-  accStatisticsInternal(x, stats, log_likelihood);
-}
-
-void bob::learn::misc::GMMMachine::accStatisticsInternal(const blitz::Array<double, 1>& x,
-  bob::learn::misc::GMMStats& stats, const double log_likelihood) const
-{
-  // Calculate responsibilities
-  m_cache_P = blitz::exp(m_cache_log_weighted_gaussian_likelihoods - log_likelihood);
-
-  // Accumulate statistics
-  // - total likelihood
-  stats.log_likelihood += log_likelihood;
-
-  // - number of samples
-  stats.T++;
-
-  // - responsibilities
-  stats.n += m_cache_P;
-
-  // - first order stats
-  blitz::firstIndex i;
-  blitz::secondIndex j;
-
-  m_cache_Px = m_cache_P(i) * x(j);
-
-  stats.sumPx += m_cache_Px;
-
-  // - second order stats
-  stats.sumPxx += (m_cache_Px(i,j) * x(j));
-}
-
-boost::shared_ptr<bob::learn::misc::Gaussian> bob::learn::misc::GMMMachine::getGaussian(const size_t i) {
-  if (i>=m_n_gaussians) {
-    throw std::runtime_error("getGaussian(): index out of bounds");
-  }
-  return m_gaussians[i];
-}
-
-void bob::learn::misc::GMMMachine::save(bob::io::base::HDF5File& config) const {
-  int64_t v = static_cast<int64_t>(m_n_gaussians);
-  config.set("m_n_gaussians", v);
-  v = static_cast<int64_t>(m_n_inputs);
-  config.set("m_n_inputs", v);
-
-  for(size_t i=0; i<m_n_gaussians; ++i) {
-    std::ostringstream oss;
-    oss << "m_gaussians" << i;
-
-    if (!config.hasGroup(oss.str())) config.createGroup(oss.str());
-    config.cd(oss.str());
-    m_gaussians[i]->save(config);
-    config.cd("..");
-  }
-
-  config.setArray("m_weights", m_weights);
-}
-
-void bob::learn::misc::GMMMachine::load(bob::io::base::HDF5File& config) {
-  int64_t v;
-  v = config.read<int64_t>("m_n_gaussians");
-  m_n_gaussians = static_cast<size_t>(v);
-  v = config.read<int64_t>("m_n_inputs");
-  m_n_inputs = static_cast<size_t>(v);
-
-  m_gaussians.clear();
-  for(size_t i=0; i<m_n_gaussians; ++i) {
-    m_gaussians.push_back(boost::shared_ptr<bob::learn::misc::Gaussian>(new bob::learn::misc::Gaussian(m_n_inputs)));
-    std::ostringstream oss;
-    oss << "m_gaussians" << i;
-    config.cd(oss.str());
-    m_gaussians[i]->load(config);
-    config.cd("..");
-  }
-
-  m_weights.resize(m_n_gaussians);
-  config.readArray("m_weights", m_weights);
-
-  // Initialise cache
-  initCache();
-}
-
-void bob::learn::misc::GMMMachine::updateCacheSupervectors() const
-{
-  m_cache_mean_supervector.resize(m_n_gaussians*m_n_inputs);
-  m_cache_variance_supervector.resize(m_n_gaussians*m_n_inputs);
-
-  for(size_t i=0; i<m_n_gaussians; ++i) {
-    blitz::Range range(i*m_n_inputs, (i+1)*m_n_inputs-1);
-    m_cache_mean_supervector(range) = m_gaussians[i]->getMean();
-    m_cache_variance_supervector(range) = m_gaussians[i]->getVariance();
-  }
-  m_cache_supervector = true;
-}
-
-void bob::learn::misc::GMMMachine::initCache() const {
-  // Initialise cache arrays
-  m_cache_log_weights.resize(m_n_gaussians);
-  recomputeLogWeights();
-  m_cache_log_weighted_gaussian_likelihoods.resize(m_n_gaussians);
-  m_cache_P.resize(m_n_gaussians);
-  m_cache_Px.resize(m_n_gaussians,m_n_inputs);
-  m_cache_supervector = false;
-}
-
-void bob::learn::misc::GMMMachine::reloadCacheSupervectors() const {
-  if(!m_cache_supervector)
-    updateCacheSupervectors();
-}
-
-const blitz::Array<double,1>& bob::learn::misc::GMMMachine::getMeanSupervector() const {
-  if(!m_cache_supervector)
-    updateCacheSupervectors();
-  return m_cache_mean_supervector;
-}
-
-const blitz::Array<double,1>& bob::learn::misc::GMMMachine::getVarianceSupervector() const {
-  if(!m_cache_supervector)
-    updateCacheSupervectors();
-  return m_cache_variance_supervector;
-}
-
-namespace bob { namespace learn { namespace misc {
-  std::ostream& operator<<(std::ostream& os, const GMMMachine& machine) {
-    os << "Weights = " << machine.m_weights << std::endl;
-    for(size_t i=0; i < machine.m_n_gaussians; ++i) {
-      os << "Gaussian " << i << ": " << std::endl << *(machine.m_gaussians[i]);
-    }
-
-    return os;
-  }
-} } }
diff --git a/bob/learn/misc/cpp/GMMStats.cpp b/bob/learn/misc/cpp/GMMStats.cpp
deleted file mode 100644
index c0c25dfa67c671ccc7ba851e25347c5ad3f5c70b..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/GMMStats.cpp
+++ /dev/null
@@ -1,151 +0,0 @@
-/**
- * @date Tue May 10 11:35:58 2011 +0200
- * @author Francois Moulin <Francois.Moulin@idiap.ch>
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <bob.learn.misc/GMMStats.h>
-#include <bob.core/logging.h>
-#include <bob.core/check.h>
-
-bob::learn::misc::GMMStats::GMMStats() {
-  resize(0,0);
-}
-
-bob::learn::misc::GMMStats::GMMStats(const size_t n_gaussians, const size_t n_inputs) {
-  resize(n_gaussians,n_inputs);
-}
-
-bob::learn::misc::GMMStats::GMMStats(bob::io::base::HDF5File& config) {
-  load(config);
-}
-
-bob::learn::misc::GMMStats::GMMStats(const bob::learn::misc::GMMStats& other) {
-  copy(other);
-}
-
-bob::learn::misc::GMMStats::~GMMStats() {
-}
-
-bob::learn::misc::GMMStats&
-bob::learn::misc::GMMStats::operator=(const bob::learn::misc::GMMStats& other) {
-  // protect against invalid self-assignment
-  if (this != &other)
-    copy(other);
-
-  // by convention, always return *this
-  return *this;
-}
-
-bool bob::learn::misc::GMMStats::operator==(const bob::learn::misc::GMMStats& b) const
-{
-  return (T == b.T && log_likelihood == b.log_likelihood &&
-          bob::core::array::isEqual(n, b.n) &&
-          bob::core::array::isEqual(sumPx, b.sumPx) &&
-          bob::core::array::isEqual(sumPxx, b.sumPxx));
-}
-
-bool
-bob::learn::misc::GMMStats::operator!=(const bob::learn::misc::GMMStats& b) const
-{
-  return !(this->operator==(b));
-}
-
-bool bob::learn::misc::GMMStats::is_similar_to(const bob::learn::misc::GMMStats& b,
-  const double r_epsilon, const double a_epsilon) const
-{
-  return (T == b.T &&
-          bob::core::isClose(log_likelihood, b.log_likelihood, r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(n, b.n, r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(sumPx, b.sumPx, r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(sumPxx, b.sumPxx, r_epsilon, a_epsilon));
-}
-
-
-void bob::learn::misc::GMMStats::operator+=(const bob::learn::misc::GMMStats& b) {
-  // Check dimensions
-  if(n.extent(0) != b.n.extent(0) ||
-      sumPx.extent(0) != b.sumPx.extent(0) || sumPx.extent(1) != b.sumPx.extent(1) ||
-      sumPxx.extent(0) != b.sumPxx.extent(0) || sumPxx.extent(1) != b.sumPxx.extent(1))
-    // TODO: add a specialized exception
-    throw std::runtime_error("if you see this exception, fill a bug report");
-
-  // Update GMMStats object with the content of the other one
-  T += b.T;
-  log_likelihood += b.log_likelihood;
-  n += b.n;
-  sumPx += b.sumPx;
-  sumPxx += b.sumPxx;
-}
-
-void bob::learn::misc::GMMStats::copy(const GMMStats& other) {
-  // Resize arrays
-  resize(other.sumPx.extent(0),other.sumPx.extent(1));
-  // Copy content
-  T = other.T;
-  log_likelihood = other.log_likelihood;
-  n = other.n;
-  sumPx = other.sumPx;
-  sumPxx = other.sumPxx;
-}
-
-void bob::learn::misc::GMMStats::resize(const size_t n_gaussians, const size_t n_inputs) {
-  n.resize(n_gaussians);
-  sumPx.resize(n_gaussians, n_inputs);
-  sumPxx.resize(n_gaussians, n_inputs);
-  init();
-}
-
-void bob::learn::misc::GMMStats::init() {
-  log_likelihood = 0;
-  T = 0;
-  n = 0.0;
-  sumPx = 0.0;
-  sumPxx = 0.0;
-}
-
-void bob::learn::misc::GMMStats::save(bob::io::base::HDF5File& config) const {
-  //please note we fix the output values to be of a precise type so they can be
-  //retrieved at any platform with the exact same precision.
-  // TODO: add versioning, replace int64_t by uint64_t and log_liklihood by log_likelihood
-  int64_t sumpx_shape_0 = sumPx.shape()[0];
-  int64_t sumpx_shape_1 = sumPx.shape()[1];
-  config.set("n_gaussians", sumpx_shape_0);
-  config.set("n_inputs", sumpx_shape_1);
-  config.set("log_liklihood", log_likelihood); //double
-  config.set("T", static_cast<int64_t>(T));
-  config.setArray("n", n); //Array1d
-  config.setArray("sumPx", sumPx); //Array2d
-  config.setArray("sumPxx", sumPxx); //Array2d
-}
-
-void bob::learn::misc::GMMStats::load(bob::io::base::HDF5File& config) {
-  log_likelihood = config.read<double>("log_liklihood");
-  int64_t n_gaussians = config.read<int64_t>("n_gaussians");
-  int64_t n_inputs = config.read<int64_t>("n_inputs");
-  T = static_cast<size_t>(config.read<int64_t>("T"));
-
-  //resize arrays to prepare for HDF5 readout
-  n.resize(n_gaussians);
-  sumPx.resize(n_gaussians, n_inputs);
-  sumPxx.resize(n_gaussians, n_inputs);
-
-  //load data
-  config.readArray("n", n);
-  config.readArray("sumPx", sumPx);
-  config.readArray("sumPxx", sumPxx);
-}
-
-namespace bob { namespace learn { namespace misc {
-  std::ostream& operator<<(std::ostream& os, const GMMStats& g) {
-    os << "log_likelihood = " << g.log_likelihood << std::endl;
-    os << "T = " << g.T << std::endl;
-    os << "n = " << g.n;
-    os << "sumPx = " << g.sumPx;
-    os << "sumPxx = " << g.sumPxx;
-
-    return os;
-  }
-} } }
diff --git a/bob/learn/misc/cpp/Gaussian.cpp b/bob/learn/misc/cpp/Gaussian.cpp
deleted file mode 100644
index 75219d132d48f648db0de13fcd32dfa4ab4a4eb1..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/Gaussian.cpp
+++ /dev/null
@@ -1,184 +0,0 @@
-/**
- * @date Tue May 10 11:35:58 2011 +0200
- * @author Francois Moulin <Francois.Moulin@idiap.ch>
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <bob.learn.misc/Gaussian.h>
-
-#include <bob.core/assert.h>
-#include <bob.math/log.h>
-
-bob::learn::misc::Gaussian::Gaussian() {
-  resize(0);
-}
-
-bob::learn::misc::Gaussian::Gaussian(const size_t n_inputs) {
-  resize(n_inputs);
-}
-
-bob::learn::misc::Gaussian::Gaussian(const bob::learn::misc::Gaussian& other) {
-  copy(other);
-}
-
-bob::learn::misc::Gaussian::Gaussian(bob::io::base::HDF5File& config) {
-  load(config);
-}
-
-bob::learn::misc::Gaussian::~Gaussian() {
-}
-
-bob::learn::misc::Gaussian& bob::learn::misc::Gaussian::operator=(const bob::learn::misc::Gaussian &other) {
-  if(this != &other)
-    copy(other);
-
-  return *this;
-}
-
-bool bob::learn::misc::Gaussian::operator==(const bob::learn::misc::Gaussian& b) const
-{
-  return (bob::core::array::isEqual(m_mean, b.m_mean) &&
-          bob::core::array::isEqual(m_variance, b.m_variance) &&
-          bob::core::array::isEqual(m_variance_thresholds, b.m_variance_thresholds));
-}
-
-bool bob::learn::misc::Gaussian::operator!=(const bob::learn::misc::Gaussian& b) const {
-  return !(this->operator==(b));
-}
-
-bool bob::learn::misc::Gaussian::is_similar_to(const bob::learn::misc::Gaussian& b,
-  const double r_epsilon, const double a_epsilon) const
-{
-  return (bob::core::array::isClose(m_mean, b.m_mean, r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(m_variance, b.m_variance, r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(m_variance_thresholds, b.m_variance_thresholds, r_epsilon, a_epsilon));
-}
-
-void bob::learn::misc::Gaussian::copy(const bob::learn::misc::Gaussian& other) {
-  m_n_inputs = other.m_n_inputs;
-
-  m_mean.resize(m_n_inputs);
-  m_mean = other.m_mean;
-
-  m_variance.resize(m_n_inputs);
-  m_variance = other.m_variance;
-
-  m_variance_thresholds.resize(m_n_inputs);
-  m_variance_thresholds = other.m_variance_thresholds;
-
-  m_n_log2pi = other.m_n_log2pi;
-  m_g_norm = other.m_g_norm;
-}
-
-
-void bob::learn::misc::Gaussian::setNInputs(const size_t n_inputs) {
-  resize(n_inputs);
-}
-
-void bob::learn::misc::Gaussian::resize(const size_t n_inputs) {
-  m_n_inputs = n_inputs;
-  m_mean.resize(m_n_inputs);
-  m_mean = 0;
-  m_variance.resize(m_n_inputs);
-  m_variance = 1;
-  m_variance_thresholds.resize(m_n_inputs);
-  m_variance_thresholds = 0;
-
-  // Re-compute g_norm, because m_n_inputs and m_variance
-  // have changed
-  preComputeNLog2Pi();
-  preComputeConstants();
-}
-
-void bob::learn::misc::Gaussian::setMean(const blitz::Array<double,1> &mean) {
-  // Check and set
-  bob::core::array::assertSameShape(m_mean, mean);
-  m_mean = mean;
-}
-
-void bob::learn::misc::Gaussian::setVariance(const blitz::Array<double,1> &variance) {
-  // Check and set
-  bob::core::array::assertSameShape(m_variance, variance);
-  m_variance = variance;
-
-  // Variance flooring
-  applyVarianceThresholds();
-}
-
-void bob::learn::misc::Gaussian::setVarianceThresholds(const blitz::Array<double,1> &variance_thresholds) {
-  // Check and set
-  bob::core::array::assertSameShape(m_variance_thresholds, variance_thresholds);
-  m_variance_thresholds = variance_thresholds;
-
-  // Variance flooring
-  applyVarianceThresholds();
-}
-
-void bob::learn::misc::Gaussian::setVarianceThresholds(const double value) {
-  blitz::Array<double,1> variance_thresholds(m_n_inputs);
-  variance_thresholds = value;
-  setVarianceThresholds(variance_thresholds);
-}
-
-void bob::learn::misc::Gaussian::applyVarianceThresholds() {
-   // Apply variance flooring threshold
-  m_variance = blitz::where( m_variance < m_variance_thresholds, m_variance_thresholds, m_variance);
-
-  // Re-compute g_norm, because m_variance has changed
-  preComputeConstants();
-}
-
-double bob::learn::misc::Gaussian::logLikelihood(const blitz::Array<double,1> &x) const {
-  // Check
-  bob::core::array::assertSameShape(x, m_mean);
-  return logLikelihood_(x);
-}
-
-double bob::learn::misc::Gaussian::logLikelihood_(const blitz::Array<double,1> &x) const {
-  double z = blitz::sum(blitz::pow2(x - m_mean) / m_variance);
-  // Log Likelihood
-  return (-0.5 * (m_g_norm + z));
-}
-
-void bob::learn::misc::Gaussian::preComputeNLog2Pi() {
-  m_n_log2pi = m_n_inputs * bob::math::Log::Log2Pi;
-}
-
-void bob::learn::misc::Gaussian::preComputeConstants() {
-  m_g_norm = m_n_log2pi + blitz::sum(blitz::log(m_variance));
-}
-
-void bob::learn::misc::Gaussian::save(bob::io::base::HDF5File& config) const {
-  config.setArray("m_mean", m_mean);
-  config.setArray("m_variance", m_variance);
-  config.setArray("m_variance_thresholds", m_variance_thresholds);
-  config.set("g_norm", m_g_norm);
-  int64_t v = static_cast<int64_t>(m_n_inputs);
-  config.set("m_n_inputs", v);
-}
-
-void bob::learn::misc::Gaussian::load(bob::io::base::HDF5File& config) {
-  int64_t v = config.read<int64_t>("m_n_inputs");
-  m_n_inputs = static_cast<size_t>(v);
-
-  m_mean.resize(m_n_inputs);
-  m_variance.resize(m_n_inputs);
-  m_variance_thresholds.resize(m_n_inputs);
-
-  config.readArray("m_mean", m_mean);
-  config.readArray("m_variance", m_variance);
-  config.readArray("m_variance_thresholds", m_variance_thresholds);
-
-  preComputeNLog2Pi();
-  m_g_norm = config.read<double>("g_norm");
-}
-
-namespace bob { namespace learn { namespace misc {
-  std::ostream& operator<<(std::ostream& os, const Gaussian& g) {
-    os << "Mean = " << g.m_mean << std::endl;
-    os << "Variance = " << g.m_variance << std::endl;
-    return os;
-  }
-} } }
diff --git a/bob/learn/misc/cpp/ISVBase.cpp b/bob/learn/misc/cpp/ISVBase.cpp
deleted file mode 100644
index 7884786b101acbce270b297bb29e25f98b194bfc..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/ISVBase.cpp
+++ /dev/null
@@ -1,76 +0,0 @@
-/**
- * @date Tue Jan 27 16:02:00 2015 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-
-#include <bob.learn.misc/ISVBase.h>
-#include <bob.core/array_copy.h>
-#include <bob.math/linear.h>
-#include <bob.math/inv.h>
-#include <bob.learn.misc/LinearScoring.h>
-#include <limits>
-
-
-//////////////////// ISVBase ////////////////////
-bob::learn::misc::ISVBase::ISVBase()
-{
-}
-
-bob::learn::misc::ISVBase::ISVBase(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm,
-    const size_t ru):
-  m_base(ubm, ru, 1)
-{
-  blitz::Array<double,2>& V = m_base.updateV();
-  V = 0;
-}
-
-bob::learn::misc::ISVBase::ISVBase(const bob::learn::misc::ISVBase& other):
-  m_base(other.m_base)
-{
-}
-
-
-bob::learn::misc::ISVBase::ISVBase(bob::io::base::HDF5File& config)
-{
-  load(config);
-}
-
-bob::learn::misc::ISVBase::~ISVBase() {
-}
-
-void bob::learn::misc::ISVBase::save(bob::io::base::HDF5File& config) const
-{
-  config.setArray("U", m_base.getU());
-  config.setArray("d", m_base.getD());
-}
-
-void bob::learn::misc::ISVBase::load(bob::io::base::HDF5File& config)
-{
-  //reads all data directly into the member variables
-  blitz::Array<double,2> U = config.readArray<double,2>("U");
-  blitz::Array<double,1> d = config.readArray<double,1>("d");
-  const int ru = U.extent(1);
-  if (!m_base.getUbm())
-    m_base.resize(ru, 1, U.extent(0));
-  else
-    m_base.resize(ru, 1);
-  m_base.setU(U);
-  m_base.setD(d);
-  blitz::Array<double,2>& V = m_base.updateV();
-  V = 0;
-}
-
-bob::learn::misc::ISVBase&
-bob::learn::misc::ISVBase::operator=(const bob::learn::misc::ISVBase& other)
-{
-  if (this != &other)
-  {
-    m_base = other.m_base;
-  }
-  return *this;
-}
-
diff --git a/bob/learn/misc/cpp/ISVMachine.cpp b/bob/learn/misc/cpp/ISVMachine.cpp
deleted file mode 100644
index ea5ad28ba224c7cbaa5359b5c7e6bcb8b9268098..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/ISVMachine.cpp
+++ /dev/null
@@ -1,182 +0,0 @@
-/**
- * @date Tue Jan 27 16:06:00 2015 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-
-#include <bob.learn.misc/ISVMachine.h>
-#include <bob.core/array_copy.h>
-#include <bob.math/linear.h>
-#include <bob.math/inv.h>
-#include <bob.learn.misc/LinearScoring.h>
-#include <limits>
-
-
-//////////////////// ISVMachine ////////////////////
-bob::learn::misc::ISVMachine::ISVMachine():
-  m_z(1)
-{
-  resizeTmp();
-}
-
-bob::learn::misc::ISVMachine::ISVMachine(const boost::shared_ptr<bob::learn::misc::ISVBase> isv_base):
-  m_isv_base(isv_base),
-  m_z(isv_base->getSupervectorLength())
-{
-  if (!m_isv_base->getUbm())
-    throw std::runtime_error("No UBM was set in the JFA machine.");
-  updateCache();
-  resizeTmp();
-}
-
-
-bob::learn::misc::ISVMachine::ISVMachine(const bob::learn::misc::ISVMachine& other):
-  m_isv_base(other.m_isv_base),
-  m_z(bob::core::array::ccopy(other.m_z))
-{
-  updateCache();
-  resizeTmp();
-}
-
-bob::learn::misc::ISVMachine::ISVMachine(bob::io::base::HDF5File& config)
-{
-  load(config);
-}
-
-bob::learn::misc::ISVMachine::~ISVMachine() {
-}
-
-bob::learn::misc::ISVMachine&
-bob::learn::misc::ISVMachine::operator=(const bob::learn::misc::ISVMachine& other)
-{
-  if (this != &other)
-  {
-    m_isv_base = other.m_isv_base;
-    m_z.reference(bob::core::array::ccopy(other.m_z));
-  }
-  return *this;
-}
-
-bool bob::learn::misc::ISVMachine::operator==(const bob::learn::misc::ISVMachine& other) const
-{
-  return (*m_isv_base == *(other.m_isv_base) &&
-          bob::core::array::isEqual(m_z, other.m_z));
-}
-
-bool bob::learn::misc::ISVMachine::operator!=(const bob::learn::misc::ISVMachine& b) const
-{
-  return !(this->operator==(b));
-}
-
-
-bool bob::learn::misc::ISVMachine::is_similar_to(const bob::learn::misc::ISVMachine& b,
-    const double r_epsilon, const double a_epsilon) const
-{
-  return (m_isv_base->is_similar_to(*(b.m_isv_base), r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(m_z, b.m_z, r_epsilon, a_epsilon));
-}
-
-void bob::learn::misc::ISVMachine::save(bob::io::base::HDF5File& config) const
-{
-  config.setArray("z", m_z);
-}
-
-void bob::learn::misc::ISVMachine::load(bob::io::base::HDF5File& config)
-{
-  //reads all data directly into the member variables
-  blitz::Array<double,1> z = config.readArray<double,1>("z");
-  if (!m_isv_base)
-    m_z.resize(z.extent(0));
-  setZ(z);
-  // update cache
-  updateCache();
-  resizeTmp();
-}
-
-void bob::learn::misc::ISVMachine::setZ(const blitz::Array<double,1>& z)
-{
-  if(z.extent(0) != m_z.extent(0)) { //checks dimension
-    boost::format m("size of input vector `z' (%d) does not match the expected size (%d)");
-    m % z.extent(0) % m_z.extent(0);
-    throw std::runtime_error(m.str());
-  }
-  m_z.reference(bob::core::array::ccopy(z));
-  // update cache
-  updateCache();
-}
-
-void bob::learn::misc::ISVMachine::setISVBase(const boost::shared_ptr<bob::learn::misc::ISVBase> isv_base)
-{
-  if (!isv_base->getUbm())
-    throw std::runtime_error("No UBM was set in the JFA machine.");
-  m_isv_base = isv_base;
-  // Resize variables
-  resize();
-}
-
-void bob::learn::misc::ISVMachine::resize()
-{
-  m_z.resizeAndPreserve(getSupervectorLength());
-  updateCache();
-  resizeTmp();
-}
-
-void bob::learn::misc::ISVMachine::resizeTmp()
-{
-  if (m_isv_base)
-  {
-    m_tmp_Ux.resize(getSupervectorLength());
-  }
-}
-
-void bob::learn::misc::ISVMachine::updateCache()
-{
-  if (m_isv_base)
-  {
-    // m + Dz
-    m_cache_mDz.resize(getSupervectorLength());
-    m_cache_mDz = m_isv_base->getD()*m_z + m_isv_base->getUbm()->getMeanSupervector();
-    m_cache_x.resize(getDimRu());
-  }
-}
-
-void bob::learn::misc::ISVMachine::estimateUx(const bob::learn::misc::GMMStats& gmm_stats,
-  blitz::Array<double,1>& Ux)
-{
-  estimateX(gmm_stats, m_cache_x);
-  bob::math::prod(m_isv_base->getU(), m_cache_x, Ux);
-}
-
-double bob::learn::misc::ISVMachine::forward(const bob::learn::misc::GMMStats& input)
-{
-  return forward_(input);
-}
-
-double bob::learn::misc::ISVMachine::forward(const bob::learn::misc::GMMStats& gmm_stats,
-  const blitz::Array<double,1>& Ux)
-{
-  // Checks that a Base machine has been set
-  if (!m_isv_base) throw std::runtime_error("No UBM was set in the JFA machine.");
-
-  return bob::learn::misc::linearScoring(m_cache_mDz,
-            m_isv_base->getUbm()->getMeanSupervector(), m_isv_base->getUbm()->getVarianceSupervector(),
-            gmm_stats, Ux, true);
-}
-
-double bob::learn::misc::ISVMachine::forward_(const bob::learn::misc::GMMStats& input)
-{
-  // Checks that a Base machine has been set
-  if(!m_isv_base) throw std::runtime_error("No UBM was set in the JFA machine.");
-
-  // Ux and GMMStats
-  estimateX(input, m_cache_x);
-  bob::math::prod(m_isv_base->getU(), m_cache_x, m_tmp_Ux);
-
-  return bob::learn::misc::linearScoring(m_cache_mDz,
-            m_isv_base->getUbm()->getMeanSupervector(), m_isv_base->getUbm()->getVarianceSupervector(),
-            input, m_tmp_Ux, true);
-}
-
diff --git a/bob/learn/misc/cpp/ISVTrainer.cpp b/bob/learn/misc/cpp/ISVTrainer.cpp
deleted file mode 100644
index 5ccedb078255554f33e45004323589dfffdbbff0..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/ISVTrainer.cpp
+++ /dev/null
@@ -1,130 +0,0 @@
-/**
- * @date Tue Jul 19 12:16:17 2011 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * @brief Joint Factor Analysis Trainer
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <bob.learn.misc/ISVTrainer.h>
-#include <bob.core/check.h>
-#include <bob.core/array_copy.h>
-#include <bob.core/array_random.h>
-#include <bob.math/inv.h>
-#include <bob.math/linear.h>
-#include <bob.core/check.h>
-#include <bob.core/array_repmat.h>
-#include <algorithm>
-
-
-//////////////////////////// ISVTrainer ///////////////////////////
-bob::learn::misc::ISVTrainer::ISVTrainer(const double relevance_factor):
-  m_relevance_factor(relevance_factor),
-  m_rng(new boost::mt19937())
-{}
-
-bob::learn::misc::ISVTrainer::ISVTrainer(const bob::learn::misc::ISVTrainer& other):
-  m_rng(other.m_rng)
-{
-  m_relevance_factor      = other.m_relevance_factor;
-}
-
-bob::learn::misc::ISVTrainer::~ISVTrainer()
-{}
-
-bob::learn::misc::ISVTrainer& bob::learn::misc::ISVTrainer::operator=
-(const bob::learn::misc::ISVTrainer& other)
-{
-  if (this != &other)
-  {
-    m_rng                   = other.m_rng;
-    m_relevance_factor      = other.m_relevance_factor;
-  }
-  return *this;
-}
-
-bool bob::learn::misc::ISVTrainer::operator==(const bob::learn::misc::ISVTrainer& b) const
-{
-  return m_rng == b.m_rng && 
-         m_relevance_factor == b.m_relevance_factor;
-}
-
-bool bob::learn::misc::ISVTrainer::operator!=(const bob::learn::misc::ISVTrainer& b) const
-{
-  return !(this->operator==(b));
-}
-
-bool bob::learn::misc::ISVTrainer::is_similar_to(const bob::learn::misc::ISVTrainer& b,
-  const double r_epsilon, const double a_epsilon) const
-{
-  return  m_rng == b.m_rng && 
-          m_relevance_factor == b.m_relevance_factor;
-}
-
-void bob::learn::misc::ISVTrainer::initialize(bob::learn::misc::ISVBase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
-{
-  m_base_trainer.initUbmNidSumStatistics(machine.getBase(), ar);
-  m_base_trainer.initializeXYZ(ar);
-
-  blitz::Array<double,2>& U = machine.updateU();
-  bob::core::array::randn(*m_rng, U);
-  initializeD(machine);
-  machine.precompute();
-}
-
-void bob::learn::misc::ISVTrainer::initializeD(bob::learn::misc::ISVBase& machine) const
-{
-  // D = sqrt(variance(UBM) / relevance_factor)
-  blitz::Array<double,1> d = machine.updateD();
-  d = sqrt(machine.getBase().getUbmVariance() / m_relevance_factor);
-}
-
-void bob::learn::misc::ISVTrainer::eStep(bob::learn::misc::ISVBase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
-{
-  m_base_trainer.resetXYZ();
-
-  const bob::learn::misc::FABase& base = machine.getBase();
-  m_base_trainer.updateX(base, ar);
-  m_base_trainer.updateZ(base, ar);
-  m_base_trainer.computeAccumulatorsU(base, ar);
-}
-
-void bob::learn::misc::ISVTrainer::mStep(bob::learn::misc::ISVBase& machine)
-{
-  blitz::Array<double,2>& U = machine.updateU();
-  m_base_trainer.updateU(U);
-  machine.precompute();
-}
-
-double bob::learn::misc::ISVTrainer::computeLikelihood(bob::learn::misc::ISVBase& machine)
-{
-  // TODO
-  return 0;
-}
-
-void bob::learn::misc::ISVTrainer::enrol(bob::learn::misc::ISVMachine& machine,
-  const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& ar,
-  const size_t n_iter)
-{
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > vvec;
-  vvec.push_back(ar);
-
-  const bob::learn::misc::FABase& fb = machine.getISVBase()->getBase();
-
-  m_base_trainer.initUbmNidSumStatistics(fb, vvec);
-  m_base_trainer.initializeXYZ(vvec);
-
-  for (size_t i=0; i<n_iter; ++i) {
-    m_base_trainer.updateX(fb, vvec);
-    m_base_trainer.updateZ(fb, vvec);
-  }
-
-  const blitz::Array<double,1> z(m_base_trainer.getZ()[0]);
-  machine.setZ(z);
-}
-
-
-
diff --git a/bob/learn/misc/cpp/IVectorMachine.cpp b/bob/learn/misc/cpp/IVectorMachine.cpp
deleted file mode 100644
index 61f7ee40b574ee4a80234952c3b08794d6294bd1..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/IVectorMachine.cpp
+++ /dev/null
@@ -1,249 +0,0 @@
-/**
- * @date Sat Mar 30 21:00:00 2013 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <bob.learn.misc/IVectorMachine.h>
-#include <bob.core/array_copy.h>
-#include <bob.core/check.h>
-#include <bob.math/linear.h>
-#include <bob.math/linsolve.h>
-
-bob::learn::misc::IVectorMachine::IVectorMachine()
-{
-}
-
-bob::learn::misc::IVectorMachine::IVectorMachine(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm,
-    const size_t rt, const double variance_threshold):
-  m_ubm(ubm), m_rt(rt),
-  m_T(getSupervectorLength(),rt), m_sigma(getSupervectorLength()),
-  m_variance_threshold(variance_threshold)
-{
-  resizePrecompute();
-}
-
-bob::learn::misc::IVectorMachine::IVectorMachine(const bob::learn::misc::IVectorMachine& other):
-  m_ubm(other.m_ubm), m_rt(other.m_rt),
-  m_T(bob::core::array::ccopy(other.m_T)),
-  m_sigma(bob::core::array::ccopy(other.m_sigma)),
-  m_variance_threshold(other.m_variance_threshold)
-{
-  resizePrecompute();
-}
-
-bob::learn::misc::IVectorMachine::IVectorMachine(bob::io::base::HDF5File& config)
-{
-  load(config);
-}
-
-bob::learn::misc::IVectorMachine::~IVectorMachine() {
-}
-
-void bob::learn::misc::IVectorMachine::save(bob::io::base::HDF5File& config) const
-{
-  config.setArray("m_T", m_T);
-  config.setArray("m_sigma", m_sigma);
-  config.set("m_variance_threshold", m_variance_threshold);
-}
-
-void bob::learn::misc::IVectorMachine::load(bob::io::base::HDF5File& config)
-{
-  //reads all data directly into the member variables
-  m_T.reference(config.readArray<double,2>("m_T"));
-  m_rt = m_T.extent(1);
-  m_sigma.reference(config.readArray<double,1>("m_sigma"));
-  m_variance_threshold = config.read<double>("m_variance_threshold");
-  resizePrecompute();
-}
-
-void bob::learn::misc::IVectorMachine::resize(const size_t rt)
-{
-  m_rt = rt;
-  m_T.resizeAndPreserve(m_T.extent(0), rt);
-  resizePrecompute();
-}
-
-bob::learn::misc::IVectorMachine&
-bob::learn::misc::IVectorMachine::operator=(const bob::learn::misc::IVectorMachine& other)
-{
-  if (this != &other)
-  {
-    m_ubm = other.m_ubm;
-    m_rt = other.m_rt;
-    m_T.reference(bob::core::array::ccopy(other.m_T));
-    m_sigma.reference(bob::core::array::ccopy(other.m_sigma));
-    m_variance_threshold = other.m_variance_threshold;
-    resizePrecompute();
-  }
-  return *this;
-}
-
-bool bob::learn::misc::IVectorMachine::operator==(const IVectorMachine& b) const
-{
-  return (((m_ubm && b.m_ubm) && *m_ubm == *(b.m_ubm)) || (!m_ubm && !b.m_ubm)) &&
-         m_rt == b.m_rt &&
-         bob::core::array::isEqual(m_T, b.m_T) &&
-         bob::core::array::isEqual(m_sigma, b.m_sigma) &&
-         m_variance_threshold == b.m_variance_threshold;
-}
-
-bool bob::learn::misc::IVectorMachine::operator!=(const bob::learn::misc::IVectorMachine& b) const
-{
-  return !(this->operator==(b));
-}
-
-bool bob::learn::misc::IVectorMachine::is_similar_to(const IVectorMachine& b,
-  const double r_epsilon, const double a_epsilon) const
-{
-  // TODO: update with new is_similar_to method
-  return (((m_ubm && b.m_ubm) && m_ubm->is_similar_to(*(b.m_ubm), r_epsilon)) || (!m_ubm && !b.m_ubm)) &&
-          m_rt == b.m_rt &&
-          bob::core::array::isClose(m_T, b.m_T, r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(m_sigma, b.m_sigma, r_epsilon, a_epsilon) &&
-          bob::core::isClose(m_variance_threshold, b.m_variance_threshold, r_epsilon, a_epsilon);
-}
-
-void bob::learn::misc::IVectorMachine::setUbm(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm)
-{
-  m_ubm = ubm;
-  resizePrecompute();
-}
-
-void bob::learn::misc::IVectorMachine::setT(const blitz::Array<double,2>& T)
-{
-  bob::core::array::assertSameShape(m_T, T);
-  m_T = T;
-  // Update cache
-  precompute();
-}
-
-void bob::learn::misc::IVectorMachine::setSigma(const blitz::Array<double,1>& sigma)
-{
-  bob::core::array::assertSameShape(m_sigma, sigma);
-  m_sigma = sigma;
-  // Update cache
-  precompute();
-}
-
-
-void bob::learn::misc::IVectorMachine::setVarianceThreshold(const double thd)
-{
-  m_variance_threshold = thd;
-  // Update cache
-  precompute();
-}
-
-void bob::learn::misc::IVectorMachine::applyVarianceThreshold()
-{
-  // Apply variance flooring threshold
-  m_sigma = blitz::where(m_sigma < m_variance_threshold, m_variance_threshold, m_sigma);
-}
-
-void bob::learn::misc::IVectorMachine::precompute()
-{
-  if (m_ubm)
-  {
-    // Apply variance threshold
-    applyVarianceThreshold();
-
-    blitz::firstIndex i;
-    blitz::secondIndex j;
-    blitz::Range rall = blitz::Range::all();
-    const int C = (int)m_ubm->getNGaussians();
-    const int D = (int)m_ubm->getNInputs();
-    // T_{c}^{T}.sigma_{c}^{-1}
-    for (int c=0; c<C; ++c)
-    {
-      blitz::Array<double,2> Tct_sigmacInv = m_cache_Tct_sigmacInv(c, rall, rall);
-      blitz::Array<double,2> Tc = m_T(blitz::Range(c*D,(c+1)*D-1), rall);
-      blitz::Array<double,2> Tct = Tc.transpose(1,0);
-      blitz::Array<double,1> sigma_c = m_sigma(blitz::Range(c*D,(c+1)*D-1));
-      Tct_sigmacInv = Tct(i,j) / sigma_c(j);
-    }
-
-    // T_{c}^{T}.sigma_{c}^{-1}.T_{c}
-    for (int c=0; c<C; ++c)
-    {
-      blitz::Array<double,2> Tc = m_T(blitz::Range(c*D,(c+1)*D-1), rall);
-      blitz::Array<double,2> Tct_sigmacInv = m_cache_Tct_sigmacInv(c, rall, rall);
-      blitz::Array<double,2> Tct_sigmacInv_Tc = m_cache_Tct_sigmacInv_Tc(c, rall, rall);
-      bob::math::prod(Tct_sigmacInv, Tc, Tct_sigmacInv_Tc);
-    }
-  }
-}
-
-void bob::learn::misc::IVectorMachine::resizePrecompute()
-{
-  resizeCache();
-  resizeTmp();
-  precompute();
-}
-
-void bob::learn::misc::IVectorMachine::resizeCache()
-{
-  if (m_ubm)
-  {
-    const int C = (int)m_ubm->getNGaussians();
-    const int D = (int)m_ubm->getNInputs();
-    m_cache_Tct_sigmacInv.resize(C, (int)m_rt, D);
-    m_cache_Tct_sigmacInv_Tc.resize(C, (int)m_rt, (int)m_rt);
-  }
-}
-
-void bob::learn::misc::IVectorMachine::resizeTmp()
-{
-  if (m_ubm)
-    m_tmp_d.resize(m_ubm->getNInputs());
-  m_tmp_t1.resize(m_rt);
-  m_tmp_t2.resize(m_rt);
-  m_tmp_tt.resize(m_rt, m_rt);
-}
-
-void bob::learn::misc::IVectorMachine::forward(const bob::learn::misc::GMMStats& gs,
-  blitz::Array<double,1>& ivector) const
-{
-  bob::core::array::assertSameDimensionLength(ivector.extent(0), (int)m_rt);  
-  forward_(gs, ivector);
-}
-
-void bob::learn::misc::IVectorMachine::computeIdTtSigmaInvT(
-  const bob::learn::misc::GMMStats& gs, blitz::Array<double,2>& output) const
-{
-  // Computes \f$(Id + \sum_{c=1}^{C} N_{i,j,c} T^{T} \Sigma_{c}^{-1} T)\f$
-  blitz::Range rall = blitz::Range::all();
-  bob::math::eye(output);
-  for (int c=0; c<(int)getNGaussians(); ++c)
-    output += gs.n(c) * m_cache_Tct_sigmacInv_Tc(c, rall, rall);
-}
-
-void bob::learn::misc::IVectorMachine::computeTtSigmaInvFnorm(
-  const bob::learn::misc::GMMStats& gs, blitz::Array<double,1>& output) const
-{
-  // Computes \f$T^{T} \Sigma^{-1} \sum_{c=1}^{C} (F_c - N_c ubmmean_{c})\f$
-  blitz::Range rall = blitz::Range::all();
-  output = 0;
-  for (int c=0; c<(int)getNGaussians(); ++c)
-  {
-    m_tmp_d = gs.sumPx(c,rall) - gs.n(c) * m_ubm->getGaussian(c)->getMean();
-    blitz::Array<double,2> Tct_sigmacInv = m_cache_Tct_sigmacInv(c, rall, rall);
-    bob::math::prod(Tct_sigmacInv, m_tmp_d, m_tmp_t2);
-
-    output += m_tmp_t2;
-  }
-}
-
-void bob::learn::misc::IVectorMachine::forward_(const bob::learn::misc::GMMStats& gs,
-  blitz::Array<double,1>& ivector) const
-{
-  // Computes \f$(Id + \sum_{c=1}^{C} N_{i,j,c} T^{T} \Sigma_{c}^{-1} T)\f$
-  computeIdTtSigmaInvT(gs, m_tmp_tt);
-
-  // Computes \f$T^{T} \Sigma^{-1} \sum_{c=1}^{C} (F_c - N_c ubmmean_{c})\f$
-  computeTtSigmaInvFnorm(gs, m_tmp_t1);
-
-  // Solves m_tmp_tt.ivector = m_tmp_t1
-  bob::math::linsolve(m_tmp_tt, ivector, m_tmp_t1);
-}
-
diff --git a/bob/learn/misc/cpp/IVectorTrainer.cpp b/bob/learn/misc/cpp/IVectorTrainer.cpp
deleted file mode 100644
index 86adf70ec563932999d76fd05ab591647ad019fe..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/IVectorTrainer.cpp
+++ /dev/null
@@ -1,228 +0,0 @@
-/**
- * @date Sun Mar 31 20:15:00 2013 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-
-#include <bob.learn.misc/IVectorTrainer.h>
-#include <bob.core/check.h>
-#include <bob.core/array_copy.h>
-#include <bob.core/array_random.h>
-#include <bob.math/inv.h>
-#include <bob.core/check.h>
-#include <bob.core/array_repmat.h>
-#include <algorithm>
-
-#include <bob.math/linear.h>
-#include <bob.math/linsolve.h>
-
-bob::learn::misc::IVectorTrainer::IVectorTrainer(const bool update_sigma):
-  m_update_sigma(update_sigma),
-  m_rng(new boost::mt19937())
-{}
-
-bob::learn::misc::IVectorTrainer::IVectorTrainer(const bob::learn::misc::IVectorTrainer& other):
-  m_update_sigma(other.m_update_sigma)
-{
-  m_rng                   = other.m_rng;
-  m_acc_Nij_wij2.reference(bob::core::array::ccopy(other.m_acc_Nij_wij2));
-  m_acc_Fnormij_wij.reference(bob::core::array::ccopy(other.m_acc_Fnormij_wij));
-  m_acc_Nij.reference(bob::core::array::ccopy(other.m_acc_Nij));
-  m_acc_Snormij.reference(bob::core::array::ccopy(other.m_acc_Snormij));
-
-  m_tmp_wij.reference(bob::core::array::ccopy(other.m_tmp_wij));
-  m_tmp_wij2.reference(bob::core::array::ccopy(other.m_tmp_wij2));
-  m_tmp_d1.reference(bob::core::array::ccopy(other.m_tmp_d1));
-  m_tmp_t1.reference(bob::core::array::ccopy(other.m_tmp_t1));
-  m_tmp_dd1.reference(bob::core::array::ccopy(other.m_tmp_dd1));
-  m_tmp_dt1.reference(bob::core::array::ccopy(other.m_tmp_dt1));
-  m_tmp_tt1.reference(bob::core::array::ccopy(other.m_tmp_tt1));
-  m_tmp_tt2.reference(bob::core::array::ccopy(other.m_tmp_tt2));
-}
-
-bob::learn::misc::IVectorTrainer::~IVectorTrainer()
-{
-}
-
-void bob::learn::misc::IVectorTrainer::initialize(
-  bob::learn::misc::IVectorMachine& machine)
-{
-
-  const int C = machine.getNGaussians();
-  const int D = machine.getNInputs();
-  const int Rt = machine.getDimRt();
-
-  // Cache
-  m_acc_Nij_wij2.resize(C,Rt,Rt);
-  m_acc_Fnormij_wij.resize(C,D,Rt);
-  if (m_update_sigma)
-  {
-    m_acc_Nij.resize(C);
-    m_acc_Snormij.resize(C,D);
-  }
-
-  // Tmp
-  m_tmp_wij.resize(Rt);
-  m_tmp_wij2.resize(Rt,Rt);
-  m_tmp_d1.resize(D);
-  m_tmp_t1.resize(Rt);
-
-  m_tmp_dt1.resize(D,Rt);
-  m_tmp_tt1.resize(Rt,Rt);
-  m_tmp_tt2.resize(Rt,Rt);
-  if (m_update_sigma)
-    m_tmp_dd1.resize(D,D);
-
-  // Initializes \f$T\f$ and \f$\Sigma\f$ of the machine
-  blitz::Array<double,2>& T = machine.updateT();
-  bob::core::array::randn(*m_rng, T);
-  blitz::Array<double,1>& sigma = machine.updateSigma();
-  sigma = machine.getUbm()->getVarianceSupervector();
-  machine.precompute();
-}
-
-void bob::learn::misc::IVectorTrainer::eStep(
-  bob::learn::misc::IVectorMachine& machine,
-  const std::vector<bob::learn::misc::GMMStats>& data)
-{
-  blitz::Range rall = blitz::Range::all();
-  const int C = machine.getNGaussians();
-
-  // Reinitializes accumulators to 0
-  m_acc_Nij_wij2 = 0.;
-  m_acc_Fnormij_wij = 0.;
-  if (m_update_sigma)
-  {
-    m_acc_Nij = 0.;
-    m_acc_Snormij = 0.;
-  }
-  for (std::vector<bob::learn::misc::GMMStats>::const_iterator it = data.begin();
-       it != data.end(); ++it)
-  {
-    // Computes E{wij} and E{wij.wij^{T}}
-    // a. Computes \f$T^{T} \Sigma^{-1} F_{norm}\f$
-    machine.computeTtSigmaInvFnorm(*it, m_tmp_t1);
-    // b. Computes \f$Id + T^{T} \Sigma^{-1} T\f$
-    machine.computeIdTtSigmaInvT(*it, m_tmp_tt1);
-    // c. Computes \f$(Id + T^{T} \Sigma^{-1} T)^{-1}\f$
-
-    bob::math::inv(m_tmp_tt1, m_tmp_tt2);
-    // d. Computes \f$E{wij} = (Id + T^{T} \Sigma^{-1} T)^{-1} T^{T} \Sigma^{-1} F_{norm}\f$
-    bob::math::prod(m_tmp_tt2, m_tmp_t1, m_tmp_wij); // E{wij}
-    // e.  Computes \f$E{wij}.E{wij^{T}}\f$
-    bob::math::prod(m_tmp_wij, m_tmp_wij, m_tmp_wij2);
-    // f. Computes \f$E{wij.wij^{T}} = (Id + T^{T} \Sigma^{-1} T)^{-1} + E{wij}.E{wij^{T}}\f$
-    m_tmp_wij2 += m_tmp_tt2; // E{wij.wij^{T}}
-
-    if (m_update_sigma)
-      m_acc_Nij += (*it).n;
-
-    for (int c=0; c<C; ++c)
-    {
-      blitz::Array<double,2> acc_Nij_wij2_c = m_acc_Nij_wij2(c,rall,rall);
-      blitz::Array<double,2> acc_Fnormij_wij = m_acc_Fnormij_wij(c,rall,rall);
-      // acc_Nij_wij2_c += Nijc . E{wij.wij^{T}}
-      acc_Nij_wij2_c += (*it).n(c) * m_tmp_wij2;
-      blitz::Array<double,1> mc = machine.getUbm()->getGaussian(c)->getMean();
-      // m_tmp_d1 = Fijc - Nijc * ubmmean_{c}
-      m_tmp_d1 = (*it).sumPx(c,rall) - (*it).n(c)*mc; // Fnorm_c
-      // m_tmp_dt1 = (Fijc - Nijc * ubmmean_{c}).E{wij}^{T}
-      bob::math::prod(m_tmp_d1, m_tmp_wij, m_tmp_dt1);
-      // acc_Fnormij_wij += (Fijc - Nijc * ubmmean_{c}).E{wij}^{T}
-      acc_Fnormij_wij += m_tmp_dt1;
-      if (m_update_sigma)
-      {
-        blitz::Array<double,1> acc_Snormij_c = m_acc_Snormij(c,rall);
-        acc_Snormij_c += (*it).sumPxx(c,rall) - mc*((*it).sumPx(c,rall) + m_tmp_d1);
-      }
-    }
-  }
-}
-
-void bob::learn::misc::IVectorTrainer::mStep(
-  bob::learn::misc::IVectorMachine& machine)
-{
-  blitz::Range rall = blitz::Range::all();
-  blitz::Array<double,2>& T = machine.updateT();
-  blitz::Array<double,1>& sigma = machine.updateSigma();
-  const int C = (int)machine.getNGaussians();
-  const int D = (int)machine.getNInputs();
-  for (int c=0; c<C; ++c)
-  {
-    // Solves linear system A.T = B to update T, based on accumulators of
-    // the eStep()
-    blitz::Array<double,2> acc_Nij_wij2_c = m_acc_Nij_wij2(c,rall,rall);
-    blitz::Array<double,2> tacc_Nij_wij2_c = acc_Nij_wij2_c.transpose(1,0);
-    blitz::Array<double,2> acc_Fnormij_wij_c = m_acc_Fnormij_wij(c,rall,rall);
-    blitz::Array<double,2> tacc_Fnormij_wij_c = acc_Fnormij_wij_c.transpose(1,0);
-    blitz::Array<double,2> T_c = T(blitz::Range(c*D,(c+1)*D-1),rall);
-    blitz::Array<double,2> Tt_c = T_c.transpose(1,0);
-    if (blitz::all(acc_Nij_wij2_c == 0)) // TODO
-      Tt_c = 0;
-    else
-      bob::math::linsolve(tacc_Nij_wij2_c, Tt_c, tacc_Fnormij_wij_c);
-    if (m_update_sigma)
-    {
-      blitz::Array<double,1> sigma_c = sigma(blitz::Range(c*D,(c+1)*D-1));
-      bob::math::prod(acc_Fnormij_wij_c, Tt_c, m_tmp_dd1);
-      bob::math::diag(m_tmp_dd1, m_tmp_d1);
-      sigma_c = (m_acc_Snormij(c,rall) - m_tmp_d1) / m_acc_Nij(c);
-    }
-  }
-  machine.precompute();
-}
-
-
-bob::learn::misc::IVectorTrainer& bob::learn::misc::IVectorTrainer::operator=
-  (const bob::learn::misc::IVectorTrainer &other)
-{
-  if (this != &other)
-  {    
-    m_update_sigma = other.m_update_sigma;
-
-    m_acc_Nij_wij2.reference(bob::core::array::ccopy(other.m_acc_Nij_wij2));
-    m_acc_Fnormij_wij.reference(bob::core::array::ccopy(other.m_acc_Fnormij_wij));
-    m_acc_Nij.reference(bob::core::array::ccopy(other.m_acc_Nij));
-    m_acc_Snormij.reference(bob::core::array::ccopy(other.m_acc_Snormij));
-
-    m_tmp_wij.reference(bob::core::array::ccopy(other.m_tmp_wij));
-    m_tmp_wij2.reference(bob::core::array::ccopy(other.m_tmp_wij2));
-    m_tmp_d1.reference(bob::core::array::ccopy(other.m_tmp_d1));
-    m_tmp_t1.reference(bob::core::array::ccopy(other.m_tmp_t1));
-    m_tmp_dd1.reference(bob::core::array::ccopy(other.m_tmp_dd1));
-    m_tmp_dt1.reference(bob::core::array::ccopy(other.m_tmp_dt1));
-    m_tmp_tt1.reference(bob::core::array::ccopy(other.m_tmp_tt1));
-    m_tmp_tt2.reference(bob::core::array::ccopy(other.m_tmp_tt2));
-  }
-  return *this;
-}
-
-bool bob::learn::misc::IVectorTrainer::operator==
-  (const bob::learn::misc::IVectorTrainer &other) const
-{
-  return m_update_sigma == other.m_update_sigma &&
-         bob::core::array::isEqual(m_acc_Nij_wij2, other.m_acc_Nij_wij2) &&
-         bob::core::array::isEqual(m_acc_Fnormij_wij, other.m_acc_Fnormij_wij) &&
-         bob::core::array::isEqual(m_acc_Nij, other.m_acc_Nij) &&
-         bob::core::array::isEqual(m_acc_Snormij, other.m_acc_Snormij);
-}
-
-bool bob::learn::misc::IVectorTrainer::operator!=
-  (const bob::learn::misc::IVectorTrainer &other) const
-{
-  return !(this->operator==(other));
-}
-
-bool bob::learn::misc::IVectorTrainer::is_similar_to
-  (const bob::learn::misc::IVectorTrainer &other, const double r_epsilon,
-   const double a_epsilon) const
-{
-  return m_update_sigma == other.m_update_sigma &&
-         bob::core::array::isClose(m_acc_Nij_wij2, other.m_acc_Nij_wij2, r_epsilon, a_epsilon) &&
-         bob::core::array::isClose(m_acc_Fnormij_wij, other.m_acc_Fnormij_wij, r_epsilon, a_epsilon) &&
-         bob::core::array::isClose(m_acc_Nij, other.m_acc_Nij, r_epsilon, a_epsilon) &&
-         bob::core::array::isClose(m_acc_Snormij, other.m_acc_Snormij, r_epsilon, a_epsilon);
-}
-
diff --git a/bob/learn/misc/cpp/JFABase.cpp b/bob/learn/misc/cpp/JFABase.cpp
deleted file mode 100644
index a87e57f0f3214f0df0ec1cfc72dfeea247d866a8..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/JFABase.cpp
+++ /dev/null
@@ -1,75 +0,0 @@
-/**
- * @date Tue Jan 27 15:54:00 2015 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-
-#include <bob.learn.misc/JFABase.h>
-#include <bob.core/array_copy.h>
-#include <bob.math/linear.h>
-#include <bob.math/inv.h>
-#include <bob.learn.misc/LinearScoring.h>
-#include <limits>
-
-
-//////////////////// JFABase ////////////////////
-bob::learn::misc::JFABase::JFABase()
-{
-}
-
-bob::learn::misc::JFABase::JFABase(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm,
-    const size_t ru, const size_t rv):
-  m_base(ubm, ru, rv)
-{
-}
-
-bob::learn::misc::JFABase::JFABase(const bob::learn::misc::JFABase& other):
-  m_base(other.m_base)
-{
-}
-
-
-bob::learn::misc::JFABase::JFABase(bob::io::base::HDF5File& config)
-{
-  load(config);
-}
-
-bob::learn::misc::JFABase::~JFABase() {
-}
-
-void bob::learn::misc::JFABase::save(bob::io::base::HDF5File& config) const
-{
-  config.setArray("U", m_base.getU());
-  config.setArray("V", m_base.getV());
-  config.setArray("d", m_base.getD());
-}
-
-void bob::learn::misc::JFABase::load(bob::io::base::HDF5File& config)
-{
-  //reads all data directly into the member variables
-  blitz::Array<double,2> U = config.readArray<double,2>("U");
-  blitz::Array<double,2> V = config.readArray<double,2>("V");
-  blitz::Array<double,1> d = config.readArray<double,1>("d");
-  const int ru = U.extent(1);
-  const int rv = V.extent(1);
-  if (!m_base.getUbm())
-    m_base.resize(ru, rv, U.extent(0));
-  else
-    m_base.resize(ru, rv);
-  m_base.setU(U);
-  m_base.setV(V);
-  m_base.setD(d);
-}
-
-bob::learn::misc::JFABase&
-bob::learn::misc::JFABase::operator=(const bob::learn::misc::JFABase& other)
-{
-  if (this != &other)
-  {
-    m_base = other.m_base;
-  }
-  return *this;
-}
diff --git a/bob/learn/misc/cpp/JFAMachine.cpp b/bob/learn/misc/cpp/JFAMachine.cpp
deleted file mode 100644
index ae4d1d2f9a6ea848b619151c8a643b77cffe99c4..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/JFAMachine.cpp
+++ /dev/null
@@ -1,206 +0,0 @@
-/**
- * @date Tue Jan 27 16:47:00 2015 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-
-#include <bob.learn.misc/JFAMachine.h>
-#include <bob.core/array_copy.h>
-#include <bob.math/linear.h>
-#include <bob.math/inv.h>
-#include <bob.learn.misc/LinearScoring.h>
-#include <limits>
-
-
-//////////////////// JFAMachine ////////////////////
-bob::learn::misc::JFAMachine::JFAMachine():
-  m_y(1), m_z(1)
-{
-  resizeTmp();
-}
-
-bob::learn::misc::JFAMachine::JFAMachine(const boost::shared_ptr<bob::learn::misc::JFABase> jfa_base):
-  m_jfa_base(jfa_base),
-  m_y(jfa_base->getDimRv()), m_z(jfa_base->getSupervectorLength())
-{
-  if (!m_jfa_base->getUbm()) throw std::runtime_error("No UBM was set in the JFA machine.");
-  updateCache();
-  resizeTmp();
-}
-
-
-bob::learn::misc::JFAMachine::JFAMachine(const bob::learn::misc::JFAMachine& other):
-  m_jfa_base(other.m_jfa_base),
-  m_y(bob::core::array::ccopy(other.m_y)),
-  m_z(bob::core::array::ccopy(other.m_z))
-{
-  updateCache();
-  resizeTmp();
-}
-
-bob::learn::misc::JFAMachine::JFAMachine(bob::io::base::HDF5File& config)
-{
-  load(config);
-}
-
-bob::learn::misc::JFAMachine::~JFAMachine() {
-}
-
-bob::learn::misc::JFAMachine&
-bob::learn::misc::JFAMachine::operator=(const bob::learn::misc::JFAMachine& other)
-{
-  if (this != &other)
-  {
-    m_jfa_base = other.m_jfa_base;
-    m_y.reference(bob::core::array::ccopy(other.m_y));
-    m_z.reference(bob::core::array::ccopy(other.m_z));
-  }
-  return *this;
-}
-
-bool bob::learn::misc::JFAMachine::operator==(const bob::learn::misc::JFAMachine& other) const
-{
-  return (*m_jfa_base == *(other.m_jfa_base) &&
-          bob::core::array::isEqual(m_y, other.m_y) &&
-          bob::core::array::isEqual(m_z, other.m_z));
-}
-
-bool bob::learn::misc::JFAMachine::operator!=(const bob::learn::misc::JFAMachine& b) const
-{
-  return !(this->operator==(b));
-}
-
-
-bool bob::learn::misc::JFAMachine::is_similar_to(const bob::learn::misc::JFAMachine& b,
-    const double r_epsilon, const double a_epsilon) const
-{
-  return (m_jfa_base->is_similar_to(*(b.m_jfa_base), r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(m_y, b.m_y, r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(m_z, b.m_z, r_epsilon, a_epsilon));
-}
-
-void bob::learn::misc::JFAMachine::save(bob::io::base::HDF5File& config) const
-{
-  config.setArray("y", m_y);
-  config.setArray("z", m_z);
-}
-
-void bob::learn::misc::JFAMachine::load(bob::io::base::HDF5File& config)
-{
-  //reads all data directly into the member variables
-  blitz::Array<double,1> y = config.readArray<double,1>("y");
-  blitz::Array<double,1> z = config.readArray<double,1>("z");
-  if (!m_jfa_base)
-  {
-    m_y.resize(y.extent(0));
-    m_z.resize(z.extent(0));
-  }
-  setY(y);
-  setZ(z);
-  // update cache
-  updateCache();
-  resizeTmp();
-}
-
-
-void bob::learn::misc::JFAMachine::setY(const blitz::Array<double,1>& y)
-{
-  if(y.extent(0) != m_y.extent(0)) { //checks dimension
-    boost::format m("size of input vector `y' (%d) does not match the expected size (%d)");
-    m % y.extent(0) % m_y.extent(0);
-    throw std::runtime_error(m.str());
-  }
-  m_y.reference(bob::core::array::ccopy(y));
-  // update cache
-  updateCache();
-}
-
-void bob::learn::misc::JFAMachine::setZ(const blitz::Array<double,1>& z)
-{
-  if(z.extent(0) != m_z.extent(0)) { //checks dimension
-    boost::format m("size of input vector `z' (%d) does not match the expected size (%d)");
-    m % z.extent(0) % m_z.extent(0);
-    throw std::runtime_error(m.str());
-  }
-  m_z.reference(bob::core::array::ccopy(z));
-  // update cache
-  updateCache();
-}
-
-void bob::learn::misc::JFAMachine::setJFABase(const boost::shared_ptr<bob::learn::misc::JFABase> jfa_base)
-{
-  if (!jfa_base->getUbm())
-    throw std::runtime_error("No UBM was set in the JFA machine.");
-  m_jfa_base = jfa_base;
-  // Resize variables
-  resize();
-}
-
-void bob::learn::misc::JFAMachine::resize()
-{
-  m_y.resizeAndPreserve(getDimRv());
-  m_z.resizeAndPreserve(getSupervectorLength());
-  updateCache();
-  resizeTmp();
-}
-
-void bob::learn::misc::JFAMachine::resizeTmp()
-{
-  if (m_jfa_base)
-  {
-    m_tmp_Ux.resize(getSupervectorLength());
-  }
-}
-
-void bob::learn::misc::JFAMachine::updateCache()
-{
-  if (m_jfa_base)
-  {
-    // m + Vy + Dz
-    m_cache_mVyDz.resize(getSupervectorLength());
-    bob::math::prod(m_jfa_base->getV(), m_y, m_cache_mVyDz);
-    m_cache_mVyDz += m_jfa_base->getD()*m_z + m_jfa_base->getUbm()->getMeanSupervector();
-    m_cache_x.resize(getDimRu());
-  }
-}
-
-void bob::learn::misc::JFAMachine::estimateUx(const bob::learn::misc::GMMStats& gmm_stats,
-  blitz::Array<double,1>& Ux)
-{
-  estimateX(gmm_stats, m_cache_x);
-  bob::math::prod(m_jfa_base->getU(), m_cache_x, Ux);
-}
-
-double bob::learn::misc::JFAMachine::forward(const bob::learn::misc::GMMStats& input)
-{
-  return forward_(input);
-}
-
-double bob::learn::misc::JFAMachine::forward(const bob::learn::misc::GMMStats& gmm_stats,
-  const blitz::Array<double,1>& Ux)
-{
-  // Checks that a Base machine has been set
-  if (!m_jfa_base) throw std::runtime_error("No UBM was set in the JFA machine.");
-
-  return bob::learn::misc::linearScoring(m_cache_mVyDz,
-            m_jfa_base->getUbm()->getMeanSupervector(), m_jfa_base->getUbm()->getVarianceSupervector(),
-            gmm_stats, Ux, true);
-}
-
-double bob::learn::misc::JFAMachine::forward_(const bob::learn::misc::GMMStats& input)
-{
-  // Checks that a Base machine has been set
-  if (!m_jfa_base) throw std::runtime_error("No UBM was set in the JFA machine.");
-
-  // Ux and GMMStats
-  estimateX(input, m_cache_x);
-  bob::math::prod(m_jfa_base->getU(), m_cache_x, m_tmp_Ux);
-
-  return bob::learn::misc::linearScoring(m_cache_mVyDz,
-            m_jfa_base->getUbm()->getMeanSupervector(), m_jfa_base->getUbm()->getVarianceSupervector(),
-            input, m_tmp_Ux, true);
-}
-
diff --git a/bob/learn/misc/cpp/JFATrainer.cpp b/bob/learn/misc/cpp/JFATrainer.cpp
deleted file mode 100644
index ecb23a2b0b3e6a64bf6af871b4b15d7781a70d0d..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/JFATrainer.cpp
+++ /dev/null
@@ -1,200 +0,0 @@
-/**
- * @date Tue Jul 19 12:16:17 2011 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * @brief Joint Factor Analysis Trainer
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <bob.learn.misc/JFATrainer.h>
-#include <bob.core/check.h>
-#include <bob.core/array_copy.h>
-#include <bob.core/array_random.h>
-#include <bob.math/inv.h>
-#include <bob.math/linear.h>
-#include <bob.core/check.h>
-#include <bob.core/array_repmat.h>
-#include <algorithm>
-
-
-//////////////////////////// JFATrainer ///////////////////////////
-bob::learn::misc::JFATrainer::JFATrainer():
-  m_rng(new boost::mt19937())
-{}
-
-bob::learn::misc::JFATrainer::JFATrainer(const bob::learn::misc::JFATrainer& other):
- m_rng(other.m_rng)
-{}
-
-bob::learn::misc::JFATrainer::~JFATrainer()
-{}
-
-bob::learn::misc::JFATrainer& bob::learn::misc::JFATrainer::operator=
-(const bob::learn::misc::JFATrainer& other)
-{
-  if (this != &other)
-  {
-    //m_max_iterations = other.m_max_iterations;
-    m_rng = other.m_rng;
-  }
-  return *this;
-}
-
-bool bob::learn::misc::JFATrainer::operator==(const bob::learn::misc::JFATrainer& b) const
-{
-  //return m_max_iterations == b.m_max_iterations && *m_rng == *(b.m_rng);
-  return *m_rng == *(b.m_rng);
-}
-
-bool bob::learn::misc::JFATrainer::operator!=(const bob::learn::misc::JFATrainer& b) const
-{
-  return !(this->operator==(b));
-}
-
-bool bob::learn::misc::JFATrainer::is_similar_to(const bob::learn::misc::JFATrainer& b,
-  const double r_epsilon, const double a_epsilon) const
-{
-  //return m_max_iterations == b.m_max_iterations && *m_rng == *(b.m_rng);
-  return *m_rng == *(b.m_rng);
-}
-
-void bob::learn::misc::JFATrainer::initialize(bob::learn::misc::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
-{
-  m_base_trainer.initUbmNidSumStatistics(machine.getBase(), ar);
-  m_base_trainer.initializeXYZ(ar);
-
-  blitz::Array<double,2>& U = machine.updateU();
-  bob::core::array::randn(*m_rng, U);
-  blitz::Array<double,2>& V = machine.updateV();
-  bob::core::array::randn(*m_rng, V);
-  blitz::Array<double,1>& D = machine.updateD();
-  bob::core::array::randn(*m_rng, D);
-  machine.precompute();
-}
-
-void bob::learn::misc::JFATrainer::eStep1(bob::learn::misc::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
-{
-  const bob::learn::misc::FABase& base = machine.getBase();
-  m_base_trainer.updateY(base, ar);
-  m_base_trainer.computeAccumulatorsV(base, ar);
-}
-
-void bob::learn::misc::JFATrainer::mStep1(bob::learn::misc::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
-{
-  blitz::Array<double,2>& V = machine.updateV();
-  m_base_trainer.updateV(V);
-}
-
-void bob::learn::misc::JFATrainer::finalize1(bob::learn::misc::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
-{
-  const bob::learn::misc::FABase& base = machine.getBase();
-  m_base_trainer.updateY(base, ar);
-}
-
-
-void bob::learn::misc::JFATrainer::eStep2(bob::learn::misc::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
-{
-  const bob::learn::misc::FABase& base = machine.getBase();
-  m_base_trainer.updateX(base, ar);
-  m_base_trainer.computeAccumulatorsU(base, ar);
-}
-
-void bob::learn::misc::JFATrainer::mStep2(bob::learn::misc::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
-{
-  blitz::Array<double,2>& U = machine.updateU();
-  m_base_trainer.updateU(U);
-  machine.precompute();
-}
-
-void bob::learn::misc::JFATrainer::finalize2(bob::learn::misc::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
-{
-  const bob::learn::misc::FABase& base = machine.getBase();
-  m_base_trainer.updateX(base, ar);
-}
-
-
-void bob::learn::misc::JFATrainer::eStep3(bob::learn::misc::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
-{
-  const bob::learn::misc::FABase& base = machine.getBase();
-  m_base_trainer.updateZ(base, ar);
-  m_base_trainer.computeAccumulatorsD(base, ar);
-}
-
-void bob::learn::misc::JFATrainer::mStep3(bob::learn::misc::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
-{
-  blitz::Array<double,1>& d = machine.updateD();
-  m_base_trainer.updateD(d);
-}
-
-void bob::learn::misc::JFATrainer::finalize3(bob::learn::misc::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
-{
-}
-
-/*
-void bob::learn::misc::JFATrainer::train_loop(bob::learn::misc::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
-{
-  // V subspace
-  for (size_t i=0; i<m_max_iterations; ++i) {
-    eStep1(machine, ar);
-    mStep1(machine, ar);
-  }
-  finalize1(machine, ar);
-  // U subspace
-  for (size_t i=0; i<m_max_iterations; ++i) {
-    eStep2(machine, ar);
-    mStep2(machine, ar);
-  }
-  finalize2(machine, ar);
-  // d subspace
-  for (size_t i=0; i<m_max_iterations; ++i) {
-    eStep3(machine, ar);
-    mStep3(machine, ar);
-  }
-  finalize3(machine, ar);
-}*/
-
-/*
-void bob::learn::misc::JFATrainer::train(bob::learn::misc::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
-{
-  initialize(machine, ar);
-  train_loop(machine, ar);
-}
-*/
-
-void bob::learn::misc::JFATrainer::enrol(bob::learn::misc::JFAMachine& machine,
-  const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& ar,
-  const size_t n_iter)
-{
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > vvec;
-  vvec.push_back(ar);
-
-  const bob::learn::misc::FABase& fb = machine.getJFABase()->getBase();
-
-  m_base_trainer.initUbmNidSumStatistics(fb, vvec);
-  m_base_trainer.initializeXYZ(vvec);
-
-  for (size_t i=0; i<n_iter; ++i) {
-    m_base_trainer.updateY(fb, vvec);
-    m_base_trainer.updateX(fb, vvec);
-    m_base_trainer.updateZ(fb, vvec);
-  }
-
-  const blitz::Array<double,1> y(m_base_trainer.getY()[0]);
-  const blitz::Array<double,1> z(m_base_trainer.getZ()[0]);
-  machine.setY(y);
-  machine.setZ(z);
-}
-
diff --git a/bob/learn/misc/cpp/KMeansMachine.cpp b/bob/learn/misc/cpp/KMeansMachine.cpp
deleted file mode 100644
index 7b4a5a20347e09b4bd332e11e271cb6f0d9fe28b..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/KMeansMachine.cpp
+++ /dev/null
@@ -1,258 +0,0 @@
-/**
- * @date Tue May 10 11:35:58 2011 +0200
- * @author Francois Moulin <Francois.Moulin@idiap.ch>
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <bob.learn.misc/KMeansMachine.h>
-
-#include <bob.core/assert.h>
-#include <bob.core/check.h>
-#include <bob.core/array_copy.h>
-#include <limits>
-
-bob::learn::misc::KMeansMachine::KMeansMachine():
-  m_n_means(0), m_n_inputs(0), m_means(0,0),
-  m_cache_means(0,0)
-{
-  m_means = 0;
-}
-
-bob::learn::misc::KMeansMachine::KMeansMachine(const size_t n_means, const size_t n_inputs):
-  m_n_means(n_means), m_n_inputs(n_inputs), m_means(n_means, n_inputs),
-  m_cache_means(n_means, n_inputs)
-{
-  m_means = 0;
-}
-
-bob::learn::misc::KMeansMachine::KMeansMachine(const blitz::Array<double,2>& means):
-  m_n_means(means.extent(0)), m_n_inputs(means.extent(1)),
-  m_means(bob::core::array::ccopy(means)),
-  m_cache_means(means.shape())
-{
-}
-
-bob::learn::misc::KMeansMachine::KMeansMachine(const bob::learn::misc::KMeansMachine& other):
-  m_n_means(other.m_n_means), m_n_inputs(other.m_n_inputs),
-  m_means(bob::core::array::ccopy(other.m_means)),
-  m_cache_means(other.m_cache_means.shape())
-{
-}
-
-bob::learn::misc::KMeansMachine::KMeansMachine(bob::io::base::HDF5File& config)
-{
-  load(config);
-}
-
-bob::learn::misc::KMeansMachine::~KMeansMachine() { }
-
-bob::learn::misc::KMeansMachine& bob::learn::misc::KMeansMachine::operator=
-(const bob::learn::misc::KMeansMachine& other)
-{
-  if(this != &other)
-  {
-    m_n_means = other.m_n_means;
-    m_n_inputs = other.m_n_inputs;
-    m_means.reference(bob::core::array::ccopy(other.m_means));
-    m_cache_means.resize(other.m_means.shape());
-  }
-  return *this;
-}
-
-bool bob::learn::misc::KMeansMachine::operator==(const bob::learn::misc::KMeansMachine& b) const
-{
-  return m_n_inputs == b.m_n_inputs && m_n_means == b.m_n_means &&
-         bob::core::array::isEqual(m_means, b.m_means);
-}
-
-bool bob::learn::misc::KMeansMachine::operator!=(const bob::learn::misc::KMeansMachine& b) const
-{
-  return !(this->operator==(b));
-}
-
-bool bob::learn::misc::KMeansMachine::is_similar_to(const bob::learn::misc::KMeansMachine& b,
-  const double r_epsilon, const double a_epsilon) const
-{
-  return m_n_inputs == b.m_n_inputs && m_n_means == b.m_n_means &&
-         bob::core::array::isClose(m_means, b.m_means, r_epsilon, a_epsilon);
-}
-
-void bob::learn::misc::KMeansMachine::load(bob::io::base::HDF5File& config)
-{
-  //reads all data directly into the member variables
-  m_means.reference(config.readArray<double,2>("means"));
-  m_n_means = m_means.extent(0);
-  m_n_inputs = m_means.extent(1);
-  m_cache_means.resize(m_n_means, m_n_inputs);
-}
-
-void bob::learn::misc::KMeansMachine::save(bob::io::base::HDF5File& config) const
-{
-  config.setArray("means", m_means);
-}
-
-void bob::learn::misc::KMeansMachine::setMeans(const blitz::Array<double,2> &means)
-{
-  bob::core::array::assertSameShape(means, m_means);
-  m_means = means;
-}
-
-void bob::learn::misc::KMeansMachine::setMean(const size_t i, const blitz::Array<double,1> &mean)
-{
-  if(i>=m_n_means) {
-    boost::format m("cannot set mean with index %lu: out of bounds [0,%lu[");
-    m % i % m_n_means;
-    throw std::runtime_error(m.str());
-  }
-  bob::core::array::assertSameDimensionLength(mean.extent(0), m_means.extent(1));
-  m_means(i,blitz::Range::all()) = mean;
-}
-
-const blitz::Array<double,1> bob::learn::misc::KMeansMachine::getMean(const size_t i) const
-{
-  if(i>=m_n_means) {
-    boost::format m("cannot get mean with index %lu: out of bounds [0,%lu[");
-    m % i % m_n_means;
-    throw std::runtime_error(m.str());
-  }
-
-  return m_means(i,blitz::Range::all());
-
-}
-
-double bob::learn::misc::KMeansMachine::getDistanceFromMean(const blitz::Array<double,1> &x,
-  const size_t i) const
-{
-  return blitz::sum(blitz::pow2(m_means(i,blitz::Range::all()) - x));
-}
-
-void bob::learn::misc::KMeansMachine::getClosestMean(const blitz::Array<double,1> &x,
-  size_t &closest_mean, double &min_distance) const
-{
-  min_distance = std::numeric_limits<double>::max();
-
-  for(size_t i=0; i<m_n_means; ++i) {
-    double this_distance = getDistanceFromMean(x,i);
-    if(this_distance < min_distance) {
-      min_distance = this_distance;
-      closest_mean = i;
-    }
-  }
-}
-
-double bob::learn::misc::KMeansMachine::getMinDistance(const blitz::Array<double,1>& input) const
-{
-  size_t closest_mean = 0;
-  double min_distance = 0;
-  getClosestMean(input,closest_mean,min_distance);
-  return min_distance;
-}
-
-void bob::learn::misc::KMeansMachine::getVariancesAndWeightsForEachClusterInit(blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const
-{
-  // check arguments
-  bob::core::array::assertSameShape(variances, m_means);
-  bob::core::array::assertSameDimensionLength(weights.extent(0), m_n_means);
-
-  // initialise output arrays
-  bob::core::array::assertSameShape(variances, m_means);
-  bob::core::array::assertSameDimensionLength(weights.extent(0), m_n_means);
-  variances = 0;
-  weights = 0;
-
-  // initialise (temporary) mean array
-  m_cache_means = 0;
-}
-
-void bob::learn::misc::KMeansMachine::getVariancesAndWeightsForEachClusterAcc(const blitz::Array<double,2>& data, blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const
-{
-  // check arguments
-  bob::core::array::assertSameShape(variances, m_means);
-  bob::core::array::assertSameDimensionLength(weights.extent(0), m_n_means);
-
-  // iterate over data
-  blitz::Range a = blitz::Range::all();
-  for(int i=0; i<data.extent(0); ++i) {
-    // - get example
-    blitz::Array<double,1> x(data(i,a));
-
-    // - find closest mean
-    size_t closest_mean = 0;
-    double min_distance = 0;
-    getClosestMean(x,closest_mean,min_distance);
-
-    // - accumulate stats
-    m_cache_means(closest_mean, blitz::Range::all()) += x;
-    variances(closest_mean, blitz::Range::all()) += blitz::pow2(x);
-    ++weights(closest_mean);
-  }
-}
-
-void bob::learn::misc::KMeansMachine::getVariancesAndWeightsForEachClusterFin(blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const
-{
-  // check arguments
-  bob::core::array::assertSameShape(variances, m_means);
-  bob::core::array::assertSameDimensionLength(weights.extent(0), m_n_means);
-
-  // calculate final variances and weights
-  blitz::firstIndex idx1;
-  blitz::secondIndex idx2;
-
-  // find means
-  m_cache_means = m_cache_means(idx1,idx2) / weights(idx1);
-
-  // find variances
-  variances = variances(idx1,idx2) / weights(idx1);
-  variances -= blitz::pow2(m_cache_means);
-
-  // find weights
-  weights = weights / blitz::sum(weights);
-}
-
-void bob::learn::misc::KMeansMachine::setCacheMeans(const blitz::Array<double,2> &cache_means)
-{
-  bob::core::array::assertSameShape(cache_means, m_cache_means);
-  m_cache_means = cache_means;
-}
-
-void bob::learn::misc::KMeansMachine::getVariancesAndWeightsForEachCluster(const blitz::Array<double,2>& data, blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const
-{
-  // initialise
-  getVariancesAndWeightsForEachClusterInit(variances, weights);
-  // accumulate
-  getVariancesAndWeightsForEachClusterAcc(data, variances, weights);
-  // merge/finalize
-  getVariancesAndWeightsForEachClusterFin(variances, weights);
-}
-
-void bob::learn::misc::KMeansMachine::forward(const blitz::Array<double,1>& input, double& output) const
-{
-  if(static_cast<size_t>(input.extent(0)) != m_n_inputs) {
-    boost::format m("machine input size (%u) does not match the size of input array (%d)");
-    m % m_n_inputs % input.extent(0);
-    throw std::runtime_error(m.str());
-  }
-  forward_(input,output);
-}
-
-void bob::learn::misc::KMeansMachine::forward_(const blitz::Array<double,1>& input, double& output) const
-{
-  output = getMinDistance(input);
-}
-
-void bob::learn::misc::KMeansMachine::resize(const size_t n_means, const size_t n_inputs)
-{
-  m_n_means = n_means;
-  m_n_inputs = n_inputs;
-  m_means.resizeAndPreserve(n_means, n_inputs);
-  m_cache_means.resizeAndPreserve(n_means, n_inputs);
-}
-
-namespace bob { namespace learn { namespace misc {
-  std::ostream& operator<<(std::ostream& os, const KMeansMachine& km) {
-    os << "Means = " << km.m_means << std::endl;
-    return os;
-  }
-} } }
diff --git a/bob/learn/misc/cpp/KMeansTrainer.cpp b/bob/learn/misc/cpp/KMeansTrainer.cpp
deleted file mode 100644
index 36c628c1d16faa0ce66dabe81c403d18ad3c49ea..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/KMeansTrainer.cpp
+++ /dev/null
@@ -1,228 +0,0 @@
-/**
- * @date Tue May 10 11:35:58 2011 +0200
- * @author Francois Moulin <Francois.Moulin@idiap.ch>
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <bob.learn.misc/KMeansTrainer.h>
-#include <bob.core/array_copy.h>
-
-#include <boost/random.hpp>
-#include <bob.core/random.h>
-
-
-bob::learn::misc::KMeansTrainer::KMeansTrainer(InitializationMethod i_m):
-m_rng(new boost::mt19937()),
-m_average_min_distance(0),
-m_zeroethOrderStats(0),
-m_firstOrderStats(0)
-{
-  m_initialization_method = i_m;
-}
-
-
-bob::learn::misc::KMeansTrainer::KMeansTrainer(const bob::learn::misc::KMeansTrainer& other){
-    
-  m_initialization_method = other.m_initialization_method;  
-  m_rng                   = other.m_rng;
-  m_average_min_distance  = other.m_average_min_distance;
-  m_zeroethOrderStats     = bob::core::array::ccopy(other.m_zeroethOrderStats); 
-  m_firstOrderStats       = bob::core::array::ccopy(other.m_firstOrderStats);
-}
-
-
-bob::learn::misc::KMeansTrainer& bob::learn::misc::KMeansTrainer::operator=
-(const bob::learn::misc::KMeansTrainer& other)
-{
-  if(this != &other)
-  {
-    m_rng                         = other.m_rng;
-    m_initialization_method       = other.m_initialization_method;
-    m_average_min_distance        = other.m_average_min_distance;
-
-    m_zeroethOrderStats = bob::core::array::ccopy(other.m_zeroethOrderStats);
-    m_firstOrderStats   = bob::core::array::ccopy(other.m_firstOrderStats);
-  }
-  return *this;
-}
-
-
-bool bob::learn::misc::KMeansTrainer::operator==(const bob::learn::misc::KMeansTrainer& b) const {
-  return
-         m_initialization_method == b.m_initialization_method &&
-         *m_rng == *(b.m_rng) && m_average_min_distance == b.m_average_min_distance &&
-         bob::core::array::hasSameShape(m_zeroethOrderStats, b.m_zeroethOrderStats) &&
-         bob::core::array::hasSameShape(m_firstOrderStats, b.m_firstOrderStats) &&
-         blitz::all(m_zeroethOrderStats == b.m_zeroethOrderStats) &&
-         blitz::all(m_firstOrderStats == b.m_firstOrderStats);
-}
-
-bool bob::learn::misc::KMeansTrainer::operator!=(const bob::learn::misc::KMeansTrainer& b) const {
-  return !(this->operator==(b));
-}
-
-void bob::learn::misc::KMeansTrainer::initialize(bob::learn::misc::KMeansMachine& kmeans,
-  const blitz::Array<double,2>& ar)
-{
-  // split data into as many chunks as there are means
-  size_t n_data = ar.extent(0);
-
-  // assign the i'th mean to a random example within the i'th chunk
-  blitz::Range a = blitz::Range::all();
-  if(m_initialization_method == RANDOM || m_initialization_method == RANDOM_NO_DUPLICATE) // Random initialization
-  {
-    unsigned int n_chunk = n_data / kmeans.getNMeans();
-    size_t n_max_trials = (size_t)n_chunk * 5;
-    blitz::Array<double,1> cur_mean;
-    if(m_initialization_method == RANDOM_NO_DUPLICATE)
-      cur_mean.resize(kmeans.getNInputs());
-
-    for(size_t i=0; i<kmeans.getNMeans(); ++i)
-    {
-      boost::uniform_int<> die(i*n_chunk, (i+1)*n_chunk-1);
-
-      // get random index within chunk
-      unsigned int index = die(*m_rng);
-
-      // get the example at that index
-      blitz::Array<double, 1> mean = ar(index,a);
-
-      if(m_initialization_method == RANDOM_NO_DUPLICATE)
-      {
-        size_t count = 0;
-        while(count < n_max_trials)
-        {
-          // check that the selected sampled is different than all the previously
-          // selected ones
-          bool valid = true;
-          for(size_t j=0; j<i && valid; ++j)
-          {
-            cur_mean = kmeans.getMean(j);
-            valid = blitz::any(mean != cur_mean);
-          }
-          // if different, stop otherwise, try with another one
-          if(valid)
-            break;
-          else
-          {
-            index = die(*m_rng);
-            mean = ar(index,a);
-            ++count;
-          }
-        }
-        // Initialization fails
-        if(count >= n_max_trials) {
-          boost::format m("initialization failure: surpassed the maximum number of trials (%u)");
-          m % n_max_trials;
-          throw std::runtime_error(m.str());
-        }
-      }
-
-      // set the mean
-      kmeans.setMean(i, mean);
-    }
-  }
-  else // K-Means++
-  {
-    // 1.a. Selects one sample randomly
-    boost::uniform_int<> die(0, n_data-1);
-    //   Gets the example at a random index
-    blitz::Array<double,1> mean = ar(die(*m_rng),a);
-    kmeans.setMean(0, mean);
-
-    // 1.b. Loops, computes probability distribution and select samples accordingly
-    blitz::Array<double,1> weights(n_data);
-    for(size_t m=1; m<kmeans.getNMeans(); ++m)
-    {
-      // For each sample, puts the distance to the closest mean in the weight vector
-      for(size_t s=0; s<n_data; ++s)
-      {
-        blitz::Array<double,1> s_cur = ar(s,a);
-        double& w_cur = weights(s);
-        // Initializes with the distance to first mean
-        w_cur = kmeans.getDistanceFromMean(s_cur, 0);
-        // Loops over the remaining mean and update the mean distance if required
-        for(size_t i=1; i<m; ++i)
-          w_cur = std::min(w_cur, kmeans.getDistanceFromMean(s_cur, i));
-      }
-      // Square and normalize the weights vectors such that
-      // \f$weights[x] = D(x)^{2} \sum_{y} D(y)^{2}\f$
-      weights = blitz::pow2(weights);
-      weights /= blitz::sum(weights);
-
-      // Takes a sample according to the weights distribution
-      // Blitz iterators is fine as the weights array should be C-style contiguous
-      bob::core::array::assertCContiguous(weights);
-      bob::core::random::discrete_distribution<> die2(weights.begin(), weights.end());
-      blitz::Array<double,1> new_mean = ar(die2(*m_rng),a);
-      kmeans.setMean(m, new_mean);
-    }
-  }
-   // Resize the accumulator
-  m_zeroethOrderStats.resize(kmeans.getNMeans());
-  m_firstOrderStats.resize(kmeans.getNMeans(), kmeans.getNInputs());
-}
-
-void bob::learn::misc::KMeansTrainer::eStep(bob::learn::misc::KMeansMachine& kmeans,
-  const blitz::Array<double,2>& ar)
-{
-  // initialise the accumulators
-  resetAccumulators(kmeans);
-
-  // iterate over data samples
-  blitz::Range a = blitz::Range::all();
-  for(int i=0; i<ar.extent(0); ++i) {
-    // get example
-    blitz::Array<double, 1> x(ar(i,a));
-
-    // find closest mean, and distance from that mean
-    size_t closest_mean = 0;
-    double min_distance = 0;
-    kmeans.getClosestMean(x,closest_mean,min_distance);
-
-    // accumulate the stats
-    m_average_min_distance += min_distance;
-    ++m_zeroethOrderStats(closest_mean);
-    m_firstOrderStats(closest_mean,blitz::Range::all()) += x;
-  }
-  m_average_min_distance /= static_cast<double>(ar.extent(0));
-}
-
-void bob::learn::misc::KMeansTrainer::mStep(bob::learn::misc::KMeansMachine& kmeans)
-{
-  blitz::Array<double,2>& means = kmeans.updateMeans();
-  for(size_t i=0; i<kmeans.getNMeans(); ++i)
-  {
-    means(i,blitz::Range::all()) =
-      m_firstOrderStats(i,blitz::Range::all()) / m_zeroethOrderStats(i);
-  }
-}
-
-double bob::learn::misc::KMeansTrainer::computeLikelihood(bob::learn::misc::KMeansMachine& kmeans)
-{
-  return m_average_min_distance;
-}
-
-
-bool bob::learn::misc::KMeansTrainer::resetAccumulators(bob::learn::misc::KMeansMachine& kmeans)
-{
-  m_average_min_distance = 0;
-  m_zeroethOrderStats = 0;
-  m_firstOrderStats = 0;
-  return true;
-}
-
-void bob::learn::misc::KMeansTrainer::setZeroethOrderStats(const blitz::Array<double,1>& zeroethOrderStats)
-{
-  bob::core::array::assertSameShape(m_zeroethOrderStats, zeroethOrderStats);
-  m_zeroethOrderStats = zeroethOrderStats;
-}
-
-void bob::learn::misc::KMeansTrainer::setFirstOrderStats(const blitz::Array<double,2>& firstOrderStats)
-{
-  bob::core::array::assertSameShape(m_firstOrderStats, firstOrderStats);
-  m_firstOrderStats = firstOrderStats;
-}
-
diff --git a/bob/learn/misc/cpp/LinearScoring.cpp b/bob/learn/misc/cpp/LinearScoring.cpp
deleted file mode 100644
index e504f229ab8baae9e8c0488cb6898457eaa8e31e..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/LinearScoring.cpp
+++ /dev/null
@@ -1,168 +0,0 @@
-/**
- * @date Wed Jul 13 16:00:04 2011 +0200
- * @author Francois Moulin <Francois.Moulin@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-#include <bob.learn.misc/LinearScoring.h>
-#include <bob.math/linear.h>
-
-
-static void _linearScoring(const std::vector<blitz::Array<double,1> >& models,
-                   const blitz::Array<double,1>& ubm_mean,
-                   const blitz::Array<double,1>& ubm_variance,
-                   const std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> >& test_stats,
-                   const std::vector<blitz::Array<double,1> >* test_channelOffset,
-                   const bool frame_length_normalisation,
-                   blitz::Array<double,2>& scores)
-{
-  int C = test_stats[0]->sumPx.extent(0);
-  int D = test_stats[0]->sumPx.extent(1);
-  int CD = C*D;
-  int Tt = test_stats.size();
-  int Tm = models.size();
-
-  // Check output size
-  bob::core::array::assertSameDimensionLength(scores.extent(0), models.size());
-  bob::core::array::assertSameDimensionLength(scores.extent(1), test_stats.size());
-
-  blitz::Array<double,2> A(Tm, CD);
-  blitz::Array<double,2> B(CD, Tt);
-
-  // 1) Compute A
-  for(int t=0; t<Tm; ++t) {
-    blitz::Array<double, 1> tmp = A(t, blitz::Range::all());
-    tmp = (models[t] - ubm_mean) / ubm_variance;
-  }
-
-  // 2) Compute B
-  if(test_channelOffset == 0) {
-    for(int t=0; t<Tt; ++t)
-      for(int s=0; s<CD; ++s)
-        B(s, t) = test_stats[t]->sumPx(s/D, s%D) - (ubm_mean(s) * test_stats[t]->n(s/D));
-  }
-  else {
-    bob::core::array::assertSameDimensionLength((*test_channelOffset).size(), Tt);
-
-    for(int t=0; t<Tt; ++t) {
-      bob::core::array::assertSameDimensionLength((*test_channelOffset)[t].extent(0), CD);
-      for(int s=0; s<CD; ++s)
-        B(s, t) = test_stats[t]->sumPx(s/D, s%D) - (test_stats[t]->n(s/D) * (ubm_mean(s) + (*test_channelOffset)[t](s)));
-    }
-  }
-
-  // Apply the normalisation if needed
-  if(frame_length_normalisation) {
-    for(int t=0; t<Tt; ++t) {
-      double sum_N = test_stats[t]->T;
-      blitz::Array<double, 1> v_t = B(blitz::Range::all(),t);
-
-      if (sum_N <= std::numeric_limits<double>::epsilon() && sum_N >= -std::numeric_limits<double>::epsilon())
-        v_t = 0;
-      else
-        v_t /= sum_N;
-    }
-  }
-
-  // 3) Compute LLR
-  bob::math::prod(A, B, scores);
-}
-
-
-void bob::learn::misc::linearScoring(const std::vector<blitz::Array<double,1> >& models,
-                   const blitz::Array<double,1>& ubm_mean, const blitz::Array<double,1>& ubm_variance,
-                   const std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> >& test_stats,
-                   const std::vector<blitz::Array<double,1> >& test_channelOffset,
-                   const bool frame_length_normalisation,
-                   blitz::Array<double, 2>& scores)
-{
-  _linearScoring(models, ubm_mean, ubm_variance, test_stats, &test_channelOffset, frame_length_normalisation, scores);
-}
-
-void bob::learn::misc::linearScoring(const std::vector<blitz::Array<double,1> >& models,
-                   const blitz::Array<double,1>& ubm_mean, const blitz::Array<double,1>& ubm_variance,
-                   const std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> >& test_stats,
-                   const bool frame_length_normalisation,
-                   blitz::Array<double, 2>& scores)
-{
-  _linearScoring(models, ubm_mean, ubm_variance, test_stats, 0, frame_length_normalisation, scores);
-}
-
-void bob::learn::misc::linearScoring(const std::vector<boost::shared_ptr<const bob::learn::misc::GMMMachine> >& models,
-                   const bob::learn::misc::GMMMachine& ubm,
-                   const std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> >& test_stats,
-                   const bool frame_length_normalisation,
-                   blitz::Array<double, 2>& scores)
-{
-  int C = test_stats[0]->sumPx.extent(0);
-  int D = test_stats[0]->sumPx.extent(1);
-  int CD = C*D;
-  std::vector<blitz::Array<double,1> > models_b;
-  // Allocate and get the mean supervector
-  for(size_t i=0; i<models.size(); ++i) {
-    blitz::Array<double,1> mod(CD);
-    mod = models[i]->getMeanSupervector();
-    models_b.push_back(mod);
-  }
-  const blitz::Array<double,1>& ubm_mean = ubm.getMeanSupervector();
-  const blitz::Array<double,1>& ubm_variance = ubm.getVarianceSupervector();
-  _linearScoring(models_b, ubm_mean, ubm_variance, test_stats, 0, frame_length_normalisation, scores);
-}
-
-void bob::learn::misc::linearScoring(const std::vector<boost::shared_ptr<const bob::learn::misc::GMMMachine> >& models,
-                   const bob::learn::misc::GMMMachine& ubm,
-                   const std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> >& test_stats,
-                   const std::vector<blitz::Array<double,1> >& test_channelOffset,
-                   const bool frame_length_normalisation,
-                   blitz::Array<double, 2>& scores)
-{
-  int C = test_stats[0]->sumPx.extent(0);
-  int D = test_stats[0]->sumPx.extent(1);
-  int CD = C*D;
-  std::vector<blitz::Array<double,1> > models_b;
-  // Allocate and get the mean supervector
-  for(size_t i=0; i<models.size(); ++i) {
-    blitz::Array<double,1> mod(CD);
-    mod = models[i]->getMeanSupervector();
-    models_b.push_back(mod);
-  }
-  const blitz::Array<double,1>& ubm_mean = ubm.getMeanSupervector();
-  const blitz::Array<double,1>& ubm_variance = ubm.getVarianceSupervector();
-  _linearScoring(models_b, ubm_mean, ubm_variance, test_stats, &test_channelOffset, frame_length_normalisation, scores);
-}
-
-
-
-double bob::learn::misc::linearScoring(const blitz::Array<double,1>& models,
-                     const blitz::Array<double,1>& ubm_mean, const blitz::Array<double,1>& ubm_variance,
-                     const bob::learn::misc::GMMStats& test_stats,
-                     const blitz::Array<double,1>& test_channelOffset,
-                     const bool frame_length_normalisation)
-{
-  int C = test_stats.sumPx.extent(0);
-  int D = test_stats.sumPx.extent(1);
-  int CD = C*D;
-
-
-  blitz::Array<double,1> A(CD);
-  blitz::Array<double,1> B(CD);
-
-  // 1) Compute A
-  A = (models - ubm_mean) / ubm_variance;
-
-  // 2) Compute B
-  for (int s=0; s<CD; ++s)
-    B(s) = test_stats.sumPx(s/D, s%D) - (test_stats.n(s/D) * (ubm_mean(s) + test_channelOffset(s)));
-
-  // Apply the normalisation if needed
-  if (frame_length_normalisation) {
-    double sum_N = test_stats.T;
-    if (sum_N == 0)
-      B = 0;
-    else
-      B /= sum_N;
-  }
-
-  return blitz::sum(A * B);
-}
-
diff --git a/bob/learn/misc/cpp/MAP_GMMTrainer.cpp b/bob/learn/misc/cpp/MAP_GMMTrainer.cpp
deleted file mode 100644
index d20b15096c340254f7fe0e37058cc532dbfb7929..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/MAP_GMMTrainer.cpp
+++ /dev/null
@@ -1,199 +0,0 @@
-/**
- * @date Tue May 10 11:35:58 2011 +0200
- * @author Francois Moulin <Francois.Moulin@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <bob.learn.misc/MAP_GMMTrainer.h>
-#include <bob.core/check.h>
-
-bob::learn::misc::MAP_GMMTrainer::MAP_GMMTrainer(
-   const bool update_means,
-   const bool update_variances,
-   const bool update_weights,
-   const double mean_var_update_responsibilities_threshold,
-
-   const bool reynolds_adaptation, 
-   const double relevance_factor, 
-   const double alpha,
-   boost::shared_ptr<bob::learn::misc::GMMMachine> prior_gmm):
-
-  m_gmm_base_trainer(update_means, update_variances, update_weights, mean_var_update_responsibilities_threshold),
-  m_prior_gmm(prior_gmm)
-{
-  m_reynolds_adaptation = reynolds_adaptation;
-  m_relevance_factor    = relevance_factor;
-  m_alpha               = alpha;
-}
-
-
-bob::learn::misc::MAP_GMMTrainer::MAP_GMMTrainer(const bob::learn::misc::MAP_GMMTrainer& b):
-  m_gmm_base_trainer(b.m_gmm_base_trainer),
-  m_prior_gmm(b.m_prior_gmm)
-{
-  m_relevance_factor    = b.m_relevance_factor;
-  m_alpha               = b.m_alpha; 
-  m_reynolds_adaptation = b.m_reynolds_adaptation;
-}
-
-bob::learn::misc::MAP_GMMTrainer::~MAP_GMMTrainer()
-{}
-
-void bob::learn::misc::MAP_GMMTrainer::initialize(bob::learn::misc::GMMMachine& gmm)
-{
-  // Check that the prior GMM has been specified
-  if (!m_prior_gmm)
-    throw std::runtime_error("MAP_GMMTrainer: Prior GMM distribution has not been set");
-
-  // Allocate memory for the sufficient statistics and initialise
-  m_gmm_base_trainer.initialize(gmm);
-
-  const size_t n_gaussians = gmm.getNGaussians();
-  // TODO: check size?
-  gmm.setWeights(m_prior_gmm->getWeights());
-  for(size_t i=0; i<n_gaussians; ++i)
-  {
-    gmm.getGaussian(i)->updateMean() = m_prior_gmm->getGaussian(i)->getMean();
-    gmm.getGaussian(i)->updateVariance() = m_prior_gmm->getGaussian(i)->getVariance();
-    gmm.getGaussian(i)->applyVarianceThresholds();
-  }
-  // Initializes cache
-  m_cache_alpha.resize(n_gaussians);
-  m_cache_ml_weights.resize(n_gaussians);
-}
-
-bool bob::learn::misc::MAP_GMMTrainer::setPriorGMM(boost::shared_ptr<bob::learn::misc::GMMMachine> prior_gmm)
-{
-  if (!prior_gmm) return false;
-  m_prior_gmm = prior_gmm;
-  return true;
-}
-
-
-void bob::learn::misc::MAP_GMMTrainer::mStep(bob::learn::misc::GMMMachine& gmm)
-{
-  // Read options and variables
-  double n_gaussians = gmm.getNGaussians();
-
-  // Check that the prior GMM has been specified
-  if (!m_prior_gmm)
-    throw std::runtime_error("MAP_GMMTrainer: Prior GMM distribution has not been set");
-
-  blitz::firstIndex i;
-  blitz::secondIndex j;
-
-  // Calculate the "data-dependent adaptation coefficient", alpha_i
-  // TODO: check if required // m_cache_alpha.resize(n_gaussians);
-  if (!m_reynolds_adaptation)
-    m_cache_alpha = m_alpha;
-  else
-    m_cache_alpha = m_gmm_base_trainer.getGMMStats().n(i) / (m_gmm_base_trainer.getGMMStats().n(i) + m_relevance_factor);
-
-  // - Update weights if requested
-  //   Equation 11 of Reynolds et al., "Speaker Verification Using Adapted Gaussian Mixture Models", Digital Signal Processing, 2000
-  if (m_gmm_base_trainer.getUpdateWeights()) {
-    // Calculate the maximum likelihood weights
-    m_cache_ml_weights = m_gmm_base_trainer.getGMMStats().n / static_cast<double>(m_gmm_base_trainer.getGMMStats().T); //cast req. for linux/32-bits & osx
-
-    // Get the prior weights
-    const blitz::Array<double,1>& prior_weights = m_prior_gmm->getWeights();
-    blitz::Array<double,1>& new_weights = gmm.updateWeights();
-
-    // Calculate the new weights
-    new_weights = m_cache_alpha * m_cache_ml_weights + (1-m_cache_alpha) * prior_weights;
-
-    // Apply the scale factor, gamma, to ensure the new weights sum to unity
-    double gamma = blitz::sum(new_weights);
-    new_weights /= gamma;
-
-    // Recompute the log weights in the cache of the GMMMachine
-    gmm.recomputeLogWeights();
-  }
-
-  // Update GMM parameters
-  // - Update means if requested
-  //   Equation 12 of Reynolds et al., "Speaker Verification Using Adapted Gaussian Mixture Models", Digital Signal Processing, 2000
-  if (m_gmm_base_trainer.getUpdateMeans()) {
-    // Calculate new means
-    for (size_t i=0; i<n_gaussians; ++i) {
-      const blitz::Array<double,1>& prior_means = m_prior_gmm->getGaussian(i)->getMean();
-      blitz::Array<double,1>& means = gmm.getGaussian(i)->updateMean();
-      if (m_gmm_base_trainer.getGMMStats().n(i) < m_gmm_base_trainer.getMeanVarUpdateResponsibilitiesThreshold()) {
-        means = prior_means;
-      }
-      else {
-        // Use the maximum likelihood means
-        means = m_cache_alpha(i) * (m_gmm_base_trainer.getGMMStats().sumPx(i,blitz::Range::all()) / m_gmm_base_trainer.getGMMStats().n(i)) + (1-m_cache_alpha(i)) * prior_means;
-      }
-    }
-  }
-
-  // - Update variance if requested
-  //   Equation 13 of Reynolds et al., "Speaker Verification Using Adapted Gaussian Mixture Models", Digital Signal Processing, 2000
-  if (m_gmm_base_trainer.getUpdateVariances()) {
-    // Calculate new variances (equation 13)
-    for (size_t i=0; i<n_gaussians; ++i) {
-      const blitz::Array<double,1>& prior_means = m_prior_gmm->getGaussian(i)->getMean();
-      blitz::Array<double,1>& means = gmm.getGaussian(i)->updateMean();
-      const blitz::Array<double,1>& prior_variances = m_prior_gmm->getGaussian(i)->getVariance();
-      blitz::Array<double,1>& variances = gmm.getGaussian(i)->updateVariance();
-      if (m_gmm_base_trainer.getGMMStats().n(i) < m_gmm_base_trainer.getMeanVarUpdateResponsibilitiesThreshold()) {
-        variances = (prior_variances + prior_means) - blitz::pow2(means);
-      }
-      else {
-        variances = m_cache_alpha(i) * m_gmm_base_trainer.getGMMStats().sumPxx(i,blitz::Range::all()) / m_gmm_base_trainer.getGMMStats().n(i) + (1-m_cache_alpha(i)) * (prior_variances + prior_means) - blitz::pow2(means);
-      }
-      gmm.getGaussian(i)->applyVarianceThresholds();
-    }
-  }
-}
-
-
-
-bob::learn::misc::MAP_GMMTrainer& bob::learn::misc::MAP_GMMTrainer::operator=
-  (const bob::learn::misc::MAP_GMMTrainer &other)
-{
-  if (this != &other)
-  {
-    m_gmm_base_trainer    = other.m_gmm_base_trainer;
-    m_relevance_factor    = other.m_relevance_factor;
-    m_prior_gmm           = other.m_prior_gmm;
-    m_alpha               = other.m_alpha;
-    m_reynolds_adaptation = other.m_reynolds_adaptation;
-    m_cache_alpha.resize(other.m_cache_alpha.extent(0));
-    m_cache_ml_weights.resize(other.m_cache_ml_weights.extent(0));
-  }
-  return *this;
-}
-
-
-bool bob::learn::misc::MAP_GMMTrainer::operator==
-  (const bob::learn::misc::MAP_GMMTrainer &other) const
-{
-  return m_gmm_base_trainer    == other.m_gmm_base_trainer &&
-         m_relevance_factor    == other.m_relevance_factor &&
-         m_prior_gmm           == other.m_prior_gmm &&
-         m_alpha               == other.m_alpha &&
-         m_reynolds_adaptation == other.m_reynolds_adaptation;
-}
-
-
-bool bob::learn::misc::MAP_GMMTrainer::operator!=
-  (const bob::learn::misc::MAP_GMMTrainer &other) const
-{
-  return !(this->operator==(other));
-}
-
-
-bool bob::learn::misc::MAP_GMMTrainer::is_similar_to
-  (const bob::learn::misc::MAP_GMMTrainer &other, const double r_epsilon,
-   const double a_epsilon) const
-{
-  return //m_gmm_base_trainer.is_similar_to(other.m_gmm_base_trainer, r_epsilon, a_epsilon) &&
-         bob::core::isClose(m_relevance_factor, other.m_relevance_factor, r_epsilon, a_epsilon) &&
-         m_prior_gmm == other.m_prior_gmm &&
-         bob::core::isClose(m_alpha, other.m_alpha, r_epsilon, a_epsilon) &&
-         m_reynolds_adaptation == other.m_reynolds_adaptation;
-}
-
diff --git a/bob/learn/misc/cpp/ML_GMMTrainer.cpp b/bob/learn/misc/cpp/ML_GMMTrainer.cpp
deleted file mode 100644
index f08fb2f67737cad2f469e348fff85ea714072f30..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/ML_GMMTrainer.cpp
+++ /dev/null
@@ -1,112 +0,0 @@
-/**
- * @date Tue May 10 11:35:58 2011 +0200
- * @author Francois Moulin <Francois.Moulin@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <bob.learn.misc/ML_GMMTrainer.h>
-#include <algorithm>
-
-bob::learn::misc::ML_GMMTrainer::ML_GMMTrainer(
-   const bool update_means,
-   const bool update_variances, 
-   const bool update_weights,
-   const double mean_var_update_responsibilities_threshold
-):
-  m_gmm_base_trainer(update_means, update_variances, update_weights, mean_var_update_responsibilities_threshold)
-{}
-
-
-
-bob::learn::misc::ML_GMMTrainer::ML_GMMTrainer(const bob::learn::misc::ML_GMMTrainer& b):
-  m_gmm_base_trainer(b.m_gmm_base_trainer)
-{}
-
-bob::learn::misc::ML_GMMTrainer::~ML_GMMTrainer()
-{}
-
-void bob::learn::misc::ML_GMMTrainer::initialize(bob::learn::misc::GMMMachine& gmm)
-{
-  m_gmm_base_trainer.initialize(gmm);
-  
-  // Allocate cache
-  size_t n_gaussians = gmm.getNGaussians();
-  m_cache_ss_n_thresholded.resize(n_gaussians);
-}
-
-
-void bob::learn::misc::ML_GMMTrainer::mStep(bob::learn::misc::GMMMachine& gmm)
-{
-  // Read options and variables
-  const size_t n_gaussians = gmm.getNGaussians();
-
-  // - Update weights if requested
-  //   Equation 9.26 of Bishop, "Pattern recognition and machine learning", 2006
-  if (m_gmm_base_trainer.getUpdateWeights()) {
-    blitz::Array<double,1>& weights = gmm.updateWeights();
-    weights = m_gmm_base_trainer.getGMMStats().n / static_cast<double>(m_gmm_base_trainer.getGMMStats().T); //cast req. for linux/32-bits & osx
-    // Recompute the log weights in the cache of the GMMMachine
-    gmm.recomputeLogWeights();
-  }
-
-  // Generate a thresholded version of m_ss.n
-  for(size_t i=0; i<n_gaussians; ++i)
-    m_cache_ss_n_thresholded(i) = std::max(m_gmm_base_trainer.getGMMStats().n(i), m_gmm_base_trainer.getMeanVarUpdateResponsibilitiesThreshold());
-
-  // Update GMM parameters using the sufficient statistics (m_ss)
-  // - Update means if requested
-  //   Equation 9.24 of Bishop, "Pattern recognition and machine learning", 2006
-  if (m_gmm_base_trainer.getUpdateMeans()) {
-    for(size_t i=0; i<n_gaussians; ++i) {
-      blitz::Array<double,1>& means = gmm.getGaussian(i)->updateMean();
-      means = m_gmm_base_trainer.getGMMStats().sumPx(i, blitz::Range::all()) / m_cache_ss_n_thresholded(i);
-    }
-  }
-
-  // - Update variance if requested
-  //   See Equation 9.25 of Bishop, "Pattern recognition and machine learning", 2006
-  //   ...but we use the "computational formula for the variance", i.e.
-  //   var = 1/n * sum (P(x-mean)(x-mean))
-  //       = 1/n * sum (Pxx) - mean^2
-  if (m_gmm_base_trainer.getUpdateVariances()) {
-    for(size_t i=0; i<n_gaussians; ++i) {
-      const blitz::Array<double,1>& means = gmm.getGaussian(i)->getMean();
-      blitz::Array<double,1>& variances = gmm.getGaussian(i)->updateVariance();
-      variances = m_gmm_base_trainer.getGMMStats().sumPxx(i, blitz::Range::all()) / m_cache_ss_n_thresholded(i) - blitz::pow2(means);
-      gmm.getGaussian(i)->applyVarianceThresholds();
-    }
-  }
-}
-
-bob::learn::misc::ML_GMMTrainer& bob::learn::misc::ML_GMMTrainer::operator=
-  (const bob::learn::misc::ML_GMMTrainer &other)
-{
-  if (this != &other)
-  {
-    m_gmm_base_trainer = other.m_gmm_base_trainer;
-    m_cache_ss_n_thresholded.resize(other.m_cache_ss_n_thresholded.extent(0));
-  }
-  return *this;
-}
-
-bool bob::learn::misc::ML_GMMTrainer::operator==
-  (const bob::learn::misc::ML_GMMTrainer &other) const
-{
-  return m_gmm_base_trainer == other.m_gmm_base_trainer;
-}
-
-bool bob::learn::misc::ML_GMMTrainer::operator!=
-  (const bob::learn::misc::ML_GMMTrainer &other) const
-{
-  return !(this->operator==(other));
-}
-
-/*
-bool bob::learn::misc::ML_GMMTrainer::is_similar_to
-  (const bob::learn::misc::ML_GMMTrainer &other, const double r_epsilon,
-   const double a_epsilon) const
-{
-  return m_gmm_base_trainer.is_similar_to(other, r_epsilon, a_epsilon);
-}
-*/
diff --git a/bob/learn/misc/cpp/PLDAMachine.cpp b/bob/learn/misc/cpp/PLDAMachine.cpp
deleted file mode 100644
index 4193c85591b799e718dd93fdafb7707ab6d8650e..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/PLDAMachine.cpp
+++ /dev/null
@@ -1,960 +0,0 @@
-/**
- * @date Fri Oct 14 18:07:56 2011 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * @brief Machines that implements the PLDA model
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <bob.core/assert.h>
-#include <bob.core/check.h>
-#include <bob.core/array_copy.h>
-#include <bob.learn.misc/PLDAMachine.h>
-#include <bob.math/linear.h>
-#include <bob.math/det.h>
-#include <bob.math/inv.h>
-
-#include <cmath>
-#include <boost/lexical_cast.hpp>
-#include <string>
-
-bob::learn::misc::PLDABase::PLDABase():
-  m_variance_threshold(0.)
-{
-  resizeNoInit(0, 0, 0);
-}
-
-bob::learn::misc::PLDABase::PLDABase(const size_t dim_d, const size_t dim_f,
-    const size_t dim_g, const double variance_threshold):
-  m_variance_threshold(variance_threshold)
-{
-  resize(dim_d, dim_f, dim_g);
-}
-
-
-bob::learn::misc::PLDABase::PLDABase(const bob::learn::misc::PLDABase& other):
-  m_dim_d(other.m_dim_d),
-  m_dim_f(other.m_dim_f),
-  m_dim_g(other.m_dim_g),
-  m_F(bob::core::array::ccopy(other.m_F)),
-  m_G(bob::core::array::ccopy(other.m_G)),
-  m_sigma(bob::core::array::ccopy(other.m_sigma)),
-  m_mu(bob::core::array::ccopy(other.m_mu)),
-  m_variance_threshold(other.m_variance_threshold),
-  m_cache_isigma(bob::core::array::ccopy(other.m_cache_isigma)),
-  m_cache_alpha(bob::core::array::ccopy(other.m_cache_alpha)),
-  m_cache_beta(bob::core::array::ccopy(other.m_cache_beta)),
-  m_cache_gamma(),
-  m_cache_Ft_beta(bob::core::array::ccopy(other.m_cache_Ft_beta)),
-  m_cache_Gt_isigma(bob::core::array::ccopy(other.m_cache_Gt_isigma)),
-  m_cache_logdet_alpha(other.m_cache_logdet_alpha),
-  m_cache_logdet_sigma(other.m_cache_logdet_sigma),
-  m_cache_loglike_constterm(other.m_cache_loglike_constterm)
-{
-  bob::core::array::ccopy(other.m_cache_gamma, m_cache_gamma);
-  resizeTmp();
-}
-
-bob::learn::misc::PLDABase::PLDABase(bob::io::base::HDF5File& config) {
-  load(config);
-}
-
-bob::learn::misc::PLDABase::~PLDABase() {
-}
-
-bob::learn::misc::PLDABase& bob::learn::misc::PLDABase::operator=
-    (const bob::learn::misc::PLDABase& other)
-{
-  if (this != &other)
-  {
-    m_dim_d = other.m_dim_d;
-    m_dim_f = other.m_dim_f;
-    m_dim_g = other.m_dim_g;
-    m_F.reference(bob::core::array::ccopy(other.m_F));
-    m_G.reference(bob::core::array::ccopy(other.m_G));
-    m_sigma.reference(bob::core::array::ccopy(other.m_sigma));
-    m_mu.reference(bob::core::array::ccopy(other.m_mu));
-    m_variance_threshold = other.m_variance_threshold;
-    m_cache_isigma.reference(bob::core::array::ccopy(other.m_cache_isigma));
-    m_cache_alpha.reference(bob::core::array::ccopy(other.m_cache_alpha));
-    m_cache_beta.reference(bob::core::array::ccopy(other.m_cache_beta));
-    bob::core::array::ccopy(other.m_cache_gamma, m_cache_gamma);
-    m_cache_Ft_beta.reference(bob::core::array::ccopy(other.m_cache_Ft_beta));
-    m_cache_Gt_isigma.reference(bob::core::array::ccopy(other.m_cache_Gt_isigma));
-    m_cache_logdet_alpha = other.m_cache_logdet_alpha;
-    m_cache_logdet_sigma = other.m_cache_logdet_sigma;
-    m_cache_loglike_constterm = other.m_cache_loglike_constterm;
-    resizeTmp();
-  }
-  return *this;
-}
-
-bool bob::learn::misc::PLDABase::operator==
-    (const bob::learn::misc::PLDABase& b) const
-{
-  if (!(m_dim_d == b.m_dim_d && m_dim_f == b.m_dim_f &&
-        m_dim_g == b.m_dim_g &&
-        bob::core::array::isEqual(m_F, b.m_F) &&
-        bob::core::array::isEqual(m_G, b.m_G) &&
-        bob::core::array::isEqual(m_sigma, b.m_sigma) &&
-        bob::core::array::isEqual(m_mu, b.m_mu) &&
-        m_variance_threshold == b.m_variance_threshold &&
-        bob::core::array::isEqual(m_cache_isigma, b.m_cache_isigma) &&
-        bob::core::array::isEqual(m_cache_alpha, b.m_cache_alpha) &&
-        bob::core::array::isEqual(m_cache_beta, b.m_cache_beta) &&
-        bob::core::array::isEqual(m_cache_gamma, b.m_cache_gamma) &&
-        bob::core::array::isEqual(m_cache_Ft_beta, b.m_cache_Ft_beta) &&
-        bob::core::array::isEqual(m_cache_Gt_isigma, b.m_cache_Gt_isigma) &&
-        m_cache_logdet_alpha == b.m_cache_logdet_alpha &&
-        m_cache_logdet_sigma == b.m_cache_logdet_sigma))
-    return false;
-
-  // m_cache_loglike_constterm
-  if (this->m_cache_loglike_constterm.size() != b.m_cache_loglike_constterm.size())
-    return false;  // differing sizes, they are not the same
-  std::map<size_t, double>::const_iterator i, j;
-  for (i = this->m_cache_loglike_constterm.begin(), j = b.m_cache_loglike_constterm.begin();
-    i != this->m_cache_loglike_constterm.end(); ++i, ++j)
-  {
-    if (i->first != j->first || i->second != j->second)
-      return false;
-  }
-
-  return true;
-}
-
-bool bob::learn::misc::PLDABase::operator!=
-    (const bob::learn::misc::PLDABase& b) const
-{
-  return !(this->operator==(b));
-}
-
-bool bob::learn::misc::PLDABase::is_similar_to(const bob::learn::misc::PLDABase& b,
-  const double r_epsilon, const double a_epsilon) const
-{
-  return (m_dim_d == b.m_dim_d && m_dim_f == b.m_dim_f &&
-          m_dim_g == b.m_dim_g &&
-          bob::core::array::isClose(m_F, b.m_F, r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(m_G, b.m_G, r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(m_sigma, b.m_sigma, r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(m_mu, b.m_mu, r_epsilon, a_epsilon) &&
-          bob::core::isClose(m_variance_threshold, b.m_variance_threshold, r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(m_cache_isigma, b.m_cache_isigma, r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(m_cache_alpha, b.m_cache_alpha, r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(m_cache_beta, b.m_cache_beta, r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(m_cache_gamma, b.m_cache_gamma, r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(m_cache_Ft_beta, b.m_cache_Ft_beta, r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(m_cache_Gt_isigma, b.m_cache_Gt_isigma, r_epsilon, a_epsilon) &&
-          bob::core::isClose(m_cache_logdet_alpha, b.m_cache_logdet_alpha, r_epsilon, a_epsilon) &&
-          bob::core::isClose(m_cache_logdet_sigma, b.m_cache_logdet_sigma, r_epsilon, a_epsilon) &&
-          bob::core::isClose(m_cache_loglike_constterm, b.m_cache_loglike_constterm));
-}
-
-void bob::learn::misc::PLDABase::load(bob::io::base::HDF5File& config)
-{
-  if (!config.contains("dim_d"))
-  {
-    // Then the model was saved using bob < 1.2.0
-    //reads all data directly into the member variables
-    m_F.reference(config.readArray<double,2>("F"));
-    m_G.reference(config.readArray<double,2>("G"));
-    m_dim_d = m_F.extent(0);
-    m_dim_f = m_F.extent(1);
-    m_dim_g = m_G.extent(1);
-    m_sigma.reference(config.readArray<double,1>("sigma"));
-    m_mu.reference(config.readArray<double,1>("mu"));
-    m_cache_isigma.resize(m_dim_d);
-    precomputeISigma();
-    m_variance_threshold = 0.;
-    m_cache_alpha.reference(config.readArray<double,2>("alpha"));
-    m_cache_beta.reference(config.readArray<double,2>("beta"));
-    // gamma and log like constant term (a-dependent terms)
-    if (config.contains("a_indices"))
-    {
-      blitz::Array<uint32_t, 1> a_indices;
-      a_indices.reference(config.readArray<uint32_t,1>("a_indices"));
-      for (int i=0; i<a_indices.extent(0); ++i)
-      {
-        std::string str1 = "gamma_" + boost::lexical_cast<std::string>(a_indices(i));
-        m_cache_gamma[a_indices(i)].reference(config.readArray<double,2>(str1));
-        std::string str2 = "loglikeconstterm_" + boost::lexical_cast<std::string>(a_indices(i));
-        m_cache_loglike_constterm[a_indices(i)] = config.read<double>(str2);
-      }
-    }
-    m_cache_Ft_beta.reference(config.readArray<double,2>("Ft_beta"));
-    m_cache_Gt_isigma.reference(config.readArray<double,2>("Gt_isigma"));
-    m_cache_logdet_alpha = config.read<double>("logdet_alpha");
-    m_cache_logdet_sigma = config.read<double>("logdet_sigma");
-  }
-  else
-  {
-    // Then the model was saved using bob >= 1.2.0
-    //reads all data directly into the member variables
-    m_F.reference(config.readArray<double,2>("F"));
-    m_G.reference(config.readArray<double,2>("G"));
-    // Conditional because previous versions had not these variables
-    m_dim_d = config.read<uint64_t>("dim_d");
-    m_dim_f = config.read<uint64_t>("dim_f");
-    m_dim_g = config.read<uint64_t>("dim_g");
-    m_sigma.reference(config.readArray<double,1>("sigma"));
-    m_mu.reference(config.readArray<double,1>("mu"));
-    m_cache_isigma.resize(m_dim_d);
-    precomputeISigma();
-    if (config.contains("variance_threshold"))
-      m_variance_threshold = config.read<double>("variance_threshold");
-    else if (config.contains("variance_thresholds")) // In case 1.2.0 alpha/beta version has been used
-    {
-      blitz::Array<double,1> tmp;
-      tmp.reference(config.readArray<double,1>("variance_thresholds"));
-      m_variance_threshold = tmp(0);
-    }
-    m_cache_alpha.reference(config.readArray<double,2>("alpha"));
-    m_cache_beta.reference(config.readArray<double,2>("beta"));
-    // gamma's (a-dependent terms)
-    if(config.contains("a_indices_gamma"))
-    {
-      blitz::Array<uint32_t, 1> a_indices;
-      a_indices.reference(config.readArray<uint32_t,1>("a_indices_gamma"));
-      for(int i=0; i<a_indices.extent(0); ++i)
-      {
-        std::string str = "gamma_" + boost::lexical_cast<std::string>(a_indices(i));
-        m_cache_gamma[a_indices(i)].reference(config.readArray<double,2>(str));
-      }
-    }
-    // log likelihood constant term's (a-dependent terms)
-    if(config.contains("a_indices_loglikeconstterm"))
-    {
-      blitz::Array<uint32_t, 1> a_indices;
-      a_indices.reference(config.readArray<uint32_t,1>("a_indices_loglikeconstterm"));
-      for(int i=0; i<a_indices.extent(0); ++i)
-      {
-        std::string str = "loglikeconstterm_" + boost::lexical_cast<std::string>(a_indices(i));
-        m_cache_loglike_constterm[a_indices(i)] = config.read<double>(str);
-      }
-    }
-    m_cache_Ft_beta.reference(config.readArray<double,2>("Ft_beta"));
-    m_cache_Gt_isigma.reference(config.readArray<double,2>("Gt_isigma"));
-    m_cache_logdet_alpha = config.read<double>("logdet_alpha");
-    m_cache_logdet_sigma = config.read<double>("logdet_sigma");
-  }
-  resizeTmp();
-}
-
-void bob::learn::misc::PLDABase::save(bob::io::base::HDF5File& config) const
-{
-  config.set("dim_d", (uint64_t)m_dim_d);
-  config.set("dim_f", (uint64_t)m_dim_f);
-  config.set("dim_g", (uint64_t)m_dim_g);
-  config.setArray("F", m_F);
-  config.setArray("G", m_G);
-  config.setArray("sigma", m_sigma);
-  config.setArray("mu", m_mu);
-  config.set("variance_threshold", m_variance_threshold);
-  config.setArray("alpha", m_cache_alpha);
-  config.setArray("beta", m_cache_beta);
-  // gamma's
-  if(m_cache_gamma.size() > 0)
-  {
-    blitz::Array<uint32_t, 1> a_indices(m_cache_gamma.size());
-    int i = 0;
-    for(std::map<size_t,blitz::Array<double,2> >::const_iterator
-        it=m_cache_gamma.begin(); it!=m_cache_gamma.end(); ++it)
-    {
-      a_indices(i) = it->first;
-      std::string str = "gamma_" + boost::lexical_cast<std::string>(it->first);
-      config.setArray(str, it->second);
-      ++i;
-    }
-    config.setArray("a_indices_gamma", a_indices);
-  }
-  // log likelihood constant terms
-  if(m_cache_loglike_constterm.size() > 0)
-  {
-    blitz::Array<uint32_t, 1> a_indices(m_cache_loglike_constterm.size());
-    int i = 0;
-    for(std::map<size_t,double>::const_iterator
-        it=m_cache_loglike_constterm.begin(); it!=m_cache_loglike_constterm.end(); ++it)
-    {
-      a_indices(i) = it->first;
-      std::string str = "loglikeconstterm_" + boost::lexical_cast<std::string>(it->first);
-      config.set(str, it->second);
-      ++i;
-    }
-    config.setArray("a_indices_loglikeconstterm", a_indices);
-  }
-
-  config.setArray("Ft_beta", m_cache_Ft_beta);
-  config.setArray("Gt_isigma", m_cache_Gt_isigma);
-  config.set("logdet_alpha", m_cache_logdet_alpha);
-  config.set("logdet_sigma", m_cache_logdet_sigma);
-}
-
-void bob::learn::misc::PLDABase::resizeNoInit(const size_t dim_d, const size_t dim_f,
-    const size_t dim_g)
-{
-  m_dim_d = dim_d;
-  m_dim_f = dim_f;
-  m_dim_g = dim_g;
-  m_F.resize(dim_d, dim_f);
-  m_G.resize(dim_d, dim_g);
-  m_sigma.resize(dim_d);
-  m_mu.resize(dim_d);
-  m_cache_alpha.resize(dim_g, dim_g);
-  m_cache_beta.resize(dim_d, dim_d);
-  m_cache_Ft_beta.resize(dim_f, dim_d);
-  m_cache_Gt_isigma.resize(dim_g, dim_d);
-  m_cache_gamma.clear();
-  m_cache_isigma.resize(dim_d);
-  m_cache_loglike_constterm.clear();
-  resizeTmp();
-}
-
-void bob::learn::misc::PLDABase::resizeTmp()
-{
-  m_tmp_d_1.resize(m_dim_d);
-  m_tmp_d_2.resize(m_dim_d);
-  m_tmp_d_ng_1.resize(m_dim_d, m_dim_g);
-  m_tmp_nf_nf_1.resize(m_dim_f, m_dim_f);
-  m_tmp_ng_ng_1.resize(m_dim_g, m_dim_g);
-}
-
-void bob::learn::misc::PLDABase::resize(const size_t dim_d, const size_t dim_f,
-    const size_t dim_g)
-{
-  resizeNoInit(dim_d, dim_f, dim_g);
-  initMuFGSigma();
-}
-
-void bob::learn::misc::PLDABase::setF(const blitz::Array<double,2>& F)
-{
-  bob::core::array::assertSameShape(F, m_F);
-  m_F.reference(bob::core::array::ccopy(F));
-  // Precomputes useful matrices
-  precompute();
-}
-
-void bob::learn::misc::PLDABase::setG(const blitz::Array<double,2>& G)
-{
-  bob::core::array::assertSameShape(G, m_G);
-  m_G.reference(bob::core::array::ccopy(G));
-  // Precomputes useful matrices and values
-  precompute();
-  precomputeLogDetAlpha();
-}
-
-void bob::learn::misc::PLDABase::setSigma(const blitz::Array<double,1>& sigma)
-{
-  bob::core::array::assertSameShape(sigma, m_sigma);
-  m_sigma.reference(bob::core::array::ccopy(sigma));
-  // Apply variance flooring threshold: This will also
-  // call the precompute() and precomputeLogLike() methods!
-  applyVarianceThreshold();
-}
-
-void bob::learn::misc::PLDABase::setMu(const blitz::Array<double,1>& mu)
-{
-  bob::core::array::assertSameShape(mu, m_mu);
-  m_mu.reference(bob::core::array::ccopy(mu));
-}
-
-void bob::learn::misc::PLDABase::setVarianceThreshold(const double value)
-{
-  // Variance flooring
-  m_variance_threshold = value;
-  // Apply variance flooring thresholds: This will also
-  // call the precompute() and precomputeLogLike() methods!
-  applyVarianceThreshold();
-}
-
-void bob::learn::misc::PLDABase::applyVarianceThreshold()
-{
-   // Apply variance flooring threshold
-  m_sigma = blitz::where( m_sigma < m_variance_threshold, m_variance_threshold, m_sigma);
-  // Re-compute constants, because m_sigma has changed
-  precompute();
-  precomputeLogLike();
-}
-
-const blitz::Array<double,2>& bob::learn::misc::PLDABase::getGamma(const size_t a) const
-{
-  if(!hasGamma(a))
-    throw std::runtime_error("Gamma for this number of samples is not currently in cache. You could use the getAddGamma() method instead");
-  return (m_cache_gamma.find(a))->second;
-}
-
-const blitz::Array<double,2>& bob::learn::misc::PLDABase::getAddGamma(const size_t a)
-{
-  if(!hasGamma(a)) precomputeGamma(a);
-  return m_cache_gamma[a];
-}
-
-void bob::learn::misc::PLDABase::initMuFGSigma()
-{
-  // To avoid problems related to precomputation
-  m_mu = 0.;
-  bob::math::eye(m_F);
-  bob::math::eye(m_G);
-  m_sigma = 1.;
-  // Precompute variables
-  precompute();
-  precomputeLogLike();
-}
-
-void bob::learn::misc::PLDABase::precompute()
-{
-  precomputeISigma();
-  precomputeGtISigma();
-  precomputeAlpha();
-  precomputeBeta();
-  m_cache_gamma.clear();
-  precomputeFtBeta();
-  m_cache_loglike_constterm.clear();
-}
-
-void bob::learn::misc::PLDABase::precomputeLogLike()
-{
-  precomputeLogDetAlpha();
-  precomputeLogDetSigma();
-}
-
-void bob::learn::misc::PLDABase::precomputeISigma()
-{
-  // Updates inverse of sigma
-  m_cache_isigma = 1. / m_sigma;
-}
-
-void bob::learn::misc::PLDABase::precomputeGtISigma()
-{
-  // m_cache_Gt_isigma = G^T \Sigma^{-1}
-  blitz::firstIndex i;
-  blitz::secondIndex j;
-  blitz::Array<double,2> Gt = m_G.transpose(1,0);
-  m_cache_Gt_isigma = Gt(i,j) * m_cache_isigma(j);
-}
-
-void bob::learn::misc::PLDABase::precomputeAlpha()
-{
-  // alpha = (Id + G^T.sigma^-1.G)^-1
-
-  // m_tmp_ng_ng_1 = G^T.sigma^-1.G
-  bob::math::prod(m_cache_Gt_isigma, m_G, m_tmp_ng_ng_1);
-  // m_tmp_ng_ng_1 = Id + G^T.sigma^-1.G
-  for(int i=0; i<m_tmp_ng_ng_1.extent(0); ++i) m_tmp_ng_ng_1(i,i) += 1;
-  // m_cache_alpha = (Id + G^T.sigma^-1.G)^-1
-  bob::math::inv(m_tmp_ng_ng_1, m_cache_alpha);
-}
-
-void bob::learn::misc::PLDABase::precomputeBeta()
-{
-  // beta = (sigma + G.G^T)^-1
-  // BUT, there is a more efficient computation (Woodbury identity):
-  // beta = sigma^-1 - sigma^-1.G.(Id + G^T.sigma^-1.G)^-1.G^T.sigma^-1
-  // beta =  sigma^-1 - sigma^-1.G.alpha.G^T.sigma^-1
-
-  blitz::Array<double,2> GtISigmaT = m_cache_Gt_isigma.transpose(1,0);
-  // m_tmp_d_ng_1 = sigma^-1.G.alpha
-  bob::math::prod(GtISigmaT, m_cache_alpha, m_tmp_d_ng_1);
-  // m_cache_beta = -sigma^-1.G.alpha.G^T.sigma^-1
-  bob::math::prod(m_tmp_d_ng_1, m_cache_Gt_isigma, m_cache_beta);
-  m_cache_beta = -m_cache_beta;
-  // m_cache_beta = sigma^-1 - sigma^-1.G.alpha.G^T.sigma^-1
-  for(int i=0; i<m_cache_beta.extent(0); ++i) m_cache_beta(i,i) += m_cache_isigma(i);
-}
-
-void bob::learn::misc::PLDABase::precomputeGamma(const size_t a)
-{
-
-  blitz::Array<double,2> gamma_a(getDimF(),getDimF());
-  m_cache_gamma[a].reference(gamma_a);
-  computeGamma(a, gamma_a);
-}
-
-void bob::learn::misc::PLDABase::precomputeFtBeta()
-{
-  // m_cache_Ft_beta = F^T.beta = F^T.(sigma + G.G^T)^-1
-  blitz::Array<double,2> Ft = m_F.transpose(1,0);
-  bob::math::prod(Ft, m_cache_beta, m_cache_Ft_beta);
-}
-
-void bob::learn::misc::PLDABase::computeGamma(const size_t a,
-  blitz::Array<double,2> res) const
-{
-  // gamma = (Id + a.F^T.beta.F)^-1
-
-  // Checks destination size
-  bob::core::array::assertSameShape(res, m_tmp_nf_nf_1);
-  // m_tmp_nf_nf_1 = F^T.beta.F
-  bob::math::prod(m_cache_Ft_beta, m_F, m_tmp_nf_nf_1);
-   // m_tmp_nf_nf_1 = a.F^T.beta.F
-  m_tmp_nf_nf_1 *= static_cast<double>(a);
-  // m_tmp_nf_nf_1 = Id + a.F^T.beta.F
-  for(int i=0; i<m_tmp_nf_nf_1.extent(0); ++i) m_tmp_nf_nf_1(i,i) += 1;
-
-  // res = (Id + a.F^T.beta.F)^-1
-  bob::math::inv(m_tmp_nf_nf_1, res);
-}
-
-void bob::learn::misc::PLDABase::precomputeLogDetAlpha()
-{
-  int sign;
-  m_cache_logdet_alpha = bob::math::slogdet(m_cache_alpha, sign);
-}
-
-void bob::learn::misc::PLDABase::precomputeLogDetSigma()
-{
-  m_cache_logdet_sigma = blitz::sum(blitz::log(m_sigma));
-}
-
-double bob::learn::misc::PLDABase::computeLogLikeConstTerm(const size_t a,
-  const blitz::Array<double,2>& gamma_a) const
-{
-  // loglike_constterm[a] = a/2 *
-  //  ( -D*log(2*pi) -log|sigma| +log|alpha| +log|gamma_a|)
-  int sign;
-  double logdet_gamma_a = bob::math::slogdet(gamma_a, sign);
-  double ah = static_cast<double>(a)/2.;
-  double res = ( -ah*((double)m_dim_d)*log(2*M_PI) -
-      ah*m_cache_logdet_sigma + ah*m_cache_logdet_alpha + logdet_gamma_a/2.);
-  return res;
-}
-
-double bob::learn::misc::PLDABase::computeLogLikeConstTerm(const size_t a)
-{
-  const blitz::Array<double,2>& gamma_a = getAddGamma(a);
-  return computeLogLikeConstTerm(a, gamma_a);
-}
-
-void bob::learn::misc::PLDABase::precomputeLogLikeConstTerm(const size_t a)
-{
-  double val = computeLogLikeConstTerm(a);
-  m_cache_loglike_constterm[a] = val;
-}
-
-double bob::learn::misc::PLDABase::getLogLikeConstTerm(const size_t a) const
-{
-  if(!hasLogLikeConstTerm(a))
-    throw std::runtime_error("The LogLikelihood constant term for this number of samples is not currently in cache. You could use the getAddLogLikeConstTerm() method instead");
-  return (m_cache_loglike_constterm.find(a))->second;
-}
-
-double bob::learn::misc::PLDABase::getAddLogLikeConstTerm(const size_t a)
-{
-  if(!hasLogLikeConstTerm(a)) precomputeLogLikeConstTerm(a);
-  return m_cache_loglike_constterm[a];
-}
-
-void bob::learn::misc::PLDABase::clearMaps()
-{
-  m_cache_gamma.clear();
-  m_cache_loglike_constterm.clear();
-}
-
-double bob::learn::misc::PLDABase::computeLogLikelihoodPointEstimate(
-  const blitz::Array<double,1>& xij, const blitz::Array<double,1>& hi,
-  const blitz::Array<double,1>& wij) const
-{
-  // Check inputs
-  bob::core::array::assertSameDimensionLength(xij.extent(0), getDimD());
-  bob::core::array::assertSameDimensionLength(hi.extent(0), getDimF());
-  bob::core::array::assertSameDimensionLength(wij.extent(0), getDimG());
-  // Computes: -D/2 log(2pi) -1/2 log(det(\Sigma))
-  //   -1/2 {(x_{ij}-(\mu+Fh_{i}+Gw_{ij}))^{T}\Sigma^{-1}(x_{ij}-(\mu+Fh_{i}+Gw_{ij}))}
-  double res = -0.5*((double)m_dim_d)*log(2*M_PI) - 0.5*m_cache_logdet_sigma;
-  // m_tmp_d_1 = (x_{ij} - (\mu+Fh_{i}+Gw_{ij}))
-  m_tmp_d_1 = xij - m_mu;
-  bob::math::prod(m_F, hi, m_tmp_d_2);
-  m_tmp_d_1 -= m_tmp_d_2;
-  bob::math::prod(m_G, wij, m_tmp_d_2);
-  m_tmp_d_1 -= m_tmp_d_2;
-  // add third term to res
-  res += -0.5*blitz::sum(blitz::pow2(m_tmp_d_1) * m_cache_isigma);
-  return res;
-}
-
-namespace bob { namespace learn { namespace misc {
-  /**
-   * @brief Prints a PLDABase in the output stream. This will print
-   * the values of the parameters \f$\mu\f$, \f$F\f$, \f$G\f$ and
-   * \f$\Sigma\f$ of the PLDA model.
-   */
-  std::ostream& operator<<(std::ostream& os, const PLDABase& m) {
-    os << "mu = " << m.m_mu << std::endl;
-    os << "sigma = " << m.m_sigma << std::endl;
-    os << "F = " << m.m_F << std::endl;
-    os << "G = " << m.m_G << std::endl;
-    return os;
-  }
-} } }
-
-
-bob::learn::misc::PLDAMachine::PLDAMachine():
-  m_plda_base(),
-  m_n_samples(0), m_nh_sum_xit_beta_xi(0), m_weighted_sum(0),
-  m_loglikelihood(0), m_cache_gamma(), m_cache_loglike_constterm(),
-  m_tmp_d_1(0), m_tmp_d_2(0), m_tmp_nf_1(0), m_tmp_nf_2(0), m_tmp_nf_nf_1(0,0)
-{
-}
-
-bob::learn::misc::PLDAMachine::PLDAMachine(const boost::shared_ptr<bob::learn::misc::PLDABase> plda_base):
-  m_plda_base(plda_base),
-  m_n_samples(0), m_nh_sum_xit_beta_xi(0), m_weighted_sum(plda_base->getDimF()),
-  m_loglikelihood(0), m_cache_gamma(), m_cache_loglike_constterm()
-{
-  resizeTmp();
-}
-
-
-bob::learn::misc::PLDAMachine::PLDAMachine(const bob::learn::misc::PLDAMachine& other):
-  m_plda_base(other.m_plda_base),
-  m_n_samples(other.m_n_samples),
-  m_nh_sum_xit_beta_xi(other.m_nh_sum_xit_beta_xi),
-  m_weighted_sum(bob::core::array::ccopy(other.m_weighted_sum)),
-  m_loglikelihood(other.m_loglikelihood), m_cache_gamma(),
-  m_cache_loglike_constterm(other.m_cache_loglike_constterm)
-{
-  bob::core::array::ccopy(other.m_cache_gamma, m_cache_gamma);
-  resizeTmp();
-}
-
-bob::learn::misc::PLDAMachine::PLDAMachine(bob::io::base::HDF5File& config,
-    const boost::shared_ptr<bob::learn::misc::PLDABase> plda_base):
-  m_plda_base(plda_base)
-{
-  load(config);
-}
-
-bob::learn::misc::PLDAMachine::~PLDAMachine() {
-}
-
-bob::learn::misc::PLDAMachine& bob::learn::misc::PLDAMachine::operator=
-(const bob::learn::misc::PLDAMachine& other)
-{
-  if(this!=&other)
-  {
-    m_plda_base = other.m_plda_base;
-    m_n_samples = other.m_n_samples;
-    m_nh_sum_xit_beta_xi = other.m_nh_sum_xit_beta_xi;
-    m_weighted_sum.reference(bob::core::array::ccopy(other.m_weighted_sum));
-    m_loglikelihood = other.m_loglikelihood;
-    bob::core::array::ccopy(other.m_cache_gamma, m_cache_gamma);
-    m_cache_loglike_constterm = other.m_cache_loglike_constterm;
-    resizeTmp();
-  }
-  return *this;
-}
-
-bool bob::learn::misc::PLDAMachine::operator==
-    (const bob::learn::misc::PLDAMachine& b) const
-{
-  if (!(( (!m_plda_base && !b.m_plda_base) ||
-          ((m_plda_base && b.m_plda_base) && *(m_plda_base) == *(b.m_plda_base))) &&
-        m_n_samples == b.m_n_samples &&
-        m_nh_sum_xit_beta_xi ==b.m_nh_sum_xit_beta_xi &&
-        bob::core::array::isEqual(m_weighted_sum, b.m_weighted_sum) &&
-        m_loglikelihood == b.m_loglikelihood &&
-        bob::core::array::isEqual(m_cache_gamma, b.m_cache_gamma)))
-    return false;
-
-  // m_cache_loglike_constterm
-  if (this->m_cache_loglike_constterm.size() != b.m_cache_loglike_constterm.size())
-    return false;  // differing sizes, they are not the same
-  std::map<size_t, double>::const_iterator i, j;
-  for (i = this->m_cache_loglike_constterm.begin(), j = b.m_cache_loglike_constterm.begin();
-    i != this->m_cache_loglike_constterm.end(); ++i, ++j)
-  {
-    if (i->first != j->first || i->second != j->second)
-      return false;
-  }
-
-  return true;
-}
-
-bool bob::learn::misc::PLDAMachine::operator!=
-    (const bob::learn::misc::PLDAMachine& b) const
-{
-  return !(this->operator==(b));
-}
-
-bool bob::learn::misc::PLDAMachine::is_similar_to(
-  const bob::learn::misc::PLDAMachine& b, const double r_epsilon,
-  const double a_epsilon) const
-{
-  return (( (!m_plda_base && !b.m_plda_base) ||
-            ((m_plda_base && b.m_plda_base) &&
-             m_plda_base->is_similar_to(*(b.m_plda_base), r_epsilon, a_epsilon))) &&
-          m_n_samples == b.m_n_samples &&
-          bob::core::isClose(m_nh_sum_xit_beta_xi, b.m_nh_sum_xit_beta_xi, r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(m_weighted_sum, b.m_weighted_sum, r_epsilon, a_epsilon) &&
-          bob::core::isClose(m_loglikelihood, b.m_loglikelihood, r_epsilon, a_epsilon) &&
-          bob::core::array::isClose(m_cache_gamma, b.m_cache_gamma, r_epsilon, a_epsilon) &&
-          bob::core::isClose(m_cache_loglike_constterm, b.m_cache_loglike_constterm, r_epsilon, a_epsilon));
-}
-
-void bob::learn::misc::PLDAMachine::load(bob::io::base::HDF5File& config)
-{
-  //reads all data directly into the member variables
-  m_n_samples = config.read<uint64_t>("n_samples");
-  m_nh_sum_xit_beta_xi = config.read<double>("nh_sum_xit_beta_xi");
-  m_weighted_sum.reference(config.readArray<double,1>("weighted_sum"));
-  m_loglikelihood = config.read<double>("loglikelihood");
-  // gamma and log like constant term (a-dependent terms)
-  clearMaps();
-  if(config.contains("a_indices"))
-  {
-    blitz::Array<uint32_t, 1> a_indices;
-    a_indices.reference(config.readArray<uint32_t,1>("a_indices"));
-    for(int i=0; i<a_indices.extent(0); ++i)
-    {
-      std::string str1 = "gamma_" + boost::lexical_cast<std::string>(a_indices(i));
-      m_cache_gamma[a_indices(i)].reference(config.readArray<double,2>(str1));
-      std::string str2 = "loglikeconstterm_" + boost::lexical_cast<std::string>(a_indices(i));
-      m_cache_loglike_constterm[a_indices(i)] = config.read<double>(str2);
-    }
-  }
-  resizeTmp();
-}
-
-void bob::learn::misc::PLDAMachine::save(bob::io::base::HDF5File& config) const
-{
-  config.set("n_samples", m_n_samples);
-  config.set("nh_sum_xit_beta_xi", m_nh_sum_xit_beta_xi);
-  config.setArray("weighted_sum", m_weighted_sum);
-  config.set("loglikelihood", m_loglikelihood);
-  // Gamma
-  if(m_cache_gamma.size() > 0)
-  {
-    blitz::Array<uint32_t, 1> a_indices(m_cache_gamma.size());
-    int i = 0;
-    for(std::map<size_t,blitz::Array<double,2> >::const_iterator
-        it=m_cache_gamma.begin(); it!=m_cache_gamma.end(); ++it)
-    {
-      a_indices(i) = it->first;
-      std::string str1 = "gamma_" + boost::lexical_cast<std::string>(it->first);
-      config.setArray(str1, it->second);
-      std::string str2 = "loglikeconstterm_" + boost::lexical_cast<std::string>(it->first);
-      double v = m_cache_loglike_constterm.find(it->first)->second;
-      config.set(str2, v);
-      ++i;
-    }
-    config.setArray("a_indices", a_indices);
-  }
-}
-
-void bob::learn::misc::PLDAMachine::setPLDABase(const boost::shared_ptr<bob::learn::misc::PLDABase> plda_base)
-{
-  m_plda_base = plda_base;
-  m_weighted_sum.resizeAndPreserve(getDimF());
-  clearMaps();
-  resizeTmp();
-}
-
-
-void bob::learn::misc::PLDAMachine::setWeightedSum(const blitz::Array<double,1>& ws)
-{
-  if(ws.extent(0) != m_weighted_sum.extent(0)) {
-    boost::format m("size of parameter `ws' (%d) does not match the expected size (%d)");
-    m % ws.extent(0) % m_weighted_sum.extent(0);
-    throw std::runtime_error(m.str());
-  }
-  m_weighted_sum.reference(bob::core::array::ccopy(ws));
-}
-
-const blitz::Array<double,2>& bob::learn::misc::PLDAMachine::getGamma(const size_t a) const
-{
-  // Checks in both base machine and this machine
-  if (m_plda_base->hasGamma(a)) return m_plda_base->getGamma(a);
-  else if (!hasGamma(a))
-    throw std::runtime_error("Gamma for this number of samples is not currently in cache. You could use the getAddGamma() method instead");
-  return (m_cache_gamma.find(a))->second;
-}
-
-const blitz::Array<double,2>& bob::learn::misc::PLDAMachine::getAddGamma(const size_t a)
-{
-  if (m_plda_base->hasGamma(a)) return m_plda_base->getGamma(a);
-  else if (hasGamma(a)) return m_cache_gamma[a];
-  // else computes it and adds it to this machine
-  blitz::Array<double,2> gamma_a(getDimF(),getDimF());
-  m_cache_gamma[a].reference(gamma_a);
-  m_plda_base->computeGamma(a, gamma_a);
-  return m_cache_gamma[a];
-}
-
-double bob::learn::misc::PLDAMachine::getLogLikeConstTerm(const size_t a) const
-{
-  // Checks in both base machine and this machine
-  if (!m_plda_base) throw std::runtime_error("No PLDABase set to this machine");
-  if (m_plda_base->hasLogLikeConstTerm(a)) return m_plda_base->getLogLikeConstTerm(a);
-  else if (!hasLogLikeConstTerm(a))
-    throw std::runtime_error("The LogLikelihood constant term for this number of samples is not currently in cache. You could use the getAddLogLikeConstTerm() method instead");
-  return (m_cache_loglike_constterm.find(a))->second;
-}
-
-double bob::learn::misc::PLDAMachine::getAddLogLikeConstTerm(const size_t a)
-{
-  if (!m_plda_base) throw std::runtime_error("No PLDABase set to this machine");
-  if (m_plda_base->hasLogLikeConstTerm(a)) return m_plda_base->getLogLikeConstTerm(a);
-  else if (hasLogLikeConstTerm(a)) return m_cache_loglike_constterm[a];
-  // else computes it and adds it to this machine
-  m_cache_loglike_constterm[a] =
-        m_plda_base->computeLogLikeConstTerm(a, getAddGamma(a));
-  return m_cache_loglike_constterm[a];
-}
-
-void bob::learn::misc::PLDAMachine::clearMaps()
-{
-  m_cache_gamma.clear();
-  m_cache_loglike_constterm.clear();
-}
-
-double bob::learn::misc::PLDAMachine::forward(const blitz::Array<double,1>& sample)
-{
-  return forward_(sample);
-}
-
-double bob::learn::misc::PLDAMachine::forward_(const blitz::Array<double,1>& sample)
-{
-  // Computes the log likelihood ratio
-  return computeLogLikelihood(sample, true) - // match
-          (computeLogLikelihood(sample, false) + m_loglikelihood); // no match
-}
-
-double bob::learn::misc::PLDAMachine::forward(const blitz::Array<double,2>& samples)
-{
-  // Computes the log likelihood ratio
-  return computeLogLikelihood(samples, true) - // match
-          (computeLogLikelihood(samples, false) + m_loglikelihood); // no match
-}
-
-double bob::learn::misc::PLDAMachine::computeLogLikelihood(const blitz::Array<double,1>& sample,
-  bool enrol) const
-{
-  if (!m_plda_base) throw std::runtime_error("No PLDABase set to this machine");
-  // Check dimensionality
-  bob::core::array::assertSameDimensionLength(sample.extent(0), getDimD());
-
-  int n_samples = 1 + (enrol?m_n_samples:0);
-
-  // 3/ Third term of the likelihood: -1/2*X^T*(SIGMA+A.A^T)^-1*X
-  //    Efficient way: -1/2*sum_i(xi^T.sigma^-1.xi - xi^T.sigma^-1*G*(I+G^T.sigma^-1.G)^-1*G^T*sigma^-1.xi
-  //      -1/2*sumWeighted^T*(I+aF^T.(sigma^-1-sigma^-1*G*(I+G^T.sigma^-1.G)^-1*G^T*sigma^-1).F)^-1*sumWeighted
-  //      where sumWeighted = sum_i(F^T*(sigma^-1-sigma^-1*G*(I+G^T.sigma^-1.G)^-1*G^T*sigma^-1)*xi)
-  const blitz::Array<double,2>& beta = getPLDABase()->getBeta();
-  const blitz::Array<double,2>& Ft_beta = getPLDABase()->getFtBeta();
-  const blitz::Array<double,1>& mu = getPLDABase()->getMu();
-  double terma = (enrol?m_nh_sum_xit_beta_xi:0.);
-  // sumWeighted
-  if (enrol && m_n_samples > 0) m_tmp_nf_1 = m_weighted_sum;
-  else m_tmp_nf_1 = 0;
-
-  // terma += -1 / 2. * (xi^t*beta*xi)
-  m_tmp_d_1 = sample - mu;
-  bob::math::prod(beta, m_tmp_d_1, m_tmp_d_2);
-  terma += -1 / 2. * (blitz::sum(m_tmp_d_1*m_tmp_d_2));
-
-  // sumWeighted
-  bob::math::prod(Ft_beta, m_tmp_d_1, m_tmp_nf_2);
-  m_tmp_nf_1 += m_tmp_nf_2;
-  blitz::Array<double,2> gamma_a;
-  if (hasGamma(n_samples) || m_plda_base->hasGamma(n_samples))
-    gamma_a.reference(getGamma(n_samples));
-  else
-  {
-    gamma_a.reference(m_tmp_nf_nf_1);
-    m_plda_base->computeGamma(n_samples, gamma_a);
-  }
-  bob::math::prod(gamma_a, m_tmp_nf_1, m_tmp_nf_2);
-  double termb = 1 / 2. * (blitz::sum(m_tmp_nf_1*m_tmp_nf_2));
-
-  // 1/2/ Constant term of the log likelihood:
-  //      1/ First term of the likelihood: -Nsamples*D/2*log(2*PI)
-  //      2/ Second term of the likelihood: -1/2*log(det(SIGMA+A.A^T))
-  //        Efficient way: -Nsamples/2*log(det(sigma))-Nsamples/2*log(det(I+G^T.sigma^-1.G))
-  //       -1/2*log(det(I+aF^T.(sigma^-1-sigma^-1*G*(I+G^T.sigma^-1.G)*G^T*sigma^-1).F))
-  double log_likelihood; // = getAddLogLikeConstTerm(static_cast<size_t>(n_samples));
-  if (hasLogLikeConstTerm(n_samples) || m_plda_base->hasLogLikeConstTerm(n_samples))
-    log_likelihood = getLogLikeConstTerm(n_samples);
-  else
-    log_likelihood = m_plda_base->computeLogLikeConstTerm(n_samples, gamma_a);
-
-  log_likelihood += terma + termb;
-  return log_likelihood;
-}
-
-double bob::learn::misc::PLDAMachine::computeLogLikelihood(const blitz::Array<double,2>& samples,
-  bool enrol) const
-{
-  if (!m_plda_base) throw std::runtime_error("No PLDABase set to this machine");
-  // Check dimensionality
-  bob::core::array::assertSameDimensionLength(samples.extent(1), getDimD());
-
-  int n_samples = samples.extent(0) + (enrol?m_n_samples:0);
-  // 3/ Third term of the likelihood: -1/2*X^T*(SIGMA+A.A^T)^-1*X
-  //    Efficient way: -1/2*sum_i(xi^T.sigma^-1.xi - xi^T.sigma^-1*G*(I+G^T.sigma^-1.G)^-1*G^T*sigma^-1.xi
-  //      -1/2*sumWeighted^T*(I+aF^T.(sigma^-1-sigma^-1*G*(I+G^T.sigma^-1.G)^-1*G^T*sigma^-1).F)^-1*sumWeighted
-  //      where sumWeighted = sum_i(F^T*(sigma^-1-sigma^-1*G*(I+G^T.sigma^-1.G)^-1*G^T*sigma^-1)*xi)
-  const blitz::Array<double,2>& beta = getPLDABase()->getBeta();
-  const blitz::Array<double,2>& Ft_beta = getPLDABase()->getFtBeta();
-  const blitz::Array<double,1>& mu = getPLDABase()->getMu();
-  double terma = (enrol?m_nh_sum_xit_beta_xi:0.);
-  // sumWeighted
-  if (enrol && m_n_samples > 0) m_tmp_nf_1 = m_weighted_sum;
-  else m_tmp_nf_1 = 0;
-  for (int k=0; k<samples.extent(0); ++k)
-  {
-    blitz::Array<double,1> samp = samples(k,blitz::Range::all());
-    m_tmp_d_1 = samp - mu;
-    // terma += -1 / 2. * (xi^t*beta*xi)
-    bob::math::prod(beta, m_tmp_d_1, m_tmp_d_2);
-    terma += -1 / 2. * (blitz::sum(m_tmp_d_1*m_tmp_d_2));
-
-    // sumWeighted
-    bob::math::prod(Ft_beta, m_tmp_d_1, m_tmp_nf_2);
-    m_tmp_nf_1 += m_tmp_nf_2;
-  }
-
-  blitz::Array<double,2> gamma_a;
-  if (hasGamma(n_samples) || m_plda_base->hasGamma(n_samples))
-    gamma_a.reference(getGamma(n_samples));
-  else
-  {
-    gamma_a.reference(m_tmp_nf_nf_1);
-    m_plda_base->computeGamma(n_samples, gamma_a);
-  }
-  bob::math::prod(gamma_a, m_tmp_nf_1, m_tmp_nf_2);
-  double termb = 1 / 2. * (blitz::sum(m_tmp_nf_1*m_tmp_nf_2));
-
-  // 1/2/ Constant term of the log likelihood:
-  //      1/ First term of the likelihood: -Nsamples*D/2*log(2*PI)
-  //      2/ Second term of the likelihood: -1/2*log(det(SIGMA+A.A^T))
-  //        Efficient way: -Nsamples/2*log(det(sigma))-Nsamples/2*log(det(I+G^T.sigma^-1.G))
-  //       -1/2*log(det(I+aF^T.(sigma^-1-sigma^-1*G*(I+G^T.sigma^-1.G)*G^T*sigma^-1).F))
-  double log_likelihood; // = getAddLogLikeConstTerm(static_cast<size_t>(n_samples));
-  if (hasLogLikeConstTerm(n_samples) || m_plda_base->hasLogLikeConstTerm(n_samples))
-    log_likelihood = getLogLikeConstTerm(n_samples);
-  else
-    log_likelihood = m_plda_base->computeLogLikeConstTerm(n_samples, gamma_a);
-
-  log_likelihood += terma + termb;
-  return log_likelihood;
-}
-
-void bob::learn::misc::PLDAMachine::resize(const size_t dim_d, const size_t dim_f,
-  const size_t dim_g)
-{
-  m_weighted_sum.resizeAndPreserve(dim_f);
-  clearMaps();
-  resizeTmp();
-}
-
-void bob::learn::misc::PLDAMachine::resizeTmp()
-{
-  if (m_plda_base)
-  {
-    m_tmp_d_1.resize(getDimD());
-    m_tmp_d_2.resize(getDimD());
-    m_tmp_nf_1.resize(getDimF());
-    m_tmp_nf_2.resize(getDimF());
-    m_tmp_nf_nf_1.resize(getDimF(), getDimF());
-  }
-}
diff --git a/bob/learn/misc/cpp/PLDATrainer.cpp b/bob/learn/misc/cpp/PLDATrainer.cpp
deleted file mode 100644
index af50140fb2b2a5421e9bb6433d7e64bc575f5a0b..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/PLDATrainer.cpp
+++ /dev/null
@@ -1,800 +0,0 @@
-/**
- * @date Fri Oct 14 18:07:56 2011 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * @brief Probabilistic Linear Discriminant Analysis
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-
-#include <bob.learn.misc/PLDATrainer.h>
-#include <bob.core/check.h>
-#include <bob.core/array_copy.h>
-#include <bob.core/array_random.h>
-#include <bob.math/inv.h>
-#include <bob.math/svd.h>
-#include <bob.core/check.h>
-#include <bob.core/array_repmat.h>
-#include <algorithm>
-#include <limits>
-#include <vector>
-
-#include <bob.math/linear.h>
-#include <bob.math/linsolve.h>
-
-
-
-bob::learn::misc::PLDATrainer::PLDATrainer(const bool use_sum_second_order):
-  m_rng(new boost::mt19937()),
-  m_dim_d(0), m_dim_f(0), m_dim_g(0),
-  m_use_sum_second_order(use_sum_second_order),
-  m_initF_method(bob::learn::misc::PLDATrainer::RANDOM_F), m_initF_ratio(1.),
-  m_initG_method(bob::learn::misc::PLDATrainer::RANDOM_G), m_initG_ratio(1.),
-  m_initSigma_method(bob::learn::misc::PLDATrainer::RANDOM_SIGMA),
-  m_initSigma_ratio(1.),
-  m_cache_S(0,0),
-  m_cache_z_first_order(0), m_cache_sum_z_second_order(0,0), m_cache_z_second_order(0),
-  m_cache_n_samples_per_id(0), m_cache_n_samples_in_training(), m_cache_B(0,0),
-  m_cache_Ft_isigma_G(0,0), m_cache_eta(0,0), m_cache_zeta(), m_cache_iota(),
-  m_tmp_nf_1(0), m_tmp_nf_2(0), m_tmp_ng_1(0),
-  m_tmp_D_1(0), m_tmp_D_2(0),
-  m_tmp_nfng_nfng(0,0), m_tmp_D_nfng_1(0,0), m_tmp_D_nfng_2(0,0)
-{
-}
-
-bob::learn::misc::PLDATrainer::PLDATrainer(const bob::learn::misc::PLDATrainer& other):
-  m_rng(other.m_rng),
-  m_dim_d(other.m_dim_d), m_dim_f(other.m_dim_f), m_dim_g(other.m_dim_g),
-  m_use_sum_second_order(other.m_use_sum_second_order),
-  m_initF_method(other.m_initF_method), m_initF_ratio(other.m_initF_ratio),
-  m_initG_method(other.m_initG_method), m_initG_ratio(other.m_initG_ratio),
-  m_initSigma_method(other.m_initSigma_method), m_initSigma_ratio(other.m_initSigma_ratio),
-  m_cache_S(bob::core::array::ccopy(other.m_cache_S)),
-  m_cache_z_first_order(),
-  m_cache_sum_z_second_order(bob::core::array::ccopy(other.m_cache_sum_z_second_order)),
-  m_cache_z_second_order(),
-  m_cache_n_samples_per_id(other.m_cache_n_samples_per_id),
-  m_cache_n_samples_in_training(other.m_cache_n_samples_in_training),
-  m_cache_B(bob::core::array::ccopy(other.m_cache_B)),
-  m_cache_Ft_isigma_G(bob::core::array::ccopy(other.m_cache_Ft_isigma_G)),
-  m_cache_eta(bob::core::array::ccopy(other.m_cache_eta))
-{
-  bob::core::array::ccopy(other.m_cache_z_first_order, m_cache_z_first_order);
-  bob::core::array::ccopy(other.m_cache_z_second_order, m_cache_z_second_order);
-  bob::core::array::ccopy(other.m_cache_zeta, m_cache_zeta);
-  bob::core::array::ccopy(other.m_cache_iota, m_cache_iota);
-  // Resize working arrays
-  resizeTmp();
-}
-
-bob::learn::misc::PLDATrainer::~PLDATrainer() {}
-
-bob::learn::misc::PLDATrainer& bob::learn::misc::PLDATrainer::operator=
-(const bob::learn::misc::PLDATrainer& other)
-{
-  if(this != &other)
-  {
-    m_rng = m_rng,
-    m_dim_d = other.m_dim_d;
-    m_dim_f = other.m_dim_f;
-    m_dim_g = other.m_dim_g;
-    m_use_sum_second_order = other.m_use_sum_second_order;
-    m_initF_method = other.m_initF_method;
-    m_initF_ratio = other.m_initF_ratio;
-    m_initG_method = other.m_initG_method;
-    m_initG_ratio = other.m_initG_ratio;
-    m_initSigma_method = other.m_initSigma_method;
-    m_initSigma_ratio = other.m_initSigma_ratio;
-    m_cache_S = bob::core::array::ccopy(other.m_cache_S);
-    bob::core::array::ccopy(other.m_cache_z_first_order, m_cache_z_first_order);
-    m_cache_sum_z_second_order = bob::core::array::ccopy(other.m_cache_sum_z_second_order);
-    bob::core::array::ccopy(other.m_cache_z_second_order, m_cache_z_second_order);
-    m_cache_n_samples_per_id = other.m_cache_n_samples_per_id;
-    m_cache_n_samples_in_training = other.m_cache_n_samples_in_training;
-    m_cache_B = bob::core::array::ccopy(other.m_cache_B);
-    m_cache_Ft_isigma_G = bob::core::array::ccopy(other.m_cache_Ft_isigma_G);
-    m_cache_eta = bob::core::array::ccopy(other.m_cache_eta);
-    bob::core::array::ccopy(other.m_cache_iota, m_cache_iota);
-    // Resize working arrays
-    resizeTmp();
-  }
-  return *this;
-}
-
-bool bob::learn::misc::PLDATrainer::operator==
-  (const bob::learn::misc::PLDATrainer& other) const
-{
-  return m_rng == m_rng &&
-         m_dim_d == other.m_dim_d &&
-         m_dim_f == other.m_dim_f &&
-         m_dim_g == other.m_dim_g &&
-         m_initF_method == other.m_initF_method &&
-         m_initF_ratio == other.m_initF_ratio &&
-         m_initG_method == other.m_initG_method &&
-         m_initG_ratio == other.m_initG_ratio &&
-         m_initSigma_method == other.m_initSigma_method &&
-         m_initSigma_ratio == other.m_initSigma_ratio &&
-         bob::core::array::isEqual(m_cache_S, m_cache_S) &&
-         bob::core::array::isEqual(m_cache_z_first_order, other.m_cache_z_first_order) &&
-         bob::core::array::isEqual(m_cache_sum_z_second_order, other.m_cache_sum_z_second_order) &&
-         bob::core::array::isEqual(m_cache_z_second_order, other.m_cache_z_second_order) &&
-         m_cache_n_samples_per_id.size() == m_cache_n_samples_per_id.size() &&
-         std::equal(m_cache_n_samples_per_id.begin(), m_cache_n_samples_per_id.end(), other.m_cache_n_samples_per_id.begin()) &&
-         m_cache_n_samples_in_training.size() == m_cache_n_samples_in_training.size() &&
-         std::equal(m_cache_n_samples_in_training.begin(), m_cache_n_samples_in_training.end(), other.m_cache_n_samples_in_training.begin()) &&
-         bob::core::array::isEqual(m_cache_B, other.m_cache_B) &&
-         bob::core::array::isEqual(m_cache_Ft_isigma_G, other.m_cache_Ft_isigma_G) &&
-         bob::core::array::isEqual(m_cache_eta, other.m_cache_eta) &&
-         bob::core::array::isEqual(m_cache_zeta, other.m_cache_zeta) &&
-         bob::core::array::isEqual(m_cache_iota, other.m_cache_iota);
-}
-
-bool bob::learn::misc::PLDATrainer::operator!=
-  (const bob::learn::misc::PLDATrainer &other) const
-{
-  return !(this->operator==(other));
-}
-
-bool bob::learn::misc::PLDATrainer::is_similar_to
-  (const bob::learn::misc::PLDATrainer &other, const double r_epsilon,
-   const double a_epsilon) const
-{
-  return m_rng == m_rng &&
-         m_dim_d == other.m_dim_d &&
-         m_dim_f == other.m_dim_f &&
-         m_dim_g == other.m_dim_g &&
-         m_use_sum_second_order == other.m_use_sum_second_order &&
-         m_initF_method == other.m_initF_method &&
-         bob::core::isClose(m_initF_ratio, other.m_initF_ratio, r_epsilon, a_epsilon) &&
-         m_initG_method == other.m_initG_method &&
-         bob::core::isClose(m_initG_ratio, other.m_initG_ratio, r_epsilon, a_epsilon) &&
-         m_initSigma_method == other.m_initSigma_method &&
-         bob::core::isClose(m_initSigma_ratio, other.m_initSigma_ratio, r_epsilon, a_epsilon) &&
-         bob::core::array::isClose(m_cache_S, m_cache_S, r_epsilon, a_epsilon) &&
-         bob::core::array::isClose(m_cache_z_first_order, other.m_cache_z_first_order, r_epsilon, a_epsilon) &&
-         bob::core::array::isClose(m_cache_sum_z_second_order, other.m_cache_sum_z_second_order, r_epsilon, a_epsilon) &&
-         bob::core::array::isClose(m_cache_z_second_order, other.m_cache_z_second_order, r_epsilon, a_epsilon) &&
-         m_cache_n_samples_per_id.size() == m_cache_n_samples_per_id.size() &&
-         std::equal(m_cache_n_samples_per_id.begin(), m_cache_n_samples_per_id.end(), other.m_cache_n_samples_per_id.begin()) &&
-         m_cache_n_samples_in_training.size() == m_cache_n_samples_in_training.size() &&
-         std::equal(m_cache_n_samples_in_training.begin(), m_cache_n_samples_in_training.end(), other.m_cache_n_samples_in_training.begin()) &&
-         bob::core::array::isClose(m_cache_B, other.m_cache_B, r_epsilon, a_epsilon) &&
-         bob::core::array::isClose(m_cache_Ft_isigma_G, other.m_cache_Ft_isigma_G, r_epsilon, a_epsilon) &&
-         bob::core::array::isClose(m_cache_eta, other.m_cache_eta, r_epsilon, a_epsilon) &&
-         bob::core::array::isClose(m_cache_zeta, other.m_cache_zeta, r_epsilon, a_epsilon) &&
-         bob::core::array::isClose(m_cache_iota, other.m_cache_iota, r_epsilon, a_epsilon);
-}
-
-void bob::learn::misc::PLDATrainer::initialize(bob::learn::misc::PLDABase& machine,
-  const std::vector<blitz::Array<double,2> >& v_ar)
-{
-  // Checks training data
-  checkTrainingData(v_ar);
-
-  // Gets dimension (first Arrayset)
-  size_t n_features = v_ar[0].extent(1);
-  m_dim_d = machine.getDimD();
-  // Get dimensionalities from the PLDABase
-  bob::core::array::assertSameDimensionLength(n_features, m_dim_d);
-  m_dim_f = machine.getDimF();
-  m_dim_g = machine.getDimG();
-
-  // Reinitializes array members
-  initMembers(v_ar);
-
-  // Computes the mean and the covariance if required
-  computeMeanVariance(machine, v_ar);
-
-  // Initialization (e.g. using scatter)
-  initFGSigma(machine, v_ar);
-}
-
-void bob::learn::misc::PLDATrainer::finalize(bob::learn::misc::PLDABase& machine,
-  const std::vector<blitz::Array<double,2> >& v_ar)
-{
-  // Precomputes constant parts of the log likelihood and (gamma_a)
-  precomputeLogLike(machine, v_ar);
-  // Adds the case 1 sample if not already done (always used for scoring)
-  machine.getAddGamma(1);
-  machine.getAddLogLikeConstTerm(1);
-}
-
-void bob::learn::misc::PLDATrainer::checkTrainingData(const std::vector<blitz::Array<double,2> >& v_ar)
-{
-  // Checks that the vector of Arraysets is not empty
-  if (v_ar.size() == 0) {
-    throw std::runtime_error("input training set is empty");
-  }
-
-  // Gets dimension (first Arrayset)
-  int n_features = v_ar[0].extent(1);
-  // Checks dimension consistency
-  for (size_t i=0; i<v_ar.size(); ++i) {
-    if (v_ar[i].extent(1) != n_features) {
-      boost::format m("number of features (columns) of array for class %u (%d) does not match that of array for class 0 (%d)");
-      m % i % v_ar[0].extent(1) % n_features;
-      throw std::runtime_error(m.str());
-    }
-  }
-}
-
-void bob::learn::misc::PLDATrainer::initMembers(const std::vector<blitz::Array<double,2> >& v_ar)
-{
-  // Gets dimension (first Arrayset)
-  const size_t n_features = v_ar[0].extent(1); // dimensionality of the data
-  const size_t n_identities = v_ar.size();
-
-  m_cache_S.resize(n_features, n_features);
-  m_cache_sum_z_second_order.resize(m_dim_f+m_dim_g, m_dim_f+m_dim_g);
-
-  // Loops over the identities
-  for (size_t i=0; i<n_identities; ++i)
-  {
-    // Number of training samples for this identity
-    const size_t n_i = v_ar[i].extent(0);
-    // m_cache_z_first_order
-    blitz::Array<double,2> z_i(n_i, m_dim_f+m_dim_g);
-    m_cache_z_first_order.push_back(z_i);
-    // m_z_second_order
-    if (!m_use_sum_second_order)
-    {
-      blitz::Array<double,3> z2_i(n_i, m_dim_f+m_dim_g, m_dim_f+m_dim_g);
-      m_cache_z_second_order.push_back(z2_i);
-    }
-
-    // m_cache_n_samples_per_id
-    m_cache_n_samples_per_id.push_back(n_i);
-
-    // Maps dependent on the number of samples per identity
-    std::map<size_t,bool>::iterator it;
-    it = m_cache_n_samples_in_training.find(n_i);
-    if (it == m_cache_n_samples_in_training.end())
-    {
-      // Indicates if there are identities with n_i training samples and if
-      // corresponding matrices are up to date.
-      m_cache_n_samples_in_training[n_i] = false;
-      // Allocates arrays for identities with n_i training samples
-      m_cache_zeta[n_i].reference(blitz::Array<double,2>(m_dim_g, m_dim_g));
-      m_cache_iota[n_i].reference(blitz::Array<double,2>(m_dim_f, m_dim_g));
-    }
-  }
-
-  m_cache_B.resize(n_features, m_dim_f+m_dim_g);
-  m_cache_Ft_isigma_G.resize(m_dim_f, m_dim_g);
-  m_cache_eta.resize(m_dim_f, m_dim_g);
-
-  // Working arrays
-  resizeTmp();
-}
-
-void bob::learn::misc::PLDATrainer::resizeTmp()
-{
-  m_tmp_nf_1.resize(m_dim_f);
-  m_tmp_nf_2.resize(m_dim_f);
-  m_tmp_ng_1.resize(m_dim_g);
-  m_tmp_D_1.resize(m_dim_d);
-  m_tmp_D_2.resize(m_dim_d);
-  m_tmp_nfng_nfng.resize(m_dim_f+m_dim_g, m_dim_f+m_dim_g);
-  m_tmp_D_nfng_1.resize(m_dim_d, m_dim_f+m_dim_g);
-  m_tmp_D_nfng_2.resize(m_dim_d, m_dim_f+m_dim_g);
-}
-
-void bob::learn::misc::PLDATrainer::computeMeanVariance(bob::learn::misc::PLDABase& machine,
-  const std::vector<blitz::Array<double,2> >& v_ar)
-{
-  blitz::Array<double,1>& mu = machine.updateMu();
-  blitz::Range all = blitz::Range::all();
-  // TODO: Uncomment variance computation if required
-  /*  if(m_compute_likelihood)
-  {
-    // loads all the data in a single shot - required for scatter
-    blitz::Array<double,2> data(n_features, n_samples);
-    for (size_t i=0; i<n_samples; ++i)
-      data(all,i) = ar(i,all);
-    // Mean and scatter computation
-    bob::math::scatter(data, m_cache_S, mu);
-    // divides scatter by N-1
-    m_cache_S /= static_cast<double>(n_samples-1);
-  }
-  else */
-  {
-    // Computes the mean and updates mu
-    mu = 0.;
-    size_t n_samples = 0;
-    for (size_t j=0; j<v_ar.size(); ++j) {
-      n_samples += v_ar[j].extent(0);
-      for (int i=0; i<v_ar[j].extent(0); ++i)
-        mu += v_ar[j](i,all);
-    }
-    mu /= static_cast<double>(n_samples);
-    m_cache_S = 0.;
-  }
-}
-
-void bob::learn::misc::PLDATrainer::initFGSigma(bob::learn::misc::PLDABase& machine,
-  const std::vector<blitz::Array<double,2> >& v_ar)
-{
-  // Initializes F, G and sigma
-  initF(machine, v_ar);
-  initG(machine, v_ar);
-  initSigma(machine, v_ar);
-
-  // Precomputes values using new F, G and sigma
-  machine.precompute();
-}
-
-void bob::learn::misc::PLDATrainer::initF(bob::learn::misc::PLDABase& machine,
-  const std::vector<blitz::Array<double,2> >& v_ar)
-{
-  blitz::Array<double,2>& F = machine.updateF();
-  blitz::Range a = blitz::Range::all();
-
-  // 1: between-class scatter
-  if (m_initF_method == bob::learn::misc::PLDATrainer::BETWEEN_SCATTER)
-  {
-    if (machine.getDimF() > v_ar.size()) {
-      boost::format m("The rank of the matrix F ('%ld') can't be larger than the number of classes in the training set ('%ld')");
-      m % machine.getDimF() % v_ar.size();
-      throw std::runtime_error(m.str());
-    }
-
-    // a/ Computes between-class scatter matrix
-    blitz::firstIndex bi;
-    blitz::secondIndex bj;
-    blitz::Array<double,2> S(machine.getDimD(), v_ar.size());
-    S = 0.;
-    m_tmp_D_1 = 0.;
-    for (size_t i=0; i<v_ar.size(); ++i)
-    {
-      blitz::Array<double,1> Si = S(blitz::Range::all(),i);
-      Si = 0.;
-      for (int j=0; j<v_ar[i].extent(0); ++j)
-      {
-        // Si += x_ij
-        Si += v_ar[i](j,a);
-      }
-      // Si = mean of the samples class i
-      Si /= static_cast<double>(v_ar[i].extent(0));
-      m_tmp_D_1 += Si;
-    }
-    m_tmp_D_1 /= static_cast<double>(v_ar.size());
-
-    // b/ Removes the mean
-    S = S(bi,bj) - m_tmp_D_1(bi);
-
-    // c/ SVD of the between-class scatter matrix
-    const size_t n_singular = std::min(machine.getDimD(),v_ar.size());
-    blitz::Array<double,2> U(machine.getDimD(), n_singular);
-    blitz::Array<double,1> sigma(n_singular);
-    bob::math::svd(S, U, sigma);
-
-    // d/ Updates F
-    blitz::Array<double,2> Uslice = U(a, blitz::Range(0,m_dim_f-1));
-    blitz::Array<double,1> sigma_slice = sigma(blitz::Range(0,m_dim_f-1));
-    sigma_slice = blitz::sqrt(sigma_slice);
-    F = Uslice(bi,bj) / sigma_slice(bj);
-  }
-  // otherwise: random initialization
-  else {
-    // F initialization
-    bob::core::array::randn(*m_rng, F);
-    F *= m_initF_ratio;
-  }
-}
-
-void bob::learn::misc::PLDATrainer::initG(bob::learn::misc::PLDABase& machine,
-  const std::vector<blitz::Array<double,2> >& v_ar)
-{
-  blitz::Array<double,2>& G = machine.updateG();
-  blitz::Range a = blitz::Range::all();
-
-  // 1: within-class scatter
-  if (m_initG_method == bob::learn::misc::PLDATrainer::WITHIN_SCATTER)
-  {
-    // a/ Computes within-class scatter matrix
-    blitz::firstIndex bi;
-    blitz::secondIndex bj;
-    size_t Nsamples=0;
-    for (size_t i=0; i<v_ar.size(); ++i)
-      Nsamples += v_ar[i].extent(0);
-
-    blitz::Array<double,2> S(machine.getDimD(), Nsamples);
-    S = 0.;
-    m_tmp_D_1 = 0.;
-    int counter = 0;
-    for (size_t i=0; i<v_ar.size(); ++i)
-    {
-      // Computes the mean of the samples class i
-      m_tmp_D_2 = 0.;
-      for (int j=0; j<v_ar[i].extent(0); ++j)
-      {
-        // m_tmp_D_2 += x_ij
-        m_tmp_D_2 += v_ar[i](j,a);
-      }
-      // m_tmp_D_2 = mean of the samples class i
-      m_tmp_D_2 /= static_cast<double>(v_ar[i].extent(0));
-
-      // Generates the scatter
-      for (int j=0; j<v_ar[i].extent(0); ++j)
-      {
-        blitz::Array<double,1> Si = S(a, counter);
-        // Si = x_ij - mean_i
-        Si = v_ar[i](j,a) - m_tmp_D_2;
-        // mean of the within class
-        m_tmp_D_1 += Si;
-        ++counter;
-      }
-    }
-    m_tmp_D_1 /= static_cast<double>(Nsamples);
-
-    // b/ Removes the mean
-    S = S(bi,bj) - m_tmp_D_1(bi);
-
-    // c/ SVD of the between-class scatter matrix
-    blitz::Array<double,2> U(m_dim_d, std::min(m_dim_d, Nsamples));
-    blitz::Array<double,1> sigma(std::min(m_dim_d, Nsamples));
-    bob::math::svd(S, U, sigma);
-
-    // d/ Updates G
-    blitz::Array<double,2> Uslice = U(blitz::Range::all(), blitz::Range(0,m_dim_g-1));
-    blitz::Array<double,1> sigma_slice = sigma(blitz::Range(0,m_dim_g-1));
-    sigma_slice = blitz::sqrt(sigma_slice);
-    G = Uslice(bi,bj) / sigma_slice(bj);
-  }
-  // otherwise: random initialization
-  else {
-    // G initialization
-    bob::core::array::randn(*m_rng, G);
-    G *= m_initG_ratio;
-  }
-}
-
-void bob::learn::misc::PLDATrainer::initSigma(bob::learn::misc::PLDABase& machine,
-  const std::vector<blitz::Array<double,2> >& v_ar)
-{
-  blitz::Array<double,1>& sigma = machine.updateSigma();
-  blitz::Range a = blitz::Range::all();
-
-  // 1: percentage of the variance of G
-  if (m_initSigma_method == bob::learn::misc::PLDATrainer::VARIANCE_G) {
-    const blitz::Array<double,2>& G = machine.getG();
-    blitz::secondIndex bj;
-    m_tmp_D_1 = blitz::mean(G, bj);
-    // Updates sigma
-    sigma = blitz::fabs(m_tmp_D_1) * m_initSigma_ratio;
-  }
-  // 2: constant value
-  else if (m_initSigma_method == bob::learn::misc::PLDATrainer::CONSTANT) {
-    sigma = m_initSigma_ratio;
-  }
-  // 3: percentage of the variance of the data
-  else if (m_initSigma_method == bob::learn::misc::PLDATrainer::VARIANCE_DATA) {
-    // a/ Computes the global mean
-    //    m_tmp_D_1 = 1/N sum_i x_i
-    m_tmp_D_1 = 0.;
-    size_t Ns = 0;
-    for (size_t i=0; i<v_ar.size(); ++i)
-    {
-      for (int j=0; j<v_ar[i].extent(0); ++j)
-        m_tmp_D_1 += v_ar[i](j,a);
-      Ns += v_ar[i].extent(0);
-    }
-    m_tmp_D_1 /= static_cast<double>(Ns);
-
-    // b/ Computes the variance:
-    m_tmp_D_2 = 0.;
-    for (size_t i=0; i<v_ar.size(); ++i)
-      for (int j=0; j<v_ar[i].extent(0); ++j)
-        m_tmp_D_2 += blitz::pow2(v_ar[i](j,a) - m_tmp_D_1);
-    sigma = m_initSigma_ratio * m_tmp_D_2 / static_cast<double>(Ns-1);
-  }
-  // otherwise: random initialization
-  else {
-    // sigma initialization
-    bob::core::array::randn(*m_rng, sigma);
-    sigma = blitz::fabs(sigma) * m_initSigma_ratio;
-  }
-  // Apply variance threshold
-  machine.applyVarianceThreshold();
-}
-
-void bob::learn::misc::PLDATrainer::eStep(bob::learn::misc::PLDABase& machine,
-  const std::vector<blitz::Array<double,2> >& v_ar)
-{
-  // Precomputes useful variables using current estimates of F,G, and sigma
-  precomputeFromFGSigma(machine);
-  // Gets the mean mu from the machine
-  const blitz::Array<double,1>& mu = machine.getMu();
-  const blitz::Array<double,2>& alpha = machine.getAlpha();
-  const blitz::Array<double,2>& F = machine.getF();
-  const blitz::Array<double,2>& FtBeta = machine.getFtBeta();
-  const blitz::Array<double,2>& GtISigma = machine.getGtISigma();
-  blitz::Range a = blitz::Range::all();
-
-  // blitz indices
-  blitz::firstIndex bi;
-  blitz::secondIndex bj;
-  // Initializes sum of z second order statistics to 0
-  m_cache_sum_z_second_order = 0.;
-  for (size_t i=0; i<v_ar.size(); ++i)
-  {
-    // Computes expectation of z_ij = [h_i w_ij]
-    // 1/a/ Computes expectation of h_i
-    // Loop over the samples
-    m_tmp_nf_1 = 0.;
-    for (int j=0; j<v_ar[i].extent(0); ++j)
-    {
-      // m_tmp_D_1 = x_sj-mu
-      m_tmp_D_1 = v_ar[i](j,a) - mu;
-
-      // m_tmp_nf_2 = F^T.beta.(x_sj-mu)
-      bob::math::prod(FtBeta, m_tmp_D_1, m_tmp_nf_2);
-      // m_tmp_nf_1 = sum_j F^T.beta.(x_sj-mu)
-      m_tmp_nf_1 += m_tmp_nf_2;
-    }
-    const blitz::Array<double,2>& gamma_a = machine.getAddGamma(v_ar[i].extent(0));
-    blitz::Range r_hi(0, m_dim_f-1);
-    // m_tmp_nf_2 = E(h_i) = gamma_A  sum_j F^T.beta.(x_sj-mu)
-    bob::math::prod(gamma_a, m_tmp_nf_1, m_tmp_nf_2);
-
-    // 1/b/ Precomputes: m_tmp_D_2 = F.E{h_i}
-    bob::math::prod(F, m_tmp_nf_2, m_tmp_D_2);
-
-    // 2/ First and second order statistics of z
-    // Precomputed values
-    blitz::Array<double,2>& zeta_a = m_cache_zeta[v_ar[i].extent(0)];
-    blitz::Array<double,2>& iota_a = m_cache_iota[v_ar[i].extent(0)];
-    blitz::Array<double,2> iotat_a = iota_a.transpose(1,0);
-
-    // Extracts statistics of z_ij = [h_i w_ij] from y_i = [h_i w_i1 ... w_iJ]
-    blitz::Range r1(0, m_dim_f-1);
-    blitz::Range r2(m_dim_f, m_dim_f+m_dim_g-1);
-    for (int j=0; j<v_ar[i].extent(0); ++j)
-    {
-      // 1/ First order statistics of z
-      blitz::Array<double,1> z_first_order_ij_1 = m_cache_z_first_order[i](j,r1);
-      z_first_order_ij_1 = m_tmp_nf_2; // E{h_i}
-      // m_tmp_D_1 = x_sj - mu - F.E{h_i}
-      m_tmp_D_1 = v_ar[i](j,a) - mu - m_tmp_D_2;
-      // m_tmp_ng_1 = G^T.sigma^-1.(x_sj-mu-fhi)
-      bob::math::prod(GtISigma, m_tmp_D_1, m_tmp_ng_1);
-      // z_first_order_ij_2 = (Id+G^T.sigma^-1.G)^-1.G^T.sigma^-1.(x_sj-mu) = E{w_ij}
-      blitz::Array<double,1> z_first_order_ij_2 = m_cache_z_first_order[i](j,r2);
-      bob::math::prod(alpha, m_tmp_ng_1, z_first_order_ij_2);
-
-      // 2/ Second order statistics of z
-      blitz::Array<double,2> z_sum_so_11 = m_cache_sum_z_second_order(r1,r1);
-      blitz::Array<double,2> z_sum_so_12 = m_cache_sum_z_second_order(r1,r2);
-      blitz::Array<double,2> z_sum_so_21 = m_cache_sum_z_second_order(r2,r1);
-      blitz::Array<double,2> z_sum_so_22 = m_cache_sum_z_second_order(r2,r2);
-      if (m_use_sum_second_order)
-      {
-        z_sum_so_11 += gamma_a + z_first_order_ij_1(bi) * z_first_order_ij_1(bj);
-        z_sum_so_12 += iota_a + z_first_order_ij_1(bi) * z_first_order_ij_2(bj);
-        z_sum_so_21 += iotat_a + z_first_order_ij_2(bi) * z_first_order_ij_1(bj);
-        z_sum_so_22 += zeta_a + z_first_order_ij_2(bi) * z_first_order_ij_2(bj);
-      }
-      else
-      {
-        blitz::Array<double,2> z_so_11 = m_cache_z_second_order[i](j,r1,r1);
-        z_so_11 = gamma_a + z_first_order_ij_1(bi) * z_first_order_ij_1(bj);
-        z_sum_so_11 += z_so_11;
-        blitz::Array<double,2> z_so_12 = m_cache_z_second_order[i](j,r1,r2);
-        z_so_12 = iota_a + z_first_order_ij_1(bi) * z_first_order_ij_2(bj);
-        z_sum_so_12 += z_so_12;
-        blitz::Array<double,2> z_so_21 = m_cache_z_second_order[i](j,r2,r1);
-        z_so_21 = iotat_a + z_first_order_ij_2(bi) * z_first_order_ij_1(bj);
-        z_sum_so_21 += z_so_21;
-        blitz::Array<double,2> z_so_22 = m_cache_z_second_order[i](j,r2,r2);
-        z_so_22 = zeta_a + z_first_order_ij_2(bi) * z_first_order_ij_2(bj);
-        z_sum_so_22 += z_so_22;
-      }
-    }
-  }
-}
-
-void bob::learn::misc::PLDATrainer::precomputeFromFGSigma(bob::learn::misc::PLDABase& machine)
-{
-  // Blitz compatibility: ugly fix (const_cast, as old blitz version does not
-  // provide a non-const version of transpose())
-  const blitz::Array<double,2>& F = machine.getF();
-  const blitz::Array<double,2> Ft = const_cast<blitz::Array<double,2>&>(F).transpose(1,0);
-  const blitz::Array<double,2>& Gt_isigma = machine.getGtISigma();
-  const blitz::Array<double,2> Gt_isigma_t = const_cast<blitz::Array<double,2>&>(Gt_isigma).transpose(1,0);
-  const blitz::Array<double,2>& alpha = machine.getAlpha();
-
-  // Precomputes F, G and sigma-based expressions
-  bob::math::prod(Ft, Gt_isigma_t, m_cache_Ft_isigma_G);
-  bob::math::prod(m_cache_Ft_isigma_G, alpha, m_cache_eta);
-  blitz::Array<double,2> etat = m_cache_eta.transpose(1,0);
-
-  // Reinitializes all the zeta_a and iota_a
-  std::map<size_t,bool>::iterator it;
-  for (it=m_cache_n_samples_in_training.begin(); it!=m_cache_n_samples_in_training.end();
-      ++it)
-    it->second = false;
-
-  for (it=m_cache_n_samples_in_training.begin(); it!=m_cache_n_samples_in_training.end();
-      ++it)
-  {
-    size_t n_i = it->first;
-    // Precomputes zeta and iota for identities with q_i training samples,
-    // if not already done
-    if (!it->second)
-    {
-      const blitz::Array<double,2>& gamma_a = machine.getAddGamma(n_i);
-      blitz::Array<double,2>& zeta_a = m_cache_zeta[n_i];
-      blitz::Array<double,2>& iota_a = m_cache_iota[n_i];
-      bob::math::prod(gamma_a, m_cache_eta, iota_a);
-      bob::math::prod(etat, iota_a, zeta_a);
-      zeta_a += alpha;
-      iota_a = - iota_a;
-      // Now up to date
-      it->second = true;
-    }
-  }
-}
-
-void bob::learn::misc::PLDATrainer::precomputeLogLike(bob::learn::misc::PLDABase& machine,
-  const std::vector<blitz::Array<double,2> >& v_ar)
-{
-  // Precomputes the log determinant of alpha and sigma
-  machine.precomputeLogLike();
-
-  // Precomputes the log likelihood constant term
-  std::map<size_t,bool>::iterator it;
-  for (it=m_cache_n_samples_in_training.begin();
-       it!=m_cache_n_samples_in_training.end(); ++it)
-  {
-    // Precomputes the log likelihood constant term for identities with q_i
-    // training samples, if not already done
-    machine.getAddLogLikeConstTerm(it->first);
-  }
-}
-
-
-void bob::learn::misc::PLDATrainer::mStep(bob::learn::misc::PLDABase& machine,
-  const std::vector<blitz::Array<double,2> >& v_ar)
-{
-  // 1/ New estimate of B = {F G}
-  updateFG(machine, v_ar);
-
-  // 2/ New estimate of Sigma
-  updateSigma(machine, v_ar);
-
-  // 3/ Precomputes new values after updating F, G and sigma
-  machine.precompute();
-  // Precomputes useful variables using current estimates of F,G, and sigma
-  precomputeFromFGSigma(machine);
-}
-
-void bob::learn::misc::PLDATrainer::updateFG(bob::learn::misc::PLDABase& machine,
-  const std::vector<blitz::Array<double,2> >& v_ar)
-{
-  /// Computes the B matrix (B = [F G])
-  /// B = (sum_ij (x_ij-mu).E{z_i}^T).(sum_ij E{z_i.z_i^T})^-1
-
-  // 1/ Computes the numerator (sum_ij (x_ij-mu).E{z_i}^T)
-  // Gets the mean mu from the machine
-  const blitz::Array<double,1>& mu = machine.getMu();
-  blitz::Range a = blitz::Range::all();
-  m_tmp_D_nfng_2 = 0.;
-  for (size_t i=0; i<v_ar.size(); ++i)
-  {
-    // Loop over the samples
-    for (int j=0; j<v_ar[i].extent(0); ++j)
-    {
-      // m_tmp_D_1 = x_sj-mu
-      m_tmp_D_1 = v_ar[i](j,a) - mu;
-      // z_first_order_ij = E{z_ij}
-      blitz::Array<double,1> z_first_order_ij = m_cache_z_first_order[i](j, a);
-      // m_tmp_D_nfng_1 = (x_sj-mu).E{z_ij}^T
-      bob::math::prod(m_tmp_D_1, z_first_order_ij, m_tmp_D_nfng_1);
-      m_tmp_D_nfng_2 += m_tmp_D_nfng_1;
-    }
-  }
-
-  // 2/ Computes the denominator inv(sum_ij E{z_i.z_i^T})
-  bob::math::inv(m_cache_sum_z_second_order, m_tmp_nfng_nfng);
-
-  // 3/ Computes numerator / denominator
-  bob::math::prod(m_tmp_D_nfng_2, m_tmp_nfng_nfng, m_cache_B);
-
-  // 4/ Updates the machine
-  blitz::Array<double, 2>& F = machine.updateF();
-  blitz::Array<double, 2>& G = machine.updateG();
-  F = m_cache_B(a, blitz::Range(0, m_dim_f-1));
-  G = m_cache_B(a, blitz::Range(m_dim_f, m_dim_f+m_dim_g-1));
-}
-
-void bob::learn::misc::PLDATrainer::updateSigma(bob::learn::misc::PLDABase& machine,
-  const std::vector<blitz::Array<double,2> >& v_ar)
-{
-  /// Computes the Sigma matrix
-  /// Sigma = 1/IJ sum_ij Diag{(x_ij-mu).(x_ij-mu)^T - B.E{z_i}.(x_ij-mu)^T}
-
-  // Gets the mean mu and the matrix sigma from the machine
-  blitz::Array<double,1>& sigma = machine.updateSigma();
-  const blitz::Array<double,1>& mu = machine.getMu();
-  blitz::Range a = blitz::Range::all();
-
-  sigma = 0.;
-  size_t n_IJ=0; /// counts the number of samples
-  for (size_t i=0; i<v_ar.size(); ++i)
-  {
-    // Loop over the samples
-    for (int j=0; j<v_ar[i].extent(0); ++j)
-    {
-      // m_tmp_D_1 = x_ij-mu
-      m_tmp_D_1 = v_ar[i](j,a) - mu;
-      // sigma += Diag{(x_ij-mu).(x_ij-mu)^T}
-      sigma += blitz::pow2(m_tmp_D_1);
-
-      // z_first_order_ij = E{z_ij}
-      blitz::Array<double,1> z_first_order_ij = m_cache_z_first_order[i](j,a);
-      // m_tmp_D_2 = B.E{z_ij}
-      bob::math::prod(m_cache_B, z_first_order_ij, m_tmp_D_2);
-      // sigma -= Diag{B.E{z_ij}.(x_ij-mu)
-      sigma -= (m_tmp_D_1 * m_tmp_D_2);
-      ++n_IJ;
-    }
-  }
-  // Normalizes by the number of samples
-  sigma /= static_cast<double>(n_IJ);
-  // Apply variance threshold
-  machine.applyVarianceThreshold();
-}
-
-
-void bob::learn::misc::PLDATrainer::enrol(bob::learn::misc::PLDAMachine& plda_machine,
-  const blitz::Array<double,2>& ar) const
-{
-  // Gets dimension
-  const size_t dim_d = ar.extent(1);
-  const int n_samples = ar.extent(0);
-  // Compare the dimensionality from the base trainer/machine with the one
-  // of the enrollment samples
-  if (plda_machine.getDimD() != dim_d) {
-    boost::format m("the extent of the D dimension of the input machine (%u) does not match the input sample (%u)");
-    m % plda_machine.getDimD() % dim_d;
-    throw std::runtime_error(m.str());
-  }
-  const size_t dim_f = plda_machine.getDimF();
-
-  // Resize working arrays
-  m_tmp_D_1.resize(dim_d);
-  m_tmp_D_2.resize(dim_d);
-  m_tmp_nf_1.resize(dim_f);
-
-  // Useful values from the base machine
-  blitz::Array<double,1>& weighted_sum = plda_machine.updateWeightedSum();
-  const blitz::Array<double,1>& mu = plda_machine.getPLDABase()->getMu();
-  const blitz::Array<double,2>& beta = plda_machine.getPLDABase()->getBeta();
-  const blitz::Array<double,2>& FtBeta = plda_machine.getPLDABase()->getFtBeta();
-
-  // Updates the PLDA machine
-  plda_machine.setNSamples(n_samples);
-  double terma = 0.;
-  weighted_sum = 0.;
-  blitz::Range a = blitz::Range::all();
-  for (int i=0; i<n_samples; ++i) {
-    m_tmp_D_1 =  ar(i,a) - mu;
-    // a/ weighted sum
-    bob::math::prod(FtBeta, m_tmp_D_1, m_tmp_nf_1);
-    weighted_sum += m_tmp_nf_1;
-    // b/ first xi dependent term of the log likelihood
-    bob::math::prod(beta, m_tmp_D_1, m_tmp_D_2);
-    terma += -1 / 2. * blitz::sum(m_tmp_D_1 * m_tmp_D_2);
-  }
-  plda_machine.setWSumXitBetaXi(terma);
-
-  // Adds the precomputed values for the cases N and N+1 if not already
-  // in the base machine (used by the forward function, 1 already added)
-  plda_machine.getAddGamma(n_samples);
-  plda_machine.getAddLogLikeConstTerm(n_samples);
-  plda_machine.getAddGamma(n_samples+1);
-  plda_machine.getAddLogLikeConstTerm(n_samples+1);
-  plda_machine.setLogLikelihood(plda_machine.computeLogLikelihood(
-                                  blitz::Array<double,2>(0,dim_d),true));
-}
diff --git a/bob/learn/misc/cpp/ZTNorm.cpp b/bob/learn/misc/cpp/ZTNorm.cpp
deleted file mode 100644
index 504f31c47ea574be1b61fd80b6056fbfe3c70124..0000000000000000000000000000000000000000
--- a/bob/learn/misc/cpp/ZTNorm.cpp
+++ /dev/null
@@ -1,182 +0,0 @@
-/**
- * @date Tue Jul 19 15:33:20 2011 +0200
- * @author Francois Moulin <Francois.Moulin@idiap.ch>
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <bob.learn.misc/ZTNorm.h>
-#include <bob.core/assert.h>
-#include <limits>
-
-
-static void _ztNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
-            const blitz::Array<double,2>* rawscores_zprobes_vs_models,
-            const blitz::Array<double,2>* rawscores_probes_vs_tmodels,
-            const blitz::Array<double,2>* rawscores_zprobes_vs_tmodels,
-            const blitz::Array<bool,2>* mask_zprobes_vs_tmodels_istruetrial,
-            blitz::Array<double,2>& scores)
-{
-  // Rename variables
-  const blitz::Array<double,2>& A = rawscores_probes_vs_models;
-  const blitz::Array<double,2>* B = rawscores_zprobes_vs_models;
-  const blitz::Array<double,2>* C = rawscores_probes_vs_tmodels;
-  const blitz::Array<double,2>* D = rawscores_zprobes_vs_tmodels;
-
-  // Compute the sizes
-  int size_eval  = A.extent(0);
-  int size_enrol = A.extent(1);
-  int size_tnorm = (C ? C->extent(0) : 0);
-  int size_znorm = (B ? B->extent(1) : 0);
-
-  // Check the inputs
-  bob::core::array::assertSameDimensionLength(A.extent(0), size_eval);
-  bob::core::array::assertSameDimensionLength(A.extent(1), size_enrol);
-
-  if (B) {
-    bob::core::array::assertSameDimensionLength(B->extent(1), size_znorm);
-    if (size_znorm > 0)
-      bob::core::array::assertSameDimensionLength(B->extent(0), size_eval);
-  }
-
-  if (C) {
-    bob::core::array::assertSameDimensionLength(C->extent(0), size_tnorm);
-    if (size_tnorm > 0)
-      bob::core::array::assertSameDimensionLength(C->extent(1), size_enrol);
-  }
-
-  if (D && size_znorm > 0 && size_tnorm > 0) {
-    bob::core::array::assertSameDimensionLength(D->extent(0), size_tnorm);
-    bob::core::array::assertSameDimensionLength(D->extent(1), size_znorm);
-  }
-
-  if (mask_zprobes_vs_tmodels_istruetrial) {
-    bob::core::array::assertSameDimensionLength(mask_zprobes_vs_tmodels_istruetrial->extent(0), size_tnorm);
-    bob::core::array::assertSameDimensionLength(mask_zprobes_vs_tmodels_istruetrial->extent(1), size_znorm);
-  }
-
-  bob::core::array::assertSameDimensionLength(scores.extent(0), size_eval);
-  bob::core::array::assertSameDimensionLength(scores.extent(1), size_enrol);
-
-  // Declare needed IndexPlaceholder
-  blitz::firstIndex ii;
-  blitz::secondIndex jj;
-
-  // Constant to check if the std is close to 0.
-  const double eps = std::numeric_limits<double>::min();
-
-  // zA
-  blitz::Array<double,2> zA(A.shape());
-  if (B && size_znorm > 0) {
-    // Znorm  -->      zA  = (A - mean(B) ) / std(B)    [znorm on oringinal scores]
-    // mean(B)
-    blitz::Array<double,1> mean_B(blitz::mean(*B, jj));
-    // std(B)
-    blitz::Array<double,2> B2n(B->shape());
-    B2n = blitz::pow2((*B)(ii, jj) - mean_B(ii));
-    blitz::Array<double,1> std_B(B->extent(0));
-    if(size_znorm>1)
-      std_B = blitz::sqrt(blitz::sum(B2n, jj) / (size_znorm - 1));
-    else // 1 single value -> std = 0
-      std_B = 0;
-    std_B = blitz::where( std_B <= eps, 1., std_B);
-
-    zA = (A(ii, jj) - mean_B(ii)) / std_B(ii);
-  }
-  else
-    zA = A;
-
-  blitz::Array<double,2> zC(size_tnorm, size_enrol);
-  if (D && size_tnorm > 0 && size_znorm > 0) {
-    blitz::Array<double,1> mean_Dimp(size_tnorm);
-    blitz::Array<double,1> std_Dimp(size_tnorm);
-
-    // Compute mean_Dimp and std_Dimp = D only with impostors
-    for (int i = 0; i < size_tnorm; ++i) {
-      double sum = 0;
-      double sumsq = 0;
-      double count = 0;
-      for (int j = 0; j < size_znorm; ++j) {
-        bool keep;
-        // The second part is never executed if mask_zprobes_vs_tmodels_istruetrial==NULL
-        keep = (mask_zprobes_vs_tmodels_istruetrial == NULL) || !(*mask_zprobes_vs_tmodels_istruetrial)(i, j); //tnorm_models_spk_ids(i) != znorm_tests_spk_ids(j);
-
-        double value = keep * (*D)(i, j);
-        sum += value;
-        sumsq += value*value;
-        count += keep;
-      }
-
-      double mean = sum / count;
-      mean_Dimp(i) = mean;
-      if (count > 1)
-        std_Dimp(i) = sqrt((sumsq - count * mean * mean) / (count -1));
-      else // 1 single value -> std = 0
-        std_Dimp(i) = 0;
-    }
-
-    // zC  = (C - mean(D)) / std(D)     [znorm the tnorm scores]
-    std_Dimp = blitz::where( std_Dimp <= eps, 1., std_Dimp);
-    zC = ((*C)(ii, jj) - mean_Dimp(ii)) / std_Dimp(ii);
-  }
-  else if (C && size_tnorm > 0)
-    zC = *C;
-
-  if (C && size_tnorm > 0)
-  {
-    blitz::Array<double,1> mean_zC(size_enrol);
-    blitz::Array<double,1> std_zC(size_enrol);
-
-    // ztA = (zA - mean(zC)) / std(zC)  [ztnorm on eval scores]
-    mean_zC = blitz::mean(zC(jj, ii), jj);
-    if (size_tnorm > 1)
-      std_zC = sqrt(blitz::sum(pow(zC(jj, ii) - mean_zC(ii), 2) , jj) / (size_tnorm - 1));
-    else // 1 single value -> std = 0
-      std_zC = 0;
-    std_zC = blitz::where( std_zC <= eps, 1., std_zC);
-
-    // Normalised scores
-    scores = (zA(ii, jj) - mean_zC(jj)) /  std_zC(jj);
-  }
-  else
-    scores = zA;
-}
-
-void bob::learn::misc::ztNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
-            const blitz::Array<double,2>& rawscores_zprobes_vs_models,
-            const blitz::Array<double,2>& rawscores_probes_vs_tmodels,
-            const blitz::Array<double,2>& rawscores_zprobes_vs_tmodels,
-            const blitz::Array<bool,2>& mask_zprobes_vs_tmodels_istruetrial,
-            blitz::Array<double,2>& scores)
-{
-  _ztNorm(rawscores_probes_vs_models, &rawscores_zprobes_vs_models, &rawscores_probes_vs_tmodels,
-                 &rawscores_zprobes_vs_tmodels, &mask_zprobes_vs_tmodels_istruetrial, scores);
-}
-
-void bob::learn::misc::ztNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
-            const blitz::Array<double,2>& rawscores_zprobes_vs_models,
-            const blitz::Array<double,2>& rawscores_probes_vs_tmodels,
-            const blitz::Array<double,2>& rawscores_zprobes_vs_tmodels,
-            blitz::Array<double,2>& scores)
-{
-  _ztNorm(rawscores_probes_vs_models, &rawscores_zprobes_vs_models, &rawscores_probes_vs_tmodels,
-                 &rawscores_zprobes_vs_tmodels, NULL, scores);
-}
-
-void bob::learn::misc::tNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
-           const blitz::Array<double,2>& rawscores_probes_vs_tmodels,
-           blitz::Array<double,2>& scores)
-{
-  _ztNorm(rawscores_probes_vs_models, NULL, &rawscores_probes_vs_tmodels,
-                 NULL, NULL, scores);
-}
-
-void bob::learn::misc::zNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
-           const blitz::Array<double,2>& rawscores_zprobes_vs_models,
-           blitz::Array<double,2>& scores)
-{
-  _ztNorm(rawscores_probes_vs_models, &rawscores_zprobes_vs_models, NULL,
-                 NULL, NULL, scores);
-}
-
diff --git a/bob/learn/misc/data/data.hdf5 b/bob/learn/misc/data/data.hdf5
deleted file mode 100644
index 7c406233e19c49f5ab6e2c26d32257fc4e47e54f..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/data.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/dataNormalized.hdf5 b/bob/learn/misc/data/dataNormalized.hdf5
deleted file mode 100644
index ac8d0302ebb312d35a8ee43c1c6195b899643733..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/dataNormalized.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/dataforMAP.hdf5 b/bob/learn/misc/data/dataforMAP.hdf5
deleted file mode 100644
index 9cd7bfe8533daab0a21ae20d342281ecf1afa977..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/dataforMAP.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/faithful.torch3.hdf5 b/bob/learn/misc/data/faithful.torch3.hdf5
deleted file mode 100644
index a508318e6e8bcc528674ab7f9e3594f73ddb8367..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/faithful.torch3.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/faithful.torch3_f64.hdf5 b/bob/learn/misc/data/faithful.torch3_f64.hdf5
deleted file mode 100644
index fe7f81b2bce427f6ab367cb6c7a2a6c1524e0528..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/faithful.torch3_f64.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/gmm.init_means.hdf5 b/bob/learn/misc/data/gmm.init_means.hdf5
deleted file mode 100644
index 0b88738f3883e9b20c8eea20e2b278bf364498b4..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/gmm.init_means.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/gmm.init_variances.hdf5 b/bob/learn/misc/data/gmm.init_variances.hdf5
deleted file mode 100644
index d0687a2ffc6bab5ea8b111c60cd112730af9b758..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/gmm.init_variances.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/gmm.init_weights.hdf5 b/bob/learn/misc/data/gmm.init_weights.hdf5
deleted file mode 100644
index 558faa66b67f5deb0550d2543372667ff45f1e70..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/gmm.init_weights.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/gmm_MAP.hdf5 b/bob/learn/misc/data/gmm_MAP.hdf5
deleted file mode 100644
index 91c5e69141e3042ef5d211fc4098a8d59649d62d..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/gmm_MAP.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/gmm_ML.hdf5 b/bob/learn/misc/data/gmm_ML.hdf5
deleted file mode 100644
index 238cd7e14f5e4ab92e505221f200cdba368cb593..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/gmm_ML.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/gmm_ML_32bit_debug.hdf5 b/bob/learn/misc/data/gmm_ML_32bit_debug.hdf5
deleted file mode 100644
index 275381b7e7573e060009a15220f092cfa323a1eb..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/gmm_ML_32bit_debug.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/gmm_ML_32bit_release.hdf5 b/bob/learn/misc/data/gmm_ML_32bit_release.hdf5
deleted file mode 100644
index 438e9932cecf179d1b834e2f5c19d39a7c906cf3..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/gmm_ML_32bit_release.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/means.hdf5 b/bob/learn/misc/data/means.hdf5
deleted file mode 100644
index 060afde0fb2777065d02c85baf8a34ec1d509fea..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/means.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/meansAfterKMeans.hdf5 b/bob/learn/misc/data/meansAfterKMeans.hdf5
deleted file mode 100644
index 9552dd832998ee19062e4c0b28b335691af25269..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/meansAfterKMeans.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/meansAfterMAP.hdf5 b/bob/learn/misc/data/meansAfterMAP.hdf5
deleted file mode 100644
index ac4cb9383d10c223b26d16d686910c430cf71197..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/meansAfterMAP.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/meansAfterML.hdf5 b/bob/learn/misc/data/meansAfterML.hdf5
deleted file mode 100644
index 857bbe39c29cbb76f18aed3798ce484ed2bcb67d..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/meansAfterML.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/new_adapted_mean.hdf5 b/bob/learn/misc/data/new_adapted_mean.hdf5
deleted file mode 100644
index fc4a8ee30af0d8531302133b2bd2595df07139b8..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/new_adapted_mean.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/samplesFrom2G_f64.hdf5 b/bob/learn/misc/data/samplesFrom2G_f64.hdf5
deleted file mode 100644
index 9ef47fd649fde13d36a15a6ebde122c31047b31b..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/samplesFrom2G_f64.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/stats.hdf5 b/bob/learn/misc/data/stats.hdf5
deleted file mode 100644
index c4a13700ec20079fdaacbd3841e8289910e9dd82..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/stats.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/variances.hdf5 b/bob/learn/misc/data/variances.hdf5
deleted file mode 100644
index c9d6d17bcf73be3bb7800d14604a0201b16f4ada..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/variances.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/variancesAfterKMeans.hdf5 b/bob/learn/misc/data/variancesAfterKMeans.hdf5
deleted file mode 100644
index 2aee23c0ef021e383d34ef2ca47175ecf165a6e9..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/variancesAfterKMeans.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/variancesAfterMAP.hdf5 b/bob/learn/misc/data/variancesAfterMAP.hdf5
deleted file mode 100644
index 47bd4d5f823882eb7de61f3b67946c81acc0e82f..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/variancesAfterMAP.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/variancesAfterML.hdf5 b/bob/learn/misc/data/variancesAfterML.hdf5
deleted file mode 100644
index 472229290b53eb34728dac334a7addb635d314a0..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/variancesAfterML.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/weights.hdf5 b/bob/learn/misc/data/weights.hdf5
deleted file mode 100644
index 41b22801f28b4afc8b3a81daf5e594e85100f29f..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/weights.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/weightsAfterKMeans.hdf5 b/bob/learn/misc/data/weightsAfterKMeans.hdf5
deleted file mode 100644
index b241207eac61c8f47dcb0fafed293748108ba6d8..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/weightsAfterKMeans.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/weightsAfterMAP.hdf5 b/bob/learn/misc/data/weightsAfterMAP.hdf5
deleted file mode 100644
index b6e1f0497f91dfc26e137fc021f02431023db1a7..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/weightsAfterMAP.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/weightsAfterML.hdf5 b/bob/learn/misc/data/weightsAfterML.hdf5
deleted file mode 100644
index 0b3fc2551fc9d1eff310c7cd7c0a5e33d926f0e7..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/weightsAfterML.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/ztnorm_eval_eval.hdf5 b/bob/learn/misc/data/ztnorm_eval_eval.hdf5
deleted file mode 100644
index bc5771861bd444b1ba3d89c6c949e91e912136cf..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/ztnorm_eval_eval.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/ztnorm_eval_tnorm.hdf5 b/bob/learn/misc/data/ztnorm_eval_tnorm.hdf5
deleted file mode 100644
index d98b4d656bbe0a8c75a675ccc09834b30bc4bd00..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/ztnorm_eval_tnorm.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/ztnorm_result.hdf5 b/bob/learn/misc/data/ztnorm_result.hdf5
deleted file mode 100644
index 877c72c77fedb11fe7d39791823f42e58cce1e1c..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/ztnorm_result.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/ztnorm_znorm_eval.hdf5 b/bob/learn/misc/data/ztnorm_znorm_eval.hdf5
deleted file mode 100644
index 5d670ae0b3bd410c40e70b4697f0531fdc7bfb87..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/ztnorm_znorm_eval.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/data/ztnorm_znorm_tnorm.hdf5 b/bob/learn/misc/data/ztnorm_znorm_tnorm.hdf5
deleted file mode 100644
index e2f709ed9a33a014f64e6c69f15c3e549dc7e3ca..0000000000000000000000000000000000000000
Binary files a/bob/learn/misc/data/ztnorm_znorm_tnorm.hdf5 and /dev/null differ
diff --git a/bob/learn/misc/empca_trainer.cpp b/bob/learn/misc/empca_trainer.cpp
deleted file mode 100644
index 1b7a6963c8d22b90c1bc3b769e2d2d84213d2664..0000000000000000000000000000000000000000
--- a/bob/learn/misc/empca_trainer.cpp
+++ /dev/null
@@ -1,378 +0,0 @@
-/**
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- * @date Tue 03 Fev 11:22:00 2015
- *
- * @brief Python API for bob::learn::em
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "main.h"
-
-/******************************************************************/
-/************ Constructor Section *********************************/
-/******************************************************************/
-
-static auto EMPCATrainer_doc = bob::extension::ClassDoc(
-  BOB_EXT_MODULE_PREFIX "._EMPCATrainer",
-  ""
-
-).add_constructor(
-  bob::extension::FunctionDoc(
-    "__init__",
-    "Creates a EMPCATrainer",
-    "",
-    true
-  )
-  .add_prototype("convergence_threshold","")
-  .add_prototype("other","")
-  .add_prototype("","")
-
-  .add_parameter("other", ":py:class:`bob.learn.misc.EMPCATrainer`", "A EMPCATrainer object to be copied.")
-  .add_parameter("convergence_threshold", "double", "")
-
-);
-
-
-static int PyBobLearnMiscEMPCATrainer_init_copy(PyBobLearnMiscEMPCATrainerObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = EMPCATrainer_doc.kwlist(1);
-  PyBobLearnMiscEMPCATrainerObject* tt;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscEMPCATrainer_Type, &tt)){
-    EMPCATrainer_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::EMPCATrainer(*tt->cxx));
-  return 0;
-}
-
-static int PyBobLearnMiscEMPCATrainer_init_number(PyBobLearnMiscEMPCATrainerObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = EMPCATrainer_doc.kwlist(0);
-  double convergence_threshold    = 0.0001;
-  //Parsing the input argments
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "d", kwlist, &convergence_threshold))
-    return -1;
-
-  if(convergence_threshold < 0){
-    PyErr_Format(PyExc_TypeError, "convergence_threshold argument must be greater than to zero");
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::EMPCATrainer(convergence_threshold));
-  return 0;
-}
-
-static int PyBobLearnMiscEMPCATrainer_init(PyBobLearnMiscEMPCATrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-
-  switch (nargs) {
-
-    case 0:{ //default initializer ()
-      self->cxx.reset(new bob::learn::misc::EMPCATrainer());
-      return 0;
-    }
-    case 1:{
-      //Reading the input argument
-      PyObject* arg = 0;
-      if (PyTuple_Size(args))
-        arg = PyTuple_GET_ITEM(args, 0);
-      else {
-        PyObject* tmp = PyDict_Values(kwargs);
-        auto tmp_ = make_safe(tmp);
-        arg = PyList_GET_ITEM(tmp, 0);
-      }
-
-      // If the constructor input is EMPCATrainer object
-      if (PyBobLearnMiscEMPCATrainer_Check(arg))
-        return PyBobLearnMiscEMPCATrainer_init_copy(self, args, kwargs);
-      else if(PyString_Check(arg))
-        return PyBobLearnMiscEMPCATrainer_init_number(self, args, kwargs);
-    }
-    default:{
-      PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires 0 or 1 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
-      EMPCATrainer_doc.print_usage();
-      return -1;
-    }
-  }
-  BOB_CATCH_MEMBER("cannot create EMPCATrainer", 0)
-  return 0;
-}
-
-
-static void PyBobLearnMiscEMPCATrainer_delete(PyBobLearnMiscEMPCATrainerObject* self) {
-  self->cxx.reset();
-  Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-
-int PyBobLearnMiscEMPCATrainer_Check(PyObject* o) {
-  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnMiscEMPCATrainer_Type));
-}
-
-
-static PyObject* PyBobLearnMiscEMPCATrainer_RichCompare(PyBobLearnMiscEMPCATrainerObject* self, PyObject* other, int op) {
-  BOB_TRY
-
-  if (!PyBobLearnMiscEMPCATrainer_Check(other)) {
-    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
-    return 0;
-  }
-  auto other_ = reinterpret_cast<PyBobLearnMiscEMPCATrainerObject*>(other);
-  switch (op) {
-    case Py_EQ:
-      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
-    case Py_NE:
-      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
-    default:
-      Py_INCREF(Py_NotImplemented);
-      return Py_NotImplemented;
-  }
-  BOB_CATCH_MEMBER("cannot compare EMPCATrainer objects", 0)
-}
-
-
-/******************************************************************/
-/************ Variables Section ***********************************/
-/******************************************************************/
-
-
-/***** rng *****/
-static auto rng = bob::extension::VariableDoc(
-  "rng",
-  "str",
-  "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.",
-  ""
-);
-PyObject* PyBobLearnMiscEMPCATrainer_getRng(PyBobLearnMiscEMPCATrainerObject* self, void*) {
-  BOB_TRY
-  //Allocating the correspondent python object
-  
-  PyBoostMt19937Object* retval =
-    (PyBoostMt19937Object*)PyBoostMt19937_Type.tp_alloc(&PyBoostMt19937_Type, 0);
-
-  retval->rng = self->cxx->getRng().get();
-  return Py_BuildValue("O", retval);
-  BOB_CATCH_MEMBER("Rng method could not be read", 0)
-}
-int PyBobLearnMiscEMPCATrainer_setRng(PyBobLearnMiscEMPCATrainerObject* self, PyObject* value, void*) {
-  BOB_TRY
-
-  if (!PyBoostMt19937_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects an PyBoostMt19937_Check", Py_TYPE(self)->tp_name, rng.name());
-    return -1;
-  }
-
-  PyBoostMt19937Object* boostObject = 0;
-  PyBoostMt19937_Converter(value, &boostObject);
-  self->cxx->setRng((boost::shared_ptr<boost::mt19937>)boostObject->rng);
-
-  return 0;
-  BOB_CATCH_MEMBER("Rng could not be set", 0)
-}
-
-
-
-static PyGetSetDef PyBobLearnMiscEMPCATrainer_getseters[] = { 
-  {
-   rng.name(),
-   (getter)PyBobLearnMiscEMPCATrainer_getRng,
-   (setter)PyBobLearnMiscEMPCATrainer_setRng,
-   rng.doc(),
-   0
-  },
-  {0}  // Sentinel
-};
-
-
-/******************************************************************/
-/************ Functions Section ***********************************/
-/******************************************************************/
-
-/*** initialize ***/
-static auto initialize = bob::extension::FunctionDoc(
-  "initialize",
-  "",
-  "",
-  true
-)
-.add_prototype("linear_machine,data")
-.add_parameter("linear_machine", ":py:class:`bob.learn.linear.Machine`", "LinearMachine Object")
-.add_parameter("data", "array_like <float, 2D>", "Input data");
-static PyObject* PyBobLearnMiscEMPCATrainer_initialize(PyBobLearnMiscEMPCATrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = initialize.kwlist(0);
-
-  PyBobLearnLinearMachineObject* linear_machine = 0;
-  PyBlitzArrayObject* data                          = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnLinearMachine_Type, &linear_machine,
-                                                                 &PyBlitzArray_Converter, &data)) Py_RETURN_NONE;
-  auto data_ = make_safe(data);
-
-  self->cxx->initialize(*linear_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
-
-  BOB_CATCH_MEMBER("cannot perform the initialize method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** eStep ***/
-static auto eStep = bob::extension::FunctionDoc(
-  "eStep",
-  "",
-  "",
-  true
-)
-.add_prototype("linear_machine,data")
-.add_parameter("linear_machine", ":py:class:`bob.learn.linear.Machine`", "LinearMachine Object")
-.add_parameter("data", "array_like <float, 2D>", "Input data");
-static PyObject* PyBobLearnMiscEMPCATrainer_eStep(PyBobLearnMiscEMPCATrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = eStep.kwlist(0);
-
-  PyBobLearnLinearMachineObject* linear_machine;
-  PyBlitzArrayObject* data = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnLinearMachine_Type, &linear_machine,
-                                                                 &PyBlitzArray_Converter, &data)) Py_RETURN_NONE;
-  auto data_ = make_safe(data);
-
-  self->cxx->eStep(*linear_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
-
-
-  BOB_CATCH_MEMBER("cannot perform the eStep method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** mStep ***/
-static auto mStep = bob::extension::FunctionDoc(
-  "mStep",
-  "",
-  0,
-  true
-)
-.add_prototype("linear_machine,data")
-.add_parameter("linear_machine", ":py:class:`bob.learn.misc.LinearMachine`", "LinearMachine Object")
-.add_parameter("data", "array_like <float, 2D>", "Input data");
-static PyObject* PyBobLearnMiscEMPCATrainer_mStep(PyBobLearnMiscEMPCATrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = mStep.kwlist(0);
-
-  PyBobLearnLinearMachineObject* linear_machine;
-  PyBlitzArrayObject* data = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnLinearMachine_Type, &linear_machine,
-                                                                 &PyBlitzArray_Converter, &data)) Py_RETURN_NONE;
-  auto data_ = make_safe(data);
-
-  self->cxx->mStep(*linear_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
-
-
-  BOB_CATCH_MEMBER("cannot perform the mStep method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** computeLikelihood ***/
-static auto compute_likelihood = bob::extension::FunctionDoc(
-  "compute_likelihood",
-  "",
-  0,
-  true
-)
-.add_prototype("linear_machine,data")
-.add_parameter("linear_machine", ":py:class:`bob.learn.misc.LinearMachine`", "LinearMachine Object");
-static PyObject* PyBobLearnMiscEMPCATrainer_compute_likelihood(PyBobLearnMiscEMPCATrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = compute_likelihood.kwlist(0);
-
-  PyBobLearnLinearMachineObject* linear_machine;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnLinearMachine_Type, &linear_machine)) Py_RETURN_NONE;
-
-  double value = self->cxx->computeLikelihood(*linear_machine->cxx);
-  return Py_BuildValue("d", value);
-
-  BOB_CATCH_MEMBER("cannot perform the computeLikelihood method", 0)
-}
-
-
-
-static PyMethodDef PyBobLearnMiscEMPCATrainer_methods[] = {
-  {
-    initialize.name(),
-    (PyCFunction)PyBobLearnMiscEMPCATrainer_initialize,
-    METH_VARARGS|METH_KEYWORDS,
-    initialize.doc()
-  },
-  {
-    eStep.name(),
-    (PyCFunction)PyBobLearnMiscEMPCATrainer_eStep,
-    METH_VARARGS|METH_KEYWORDS,
-    eStep.doc()
-  },
-  {
-    mStep.name(),
-    (PyCFunction)PyBobLearnMiscEMPCATrainer_mStep,
-    METH_VARARGS|METH_KEYWORDS,
-    mStep.doc()
-  },
-  {
-    compute_likelihood.name(),
-    (PyCFunction)PyBobLearnMiscEMPCATrainer_compute_likelihood,
-    METH_VARARGS|METH_KEYWORDS,
-    compute_likelihood.doc()
-  },
-  {0} /* Sentinel */
-};
-
-
-/******************************************************************/
-/************ Module Section **************************************/
-/******************************************************************/
-
-// Define the Gaussian type struct; will be initialized later
-PyTypeObject PyBobLearnMiscEMPCATrainer_Type = {
-  PyVarObject_HEAD_INIT(0,0)
-  0
-};
-
-bool init_BobLearnMiscEMPCATrainer(PyObject* module)
-{
-  // initialize the type struct
-  PyBobLearnMiscEMPCATrainer_Type.tp_name = EMPCATrainer_doc.name();
-  PyBobLearnMiscEMPCATrainer_Type.tp_basicsize = sizeof(PyBobLearnMiscEMPCATrainerObject);
-  PyBobLearnMiscEMPCATrainer_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;//Enable the class inheritance
-  PyBobLearnMiscEMPCATrainer_Type.tp_doc = EMPCATrainer_doc.doc();
-
-  // set the functions
-  PyBobLearnMiscEMPCATrainer_Type.tp_new = PyType_GenericNew;
-  PyBobLearnMiscEMPCATrainer_Type.tp_init = reinterpret_cast<initproc>(PyBobLearnMiscEMPCATrainer_init);
-  PyBobLearnMiscEMPCATrainer_Type.tp_dealloc = reinterpret_cast<destructor>(PyBobLearnMiscEMPCATrainer_delete);
-  PyBobLearnMiscEMPCATrainer_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnMiscEMPCATrainer_RichCompare);
-  PyBobLearnMiscEMPCATrainer_Type.tp_methods = PyBobLearnMiscEMPCATrainer_methods;
-  PyBobLearnMiscEMPCATrainer_Type.tp_getset = PyBobLearnMiscEMPCATrainer_getseters;
-  PyBobLearnMiscEMPCATrainer_Type.tp_call = reinterpret_cast<ternaryfunc>(PyBobLearnMiscEMPCATrainer_compute_likelihood);
-
-
-  // check that everything is fine
-  if (PyType_Ready(&PyBobLearnMiscEMPCATrainer_Type) < 0) return false;
-
-  // add the type to the module
-  Py_INCREF(&PyBobLearnMiscEMPCATrainer_Type);
-  return PyModule_AddObject(module, "_EMPCATrainer", (PyObject*)&PyBobLearnMiscEMPCATrainer_Type) >= 0;
-}
-
diff --git a/bob/learn/misc/gaussian.cpp b/bob/learn/misc/gaussian.cpp
deleted file mode 100644
index 5ab2a7d029a378abee4fb1d310d0d1fb3d5a6033..0000000000000000000000000000000000000000
--- a/bob/learn/misc/gaussian.cpp
+++ /dev/null
@@ -1,571 +0,0 @@
-/**
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- * @date Fri 21 Nov 10:38:48 2013
- *
- * @brief Python API for bob::learn::em
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "main.h"
-
-/******************************************************************/
-/************ Constructor Section *********************************/
-/******************************************************************/
-
-static auto Gaussian_doc = bob::extension::ClassDoc(
-  BOB_EXT_MODULE_PREFIX ".Gaussian",
-  "This class implements a multivariate diagonal Gaussian distribution"
-).add_constructor(
-  bob::extension::FunctionDoc(
-    "__init__",
-    "Constructs a new multivariate gaussian object",
-    "",
-    true
-  )
-  .add_prototype("n_inputs","")
-  .add_prototype("other","")
-  .add_prototype("hdf5","")
-  .add_prototype("","")
-
-  .add_parameter("n_inputs", "int", "Dimension of the feature vector")
-  .add_parameter("other", ":py:class:`bob.learn.misc.GMMStats`", "A GMMStats object to be copied.")
-  .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
-);
-
-
-
-static int PyBobLearnMiscGaussian_init_number(PyBobLearnMiscGaussianObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = Gaussian_doc.kwlist(0);
-  int n_inputs=1;
-  //Parsing the input argments
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &n_inputs))
-    return -1;
-
-  if(n_inputs < 0){
-    PyErr_Format(PyExc_TypeError, "input argument must be greater than or equal to zero");
-    Gaussian_doc.print_usage();
-    return -1;
-   }
-
-  self->cxx.reset(new bob::learn::misc::Gaussian(n_inputs));
-  return 0;
-}
-
-static int PyBobLearnMiscGaussian_init_copy(PyBobLearnMiscGaussianObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = Gaussian_doc.kwlist(1);
-  PyBobLearnMiscGaussianObject* tt;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscGaussian_Type, &tt)){
-    Gaussian_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::Gaussian(*tt->cxx));
-  return 0;
-}
-
-static int PyBobLearnMiscGaussian_init_hdf5(PyBobLearnMiscGaussianObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = Gaussian_doc.kwlist(2);
-
-  PyBobIoHDF5FileObject* config = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBobIoHDF5File_Converter, &config)){
-    Gaussian_doc.print_usage();
-    return -1;
-  }
-  
-  try {
-    self->cxx.reset(new bob::learn::misc::Gaussian(*(config->f)));
-  }
-  catch (std::exception& ex) {
-    PyErr_SetString(PyExc_RuntimeError, ex.what());
-    return -1;
-  }
-  catch (...) {
-    PyErr_Format(PyExc_RuntimeError, "cannot create new object of type `%s' - unknown exception thrown", Py_TYPE(self)->tp_name);
-    return -1;
-  }
-
-  return 0;
-}
-
-
-static int PyBobLearnMiscGaussian_init(PyBobLearnMiscGaussianObject* self, PyObject* args, PyObject* kwargs) {
-
-  BOB_TRY
-
-  // get the number of command line arguments
-  Py_ssize_t nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-  if (nargs==0){
-    self->cxx.reset(new bob::learn::misc::Gaussian());
-    return 0;
-  }
-
-  //Reading the input argument
-  PyObject* arg = 0;
-  if (PyTuple_Size(args)) 
-    arg = PyTuple_GET_ITEM(args, 0);
-  else {
-    PyObject* tmp = PyDict_Values(kwargs);
-    auto tmp_ = make_safe(tmp);
-    arg = PyList_GET_ITEM(tmp, 0);
-  }
-
-  /**If the constructor input is a number**/
-  if (PyNumber_Check(arg)) 
-    return PyBobLearnMiscGaussian_init_number(self, args, kwargs);
-  /**If the constructor input is Gaussian object**/
-  else if (PyBobLearnMiscGaussian_Check(arg))
-    return PyBobLearnMiscGaussian_init_copy(self, args, kwargs);
-  /**If the constructor input is a HDF5**/
-  else if (PyBobIoHDF5File_Check(arg))
-    return PyBobLearnMiscGaussian_init_hdf5(self, args, kwargs);
-  else
-    PyErr_Format(PyExc_TypeError, "invalid input argument");
-    Gaussian_doc.print_usage();
-    return -1;
-
-  BOB_CATCH_MEMBER("cannot create Gaussian", -1)
-  return 0;
-}
-
-
-
-static void PyBobLearnMiscGaussian_delete(PyBobLearnMiscGaussianObject* self) {
-  self->cxx.reset();
-  Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-static PyObject* PyBobLearnMiscGaussian_RichCompare(PyBobLearnMiscGaussianObject* self, PyObject* other, int op) {
-  BOB_TRY
-
-  if (!PyBobLearnMiscGaussian_Check(other)) {
-    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
-    return 0;
-  }
-  auto other_ = reinterpret_cast<PyBobLearnMiscGaussianObject*>(other);
-  switch (op) {
-    case Py_EQ:
-      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
-    case Py_NE:
-      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
-    default:
-      Py_INCREF(Py_NotImplemented);
-      return Py_NotImplemented;
-  }
-  BOB_CATCH_MEMBER("cannot compare Gaussian objects", 0)
-}
-
-int PyBobLearnMiscGaussian_Check(PyObject* o) {
-  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnMiscGaussian_Type));
-}
-
-
-/******************************************************************/
-/************ Variables Section ***********************************/
-/******************************************************************/
-
-/***** MEAN *****/
-static auto mean = bob::extension::VariableDoc(
-  "mean",
-  "array_like <double, 1D>",
-  "Mean of the Gaussian",
-  ""
-);
-PyObject* PyBobLearnMiscGaussian_getMean(PyBobLearnMiscGaussianObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getMean());
-  BOB_CATCH_MEMBER("mean could not be read", 0)
-}
-int PyBobLearnMiscGaussian_setMean(PyBobLearnMiscGaussianObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, mean.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "mean");
-  if (!b) return -1;
-  self->cxx->setMean(*b);
-  return 0;
-  BOB_CATCH_MEMBER("mean could not be set", -1)
-}
-
-/***** Variance *****/
-static auto variance = bob::extension::VariableDoc(
-  "variance",
-  "array_like <double, 1D>",
-  "Variance of the Gaussian",
-  ""
-);
-PyObject* PyBobLearnMiscGaussian_getVariance(PyBobLearnMiscGaussianObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getVariance());
-  BOB_CATCH_MEMBER("variance could not be read", 0)
-}
-int PyBobLearnMiscGaussian_setVariance(PyBobLearnMiscGaussianObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, variance.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "variance");
-  if (!b) return -1;
-  self->cxx->setVariance(*b);
-  return 0;
-  BOB_CATCH_MEMBER("variance could not be set", -1)
-}
-
-
-/***** variance_thresholds *****/
-static auto variance_thresholds = bob::extension::VariableDoc(
-  "variance_thresholds",
-  "array_like <double, 1D>",
-  "The variance flooring thresholds, i.e. the minimum allowed value of variance in each dimension. ",
-  "The variance will be set to this value if an attempt is made to set it to a smaller value."
-);
-PyObject* PyBobLearnMiscGaussian_getVarianceThresholds(PyBobLearnMiscGaussianObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getVarianceThresholds());
-  BOB_CATCH_MEMBER("variance_thresholds could not be read", 0)
-}
-int PyBobLearnMiscGaussian_setVarianceThresholds(PyBobLearnMiscGaussianObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, variance_thresholds.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "variance_thresholds");
-  if (!b) return -1;
-  self->cxx->setVarianceThresholds(*b);
-  return 0;
-  BOB_CATCH_MEMBER("variance_thresholds could not be set", -1)  
-}
-
-
-/***** shape *****/
-static auto shape = bob::extension::VariableDoc(
-  "shape",
-  "(int)",
-  "A tuple that represents the dimensionality of the Gaussian ``(dim,)``.",
-  ""
-);
-PyObject* PyBobLearnMiscGaussian_getShape(PyBobLearnMiscGaussianObject* self, void*) {
-  BOB_TRY
-  return Py_BuildValue("(i)", self->cxx->getNInputs());
-  BOB_CATCH_MEMBER("shape could not be read", 0)
-}
-
-static PyGetSetDef PyBobLearnMiscGaussian_getseters[] = {
-    {
-      mean.name(),
-      (getter)PyBobLearnMiscGaussian_getMean,
-      (setter)PyBobLearnMiscGaussian_setMean,
-      mean.doc(),
-      0
-    },
-    {
-      variance.name(),
-      (getter)PyBobLearnMiscGaussian_getVariance,
-      (setter)PyBobLearnMiscGaussian_setVariance,
-      variance.doc(),
-     0
-     },
-     {
-      variance_thresholds.name(),
-      (getter)PyBobLearnMiscGaussian_getVarianceThresholds,
-      (setter)PyBobLearnMiscGaussian_setVarianceThresholds,
-      variance_thresholds.doc(),
-      0
-     },
-     {
-      shape.name(),
-      (getter)PyBobLearnMiscGaussian_getShape,
-      0,
-      shape.doc(),
-      0
-     },
-
-    {0}  // Sentinel
-};
-
-
-/******************************************************************/
-/************ Functions Section ***********************************/
-/******************************************************************/
-
-/*** resize ***/
-static auto resize = bob::extension::FunctionDoc(
-  "resize",
-  "Set the input dimensionality, reset the mean to zero and the variance to one."
-)
-.add_prototype("input")
-.add_parameter("input", "int", "Dimensionality of the feature vector");
-static PyObject* PyBobLearnMiscGaussian_resize(PyBobLearnMiscGaussianObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = resize.kwlist(0);
-
-  int input = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &input)) Py_RETURN_NONE;
-  if (input <= 0){
-    PyErr_Format(PyExc_TypeError, "input must be greater than zero");
-    resize.print_usage();
-    Py_RETURN_NONE;
-  }
-  self->cxx->setNInputs(input);
-
-  BOB_CATCH_MEMBER("cannot perform the resize method", 0)
-
-  Py_RETURN_NONE;
-}
-
-/*** log_likelihood ***/
-static auto log_likelihood = bob::extension::FunctionDoc(
-  "log_likelihood",
-  "Output the log likelihood of the sample, x. The input size is checked.",
-  ".. note:: The :py:meth:`__call__` function is an alias for this.", 
-  true
-)
-.add_prototype("input","output")
-.add_parameter("input", "array_like <double, 1D>", "Input vector")
-.add_return("output","float","The log likelihood");
-static PyObject* PyBobLearnMiscGaussian_loglikelihood(PyBobLearnMiscGaussianObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = log_likelihood.kwlist(0);
-
-  PyBlitzArrayObject* input = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBlitzArray_Converter, &input)) Py_RETURN_NONE;
-  //protects acquired resources through this scope
-  auto input_ = make_safe(input);
-
-  double value = self->cxx->logLikelihood(*PyBlitzArrayCxx_AsBlitz<double,1>(input));
-  return Py_BuildValue("d", value);
-
-  BOB_CATCH_MEMBER("cannot compute the likelihood", 0)
-}
-
-
-/*** log_likelihood_ ***/
-static auto log_likelihood_ = bob::extension::FunctionDoc(
-  "log_likelihood_",
-  "Output the log likelihood given a sample. The input size is NOT checked."
-)
-.add_prototype("input","output")
-.add_parameter("input", "array_like <double, 1D>", "Input vector")
-.add_return("output","double","The log likelihood");
-static PyObject* PyBobLearnMiscGaussian_loglikelihood_(PyBobLearnMiscGaussianObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  char** kwlist = log_likelihood_.kwlist(0);
-
-  PyBlitzArrayObject* input = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBlitzArray_Converter, &input)) Py_RETURN_NONE;
-  //protects acquired resources through this scope
-  auto input_ = make_safe(input);
-
-  double value = self->cxx->logLikelihood_(*PyBlitzArrayCxx_AsBlitz<double,1>(input));
-  return Py_BuildValue("d", value);
-
-  BOB_CATCH_MEMBER("cannot compute the likelihood", 0)
-}
-
-
-/*** save ***/
-static auto save = bob::extension::FunctionDoc(
-  "save",
-  "Save the configuration of the Gassian Machine to a given HDF5 file"
-)
-.add_prototype("hdf5")
-.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing")
-;
-static PyObject* PyBobLearnMiscGaussian_Save(PyBobLearnMiscGaussianObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  // get list of arguments
-  char** kwlist = save.kwlist(0);  
-  PyBobIoHDF5FileObject* hdf5;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-
-  auto hdf5_ = make_safe(hdf5);
-
-  self->cxx->save(*hdf5->f);
-
-  BOB_CATCH_MEMBER("cannot save the data", 0)
-  Py_RETURN_NONE;
-}
-
-/*** load ***/
-static auto load = bob::extension::FunctionDoc(
-  "load",
-  "Load the configuration of the Gassian Machine to a given HDF5 file"
-)
-.add_prototype("hdf5")
-.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
-static PyObject* PyBobLearnMiscGaussian_Load(PyBobLearnMiscGaussianObject* self,  PyObject* args, PyObject* kwargs) {
-
-  BOB_TRY
-  
-  char** kwlist = load.kwlist(0);  
-  PyBobIoHDF5FileObject* hdf5;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-  
-  auto hdf5_ = make_safe(hdf5);  
-  self->cxx->load(*hdf5->f);
-  
-  BOB_CATCH_MEMBER("cannot load the data", 0)    
-  Py_RETURN_NONE;
-}
-
-
-/*** is_similar_to ***/
-static auto is_similar_to = bob::extension::FunctionDoc(
-  "is_similar_to",
-  
-  "Compares this Gaussian with the ``other`` one to be approximately the same.",
-  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
-  "relative and absolute precision for the ``weights``, ``biases`` and any other values internal to this machine.",
-  true
-)
-.add_prototype("other, [r_epsilon], [a_epsilon]","output")
-.add_parameter("other", ":py:class:`bob.learn.misc.Gaussian`", "A gaussian to be compared.")
-.add_parameter("[r_epsilon]", "float", "Relative precision.")
-.add_parameter("[a_epsilon]", "float", "Absolute precision.")
-.add_return("output","bool","True if it is similar, otherwise false.");
-static PyObject* PyBobLearnMiscGaussian_IsSimilarTo(PyBobLearnMiscGaussianObject* self, PyObject* args, PyObject* kwds) {
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = is_similar_to.kwlist(0);
-
-  PyBobLearnMiscGaussianObject* other = 0;
-  double r_epsilon = 1.e-5;
-  double a_epsilon = 1.e-8;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
-        &PyBobLearnMiscGaussian_Type, &other,
-        &r_epsilon, &a_epsilon)) return 0;
-
-  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
-    Py_RETURN_TRUE;
-  else
-    Py_RETURN_FALSE;
-}
-
-
-/*** set_variance_thresholds ***/
-static auto set_variance_thresholds = bob::extension::FunctionDoc(
-  "set_variance_thresholds",
-  "Set the variance flooring thresholds equal to the given threshold for all the dimensions."
-)
-.add_prototype("input")
-.add_parameter("input","float","Threshold")
-;
-static PyObject* PyBobLearnMiscGaussian_SetVarianceThresholds(PyBobLearnMiscGaussianObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = set_variance_thresholds.kwlist(0);
-
-  double input = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "d", kwlist, &input)) return 0;
-
-  self->cxx->setVarianceThresholds(input);
-
-  BOB_CATCH_MEMBER("cannot perform the set_variance_Thresholds method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-static PyMethodDef PyBobLearnMiscGaussian_methods[] = {
-  {
-    resize.name(),
-    (PyCFunction)PyBobLearnMiscGaussian_resize,
-    METH_VARARGS|METH_KEYWORDS,
-    resize.doc()
-  },
-  {
-    log_likelihood.name(),
-    (PyCFunction)PyBobLearnMiscGaussian_loglikelihood,
-    METH_VARARGS|METH_KEYWORDS,
-    log_likelihood.doc()
-  },
-  {
-    log_likelihood_.name(),
-    (PyCFunction)PyBobLearnMiscGaussian_loglikelihood_,
-    METH_VARARGS|METH_KEYWORDS,
-    log_likelihood_.doc()
-  },
-  {
-    save.name(),
-    (PyCFunction)PyBobLearnMiscGaussian_Save,
-    METH_VARARGS|METH_KEYWORDS,
-    save.doc()
-  },
-  {
-    load.name(),
-    (PyCFunction)PyBobLearnMiscGaussian_Load,
-    METH_VARARGS|METH_KEYWORDS,
-    load.doc()
-  },
-  {
-    is_similar_to.name(),
-    (PyCFunction)PyBobLearnMiscGaussian_IsSimilarTo,
-    METH_VARARGS|METH_KEYWORDS,
-    is_similar_to.doc()
-  },
-  {
-    set_variance_thresholds.name(),
-    (PyCFunction)PyBobLearnMiscGaussian_SetVarianceThresholds,
-    METH_VARARGS|METH_KEYWORDS,
-    set_variance_thresholds.doc()
-  },
-
-  {0} /* Sentinel */
-};
-
-
-/******************************************************************/
-/************ Module Section **************************************/
-/******************************************************************/
-
-// Define the Gaussian type struct; will be initialized later
-PyTypeObject PyBobLearnMiscGaussian_Type = {
-  PyVarObject_HEAD_INIT(0,0)
-  0
-};
-
-bool init_BobLearnMiscGaussian(PyObject* module)
-{
-  // initialize the type struct
-  PyBobLearnMiscGaussian_Type.tp_name = Gaussian_doc.name();
-  PyBobLearnMiscGaussian_Type.tp_basicsize = sizeof(PyBobLearnMiscGaussianObject);
-  PyBobLearnMiscGaussian_Type.tp_flags = Py_TPFLAGS_DEFAULT;
-  PyBobLearnMiscGaussian_Type.tp_doc = Gaussian_doc.doc();
-
-  // set the functions
-  PyBobLearnMiscGaussian_Type.tp_new = PyType_GenericNew;
-  PyBobLearnMiscGaussian_Type.tp_init = reinterpret_cast<initproc>(PyBobLearnMiscGaussian_init);
-  PyBobLearnMiscGaussian_Type.tp_dealloc = reinterpret_cast<destructor>(PyBobLearnMiscGaussian_delete);
-  PyBobLearnMiscGaussian_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnMiscGaussian_RichCompare);
-  PyBobLearnMiscGaussian_Type.tp_methods = PyBobLearnMiscGaussian_methods;
-  PyBobLearnMiscGaussian_Type.tp_getset = PyBobLearnMiscGaussian_getseters;
-  PyBobLearnMiscGaussian_Type.tp_call = reinterpret_cast<ternaryfunc>(PyBobLearnMiscGaussian_loglikelihood);
-
-  // check that everything is fine
-  if (PyType_Ready(&PyBobLearnMiscGaussian_Type) < 0) return false;
-
-  // add the type to the module
-  Py_INCREF(&PyBobLearnMiscGaussian_Type);
-  return PyModule_AddObject(module, "Gaussian", (PyObject*)&PyBobLearnMiscGaussian_Type) >= 0;
-}
-
diff --git a/bob/learn/misc/gmm_base_trainer.cpp b/bob/learn/misc/gmm_base_trainer.cpp
deleted file mode 100644
index 0308f16453640bfdc169c3c34b197e1ea5f2c40e..0000000000000000000000000000000000000000
--- a/bob/learn/misc/gmm_base_trainer.cpp
+++ /dev/null
@@ -1,437 +0,0 @@
-/**
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- * @date Web 21 Jan 12:30:00 2015
- *
- * @brief Python API for bob::learn::em
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "main.h"
-#include <boost/make_shared.hpp>
-
-/******************************************************************/
-/************ Constructor Section *********************************/
-/******************************************************************/
-
-static inline bool f(PyObject* o){return o != 0 && PyObject_IsTrue(o) > 0;}  /* converts PyObject to bool and returns false if object is NULL */
-
-static auto GMMBaseTrainer_doc = bob::extension::ClassDoc(
-  BOB_EXT_MODULE_PREFIX ".GMMBaseTrainer",
-  "This class implements the E-step of the expectation-maximisation"
-  "algorithm for a :py:class:`bob.learn.misc.GMMMachine`"
-).add_constructor(
-  bob::extension::FunctionDoc(
-    "__init__",
-    "Creates a GMMBaseTrainer",
-    "",
-    true
-  )
-  .add_prototype("update_means, [update_variances], [update_weights], [mean_var_update_responsibilities_threshold]","")
-  .add_prototype("other","")
-  .add_prototype("","")
-
-  .add_parameter("update_means", "bool", "Update means on each iteration")
-  .add_parameter("update_variances", "bool", "Update variances on each iteration")
-  .add_parameter("update_weights", "bool", "Update weights on each iteration")
-  .add_parameter("mean_var_update_responsibilities_threshold", "float", "Threshold over the responsibilities of the Gaussians Equations 9.24, 9.25 of Bishop, `Pattern recognition and machine learning`, 2006 require a division by the responsibilities, which might be equal to zero because of numerical issue. This threshold is used to avoid such divisions.")
-  .add_parameter("other", ":py:class:`bob.learn.misc.GMMBaseTrainer`", "A GMMBaseTrainer object to be copied.")
-);
-
-
-
-static int PyBobLearnMiscGMMBaseTrainer_init_copy(PyBobLearnMiscGMMBaseTrainerObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = GMMBaseTrainer_doc.kwlist(1);
-  PyBobLearnMiscGMMBaseTrainerObject* tt;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscGMMBaseTrainer_Type, &tt)){
-    GMMBaseTrainer_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::GMMBaseTrainer(*tt->cxx));
-  return 0;
-}
-
-
-static int PyBobLearnMiscGMMBaseTrainer_init_bool(PyBobLearnMiscGMMBaseTrainerObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = GMMBaseTrainer_doc.kwlist(0);
-  PyObject* update_means     = 0;
-  PyObject* update_variances = 0;
-  PyObject* update_weights   = 0;
-  double mean_var_update_responsibilities_threshold = std::numeric_limits<double>::epsilon();
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!|O!O!d", kwlist, &PyBool_Type, &update_means, &PyBool_Type, 
-                                                             &update_variances, &PyBool_Type, &update_weights, &mean_var_update_responsibilities_threshold)){
-    GMMBaseTrainer_doc.print_usage();
-    return -1;
-  }
-  self->cxx.reset(new bob::learn::misc::GMMBaseTrainer(f(update_means), f(update_variances), f(update_weights), mean_var_update_responsibilities_threshold));
-  return 0;
-}
-
-
-static int PyBobLearnMiscGMMBaseTrainer_init(PyBobLearnMiscGMMBaseTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-
-  if (nargs==0){ //default initializer ()
-    self->cxx.reset(new bob::learn::misc::GMMBaseTrainer());
-    return 0;
-  }
-  else{
-    //Reading the input argument
-    PyObject* arg = 0;
-    if (PyTuple_Size(args))
-      arg = PyTuple_GET_ITEM(args, 0);
-    else {
-      PyObject* tmp = PyDict_Values(kwargs);
-      auto tmp_ = make_safe(tmp);
-      arg = PyList_GET_ITEM(tmp, 0);
-    }
-
-    // If the constructor input is GMMBaseTrainer object
-    if (PyBobLearnMiscGMMBaseTrainer_Check(arg))
-      return PyBobLearnMiscGMMBaseTrainer_init_copy(self, args, kwargs);
-    else
-      return PyBobLearnMiscGMMBaseTrainer_init_bool(self, args, kwargs);
-  }
-
-  BOB_CATCH_MEMBER("cannot create GMMBaseTrainer_init_bool", 0)
-  return 0;
-}
-
-
-static void PyBobLearnMiscGMMBaseTrainer_delete(PyBobLearnMiscGMMBaseTrainerObject* self) {
-  self->cxx.reset();
-  Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-
-int PyBobLearnMiscGMMBaseTrainer_Check(PyObject* o) {
-  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnMiscGMMBaseTrainer_Type));
-}
-
-
-static PyObject* PyBobLearnMiscGMMBaseTrainer_RichCompare(PyBobLearnMiscGMMBaseTrainerObject* self, PyObject* other, int op) {
-  BOB_TRY
-
-  if (!PyBobLearnMiscGMMBaseTrainer_Check(other)) {
-    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
-    return 0;
-  }
-  auto other_ = reinterpret_cast<PyBobLearnMiscGMMBaseTrainerObject*>(other);
-  switch (op) {
-    case Py_EQ:
-      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
-    case Py_NE:
-      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
-    default:
-      Py_INCREF(Py_NotImplemented);
-      return Py_NotImplemented;
-  }
-  BOB_CATCH_MEMBER("cannot compare GMMBaseTrainer objects", 0)
-}
-
-
-/******************************************************************/
-/************ Variables Section ***********************************/
-/******************************************************************/
-
-
-/***** gmm_stats *****/
-static auto gmm_stats = bob::extension::VariableDoc(
-  "gmm_stats",
-  ":py:class:`bob.learn.misc.GMMStats`",
-  "Get/Set GMMStats",
-  ""
-);
-PyObject* PyBobLearnMiscGMMBaseTrainer_getGMMStats(PyBobLearnMiscGMMBaseTrainerObject* self, void*){
-  BOB_TRY
-
-  bob::learn::misc::GMMStats stats = self->cxx->getGMMStats();
-  boost::shared_ptr<bob::learn::misc::GMMStats> stats_shared = boost::make_shared<bob::learn::misc::GMMStats>(stats);
-
-  //Allocating the correspondent python object
-  PyBobLearnMiscGMMStatsObject* retval =
-    (PyBobLearnMiscGMMStatsObject*)PyBobLearnMiscGMMStats_Type.tp_alloc(&PyBobLearnMiscGMMStats_Type, 0);
-
-  retval->cxx = stats_shared;
-
-  return Py_BuildValue("O",retval);
-  BOB_CATCH_MEMBER("GMMStats could not be read", 0)
-}
-/*
-int PyBobLearnMiscGMMBaseTrainer_setGMMStats(PyBobLearnMiscGMMBaseTrainerObject* self, PyObject* value, void*){
-  BOB_TRY
-
-  if (!PyBobLearnMiscGMMStats_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a :py:class:`bob.learn.misc.GMMStats`", Py_TYPE(self)->tp_name, gmm_stats.name());
-    return -1;
-  }
-
-  PyBobLearnMiscGMMStatsObject* stats = 0;
-  PyArg_Parse(value, "O!", &PyBobLearnMiscGMMStats_Type,&stats);
-
-  self->cxx->setGMMStats(*stats->cxx);
-
-  return 0;
-  BOB_CATCH_MEMBER("gmm_stats could not be set", -1)  
-}
-*/
-
-
-/***** update_means *****/
-static auto update_means = bob::extension::VariableDoc(
-  "update_means",
-  "bool",
-  "Update means on each iteration",
-  ""
-);
-PyObject* PyBobLearnMiscGMMBaseTrainer_getUpdateMeans(PyBobLearnMiscGMMBaseTrainerObject* self, void*){
-  BOB_TRY
-  return Py_BuildValue("O",self->cxx->getUpdateMeans()?Py_True:Py_False);
-  BOB_CATCH_MEMBER("update_means could not be read", 0)
-}
-
-/***** update_variances *****/
-static auto update_variances = bob::extension::VariableDoc(
-  "update_variances",
-  "bool",
-  "Update variances on each iteration",
-  ""
-);
-PyObject* PyBobLearnMiscGMMBaseTrainer_getUpdateVariances(PyBobLearnMiscGMMBaseTrainerObject* self, void*){
-  BOB_TRY
-  return Py_BuildValue("O",self->cxx->getUpdateVariances()?Py_True:Py_False);
-  BOB_CATCH_MEMBER("update_variances could not be read", 0)
-}
-
-
-/***** update_weights *****/
-static auto update_weights = bob::extension::VariableDoc(
-  "update_weights",
-  "bool",
-  "Update weights on each iteration",
-  ""
-);
-PyObject* PyBobLearnMiscGMMBaseTrainer_getUpdateWeights(PyBobLearnMiscGMMBaseTrainerObject* self, void*){
-  BOB_TRY
-  return Py_BuildValue("O",self->cxx->getUpdateWeights()?Py_True:Py_False);
-  BOB_CATCH_MEMBER("update_weights could not be read", 0)
-}
-
-
-    
-     
-
-/***** mean_var_update_responsibilities_threshold *****/
-static auto mean_var_update_responsibilities_threshold = bob::extension::VariableDoc(
-  "mean_var_update_responsibilities_threshold",
-  "bool",
-  "Threshold over the responsibilities of the Gaussians" 
-  "Equations 9.24, 9.25 of Bishop, \"Pattern recognition and machine learning\", 2006" 
-  "require a division by the responsibilities, which might be equal to zero" 
-  "because of numerical issue. This threshold is used to avoid such divisions.",
-  ""
-);
-PyObject* PyBobLearnMiscGMMBaseTrainer_getMeanVarUpdateResponsibilitiesThreshold(PyBobLearnMiscGMMBaseTrainerObject* self, void*){
-  BOB_TRY
-  return Py_BuildValue("d",self->cxx->getMeanVarUpdateResponsibilitiesThreshold());
-  BOB_CATCH_MEMBER("update_weights could not be read", 0)
-}
-
-
-static PyGetSetDef PyBobLearnMiscGMMBaseTrainer_getseters[] = { 
-  {
-    update_means.name(),
-    (getter)PyBobLearnMiscGMMBaseTrainer_getUpdateMeans,
-    0,
-    update_means.doc(),
-    0
-  },
-  {
-    update_variances.name(),
-    (getter)PyBobLearnMiscGMMBaseTrainer_getUpdateVariances,
-    0,
-    update_variances.doc(),
-    0
-  },
-  {
-    update_weights.name(),
-    (getter)PyBobLearnMiscGMMBaseTrainer_getUpdateWeights,
-    0,
-    update_weights.doc(),
-    0
-  },  
-  {
-    mean_var_update_responsibilities_threshold.name(),
-    (getter)PyBobLearnMiscGMMBaseTrainer_getMeanVarUpdateResponsibilitiesThreshold,
-    0,
-    mean_var_update_responsibilities_threshold.doc(),
-    0
-  },  
-  {
-    gmm_stats.name(),
-    (getter)PyBobLearnMiscGMMBaseTrainer_getGMMStats,
-    0, //(setter)PyBobLearnMiscGMMBaseTrainer_setGMMStats,
-    gmm_stats.doc(),
-    0
-  },  
-  {0}  // Sentinel
-};
-
-
-/******************************************************************/
-/************ Functions Section ***********************************/
-/******************************************************************/
-
-/*** initialize ***/
-static auto initialize = bob::extension::FunctionDoc(
-  "initialize",
-  "Initialization before the EM steps",
-  "Instanciate :py:class:`bob.learn.misc.GMMStats`",
-  true
-)
-.add_prototype("gmm_machine")
-.add_parameter("gmm_machine", ":py:class:`bob.learn.misc.GMMMachine`", "GMMMachine Object");
-static PyObject* PyBobLearnMiscGMMBaseTrainer_initialize(PyBobLearnMiscGMMBaseTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = initialize.kwlist(0);
-
-  PyBobLearnMiscGMMMachineObject* gmm_machine = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscGMMMachine_Type, &gmm_machine)) Py_RETURN_NONE;
-
-  self->cxx->initialize(*gmm_machine->cxx);
-
-  BOB_CATCH_MEMBER("cannot perform the initialize method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-
-/*** eStep ***/
-static auto eStep = bob::extension::FunctionDoc(
-  "eStep",
-  "Calculates and saves statistics across the dataset,"
-  "and saves these as m_ss. ",
-
-  "Calculates the average log likelihood of the observations given the GMM,"
-  "and returns this in average_log_likelihood."
-  "The statistics, m_ss, will be used in the mStep() that follows.",
-
-  true
-)
-.add_prototype("gmm_machine,data")
-.add_parameter("gmm_machine", ":py:class:`bob.learn.misc.GMMMachine`", "GMMMachine Object")
-.add_parameter("data", "array_like <float, 2D>", "Input data");
-static PyObject* PyBobLearnMiscGMMBaseTrainer_eStep(PyBobLearnMiscGMMBaseTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = eStep.kwlist(0);
-
-  PyBobLearnMiscGMMMachineObject* gmm_machine;
-  PyBlitzArrayObject* data = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnMiscGMMMachine_Type, &gmm_machine,
-                                                                 &PyBlitzArray_Converter, &data)) Py_RETURN_NONE;
-  auto data_ = make_safe(data);
-
-  self->cxx->eStep(*gmm_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
-
-  BOB_CATCH_MEMBER("cannot perform the eStep method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** computeLikelihood ***/
-static auto compute_likelihood = bob::extension::FunctionDoc(
-  "compute_likelihood",
-  "This functions returns the average min (Square Euclidean) distance (average distance to the closest mean)",
-  0,
-  true
-)
-.add_prototype("gmm_machine")
-.add_parameter("gmm_machine", ":py:class:`bob.learn.misc.GMMMachine`", "GMMMachine Object");
-static PyObject* PyBobLearnMiscGMMBaseTrainer_compute_likelihood(PyBobLearnMiscGMMBaseTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = compute_likelihood.kwlist(0);
-
-  PyBobLearnMiscGMMMachineObject* gmm_machine;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscGMMMachine_Type, &gmm_machine)) Py_RETURN_NONE;
-
-  double value = self->cxx->computeLikelihood(*gmm_machine->cxx);
-  return Py_BuildValue("d", value);
-
-  BOB_CATCH_MEMBER("cannot perform the computeLikelihood method", 0)
-}
-
-
-static PyMethodDef PyBobLearnMiscGMMBaseTrainer_methods[] = {
-  {
-    initialize.name(),
-    (PyCFunction)PyBobLearnMiscGMMBaseTrainer_initialize,
-    METH_VARARGS|METH_KEYWORDS,
-    initialize.doc()
-  },
-  {
-    eStep.name(),
-    (PyCFunction)PyBobLearnMiscGMMBaseTrainer_eStep,
-    METH_VARARGS|METH_KEYWORDS,
-    eStep.doc()
-  },
-  {
-    compute_likelihood.name(),
-    (PyCFunction)PyBobLearnMiscGMMBaseTrainer_compute_likelihood,
-    METH_VARARGS|METH_KEYWORDS,
-    compute_likelihood.doc()
-  },
-  {0} /* Sentinel */
-};
-
-
-/******************************************************************/
-/************ Module Section **************************************/
-/******************************************************************/
-
-// Define the Gaussian type struct; will be initialized later
-PyTypeObject PyBobLearnMiscGMMBaseTrainer_Type = {
-  PyVarObject_HEAD_INIT(0,0)
-  0
-};
-
-bool init_BobLearnMiscGMMBaseTrainer(PyObject* module)
-{
-  // initialize the type struct
-  PyBobLearnMiscGMMBaseTrainer_Type.tp_name      = GMMBaseTrainer_doc.name();
-  PyBobLearnMiscGMMBaseTrainer_Type.tp_basicsize = sizeof(PyBobLearnMiscGMMBaseTrainerObject);
-  PyBobLearnMiscGMMBaseTrainer_Type.tp_flags     = Py_TPFLAGS_DEFAULT;
-  PyBobLearnMiscGMMBaseTrainer_Type.tp_doc       = GMMBaseTrainer_doc.doc();
-
-  // set the functions
-  PyBobLearnMiscGMMBaseTrainer_Type.tp_new          = PyType_GenericNew;
-  PyBobLearnMiscGMMBaseTrainer_Type.tp_init         = reinterpret_cast<initproc>(PyBobLearnMiscGMMBaseTrainer_init);
-  PyBobLearnMiscGMMBaseTrainer_Type.tp_dealloc      = reinterpret_cast<destructor>(PyBobLearnMiscGMMBaseTrainer_delete);
-  PyBobLearnMiscGMMBaseTrainer_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnMiscGMMBaseTrainer_RichCompare);
-  PyBobLearnMiscGMMBaseTrainer_Type.tp_methods      = PyBobLearnMiscGMMBaseTrainer_methods;
-  PyBobLearnMiscGMMBaseTrainer_Type.tp_getset       = PyBobLearnMiscGMMBaseTrainer_getseters;
-  PyBobLearnMiscGMMBaseTrainer_Type.tp_call         = reinterpret_cast<ternaryfunc>(PyBobLearnMiscGMMBaseTrainer_compute_likelihood);
-
-
-  // check that everything is fine
-  if (PyType_Ready(&PyBobLearnMiscGMMBaseTrainer_Type) < 0) return false;
-
-  // add the type to the module
-  Py_INCREF(&PyBobLearnMiscGMMBaseTrainer_Type);
-  return PyModule_AddObject(module, "GMMBaseTrainer", (PyObject*)&PyBobLearnMiscGMMBaseTrainer_Type) >= 0;
-}
-
diff --git a/bob/learn/misc/gmm_machine.cpp b/bob/learn/misc/gmm_machine.cpp
deleted file mode 100644
index 0b21d684515a8dc487386816966e52dcccd8cd30..0000000000000000000000000000000000000000
--- a/bob/learn/misc/gmm_machine.cpp
+++ /dev/null
@@ -1,834 +0,0 @@
-/**
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- * @date Wed 11 Dec 18:01:00 2014
- *
- * @brief Python API for bob::learn::em
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "main.h"
-
-/******************************************************************/
-/************ Constructor Section *********************************/
-/******************************************************************/
-
-static auto GMMMachine_doc = bob::extension::ClassDoc(
-  BOB_EXT_MODULE_PREFIX ".GMMMachine",
-  "This class implements a multivariate diagonal Gaussian distribution.",
-  "See Section 2.3.9 of Bishop, \"Pattern recognition and machine learning\", 2006"
-).add_constructor(
-  bob::extension::FunctionDoc(
-    "__init__",
-    "Creates a GMMMachine",
-    "",
-    true
-  )
-  .add_prototype("n_gaussians,n_inputs","")
-  .add_prototype("other","")
-  .add_prototype("hdf5","")
-  .add_prototype("","")
-
-  .add_parameter("n_gaussians", "int", "Number of gaussians")
-  .add_parameter("n_inputs", "int", "Dimension of the feature vector")
-  .add_parameter("other", ":py:class:`bob.learn.misc.GMMMachine`", "A GMMMachine object to be copied.")
-  .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
-
-);
-
-
-static int PyBobLearnMiscGMMMachine_init_number(PyBobLearnMiscGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = GMMMachine_doc.kwlist(0);
-  int n_inputs    = 1;
-  int n_gaussians = 1;
-  //Parsing the input argments
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "ii", kwlist, &n_gaussians, &n_inputs))
-    return -1;
-
-  if(n_gaussians < 0){
-    PyErr_Format(PyExc_TypeError, "gaussians argument must be greater than or equal to zero");
-    return -1;
-  }
-
-  if(n_inputs < 0){
-    PyErr_Format(PyExc_TypeError, "input argument must be greater than or equal to zero");
-    return -1;
-   }
-
-  self->cxx.reset(new bob::learn::misc::GMMMachine(n_gaussians, n_inputs));
-  return 0;
-}
-
-
-static int PyBobLearnMiscGMMMachine_init_copy(PyBobLearnMiscGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = GMMMachine_doc.kwlist(1);
-  PyBobLearnMiscGMMMachineObject* tt;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscGMMMachine_Type, &tt)){
-    GMMMachine_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::GMMMachine(*tt->cxx));
-  return 0;
-}
-
-
-static int PyBobLearnMiscGMMMachine_init_hdf5(PyBobLearnMiscGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = GMMMachine_doc.kwlist(2);
-
-  PyBobIoHDF5FileObject* config = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBobIoHDF5File_Converter, &config)){
-    GMMMachine_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::GMMMachine(*(config->f)));
-
-  return 0;
-}
-
-
-
-static int PyBobLearnMiscGMMMachine_init(PyBobLearnMiscGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // get the number of command line arguments
-  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-  
-  switch (nargs) {
-
-    case 0: //default initializer ()
-      self->cxx.reset(new bob::learn::misc::GMMMachine());
-      return 0;
-
-    case 1:{
-      //Reading the input argument
-      PyObject* arg = 0;
-      if (PyTuple_Size(args))
-        arg = PyTuple_GET_ITEM(args, 0);
-      else {
-        PyObject* tmp = PyDict_Values(kwargs);
-        auto tmp_ = make_safe(tmp);
-        arg = PyList_GET_ITEM(tmp, 0);
-      }
-
-      // If the constructor input is Gaussian object
-     if (PyBobLearnMiscGMMMachine_Check(arg))
-       return PyBobLearnMiscGMMMachine_init_copy(self, args, kwargs);
-      // If the constructor input is a HDF5
-     else if (PyBobIoHDF5File_Check(arg))
-       return PyBobLearnMiscGMMMachine_init_hdf5(self, args, kwargs);
-    }
-    case 2:
-      return PyBobLearnMiscGMMMachine_init_number(self, args, kwargs);
-    default:
-      PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires 0, 1 or 2 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
-      GMMMachine_doc.print_usage();
-      return -1;
-  }
-  BOB_CATCH_MEMBER("cannot create GMMMachine", 0)
-  return 0;
-}
-
-
-
-static void PyBobLearnMiscGMMMachine_delete(PyBobLearnMiscGMMMachineObject* self) {
-  self->cxx.reset();
-  Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-static PyObject* PyBobLearnMiscGMMMachine_RichCompare(PyBobLearnMiscGMMMachineObject* self, PyObject* other, int op) {
-  BOB_TRY
-
-  if (!PyBobLearnMiscGMMMachine_Check(other)) {
-    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
-    return 0;
-  }
-  auto other_ = reinterpret_cast<PyBobLearnMiscGMMMachineObject*>(other);
-  switch (op) {
-    case Py_EQ:
-      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
-    case Py_NE:
-      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
-    default:
-      Py_INCREF(Py_NotImplemented);
-      return Py_NotImplemented;
-  }
-  BOB_CATCH_MEMBER("cannot compare GMMMachine objects", 0)
-}
-
-int PyBobLearnMiscGMMMachine_Check(PyObject* o) {
-  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnMiscGMMMachine_Type));
-}
-
-
-/******************************************************************/
-/************ Variables Section ***********************************/
-/******************************************************************/
-
-/***** shape *****/
-static auto shape = bob::extension::VariableDoc(
-  "shape",
-  "(int,int)",
-  "A tuple that represents the number of gaussians and dimensionality of each Gaussian ``(n_gaussians, dim)``.",
-  ""
-);
-PyObject* PyBobLearnMiscGMMMachine_getShape(PyBobLearnMiscGMMMachineObject* self, void*) {
-  BOB_TRY
-  return Py_BuildValue("(i,i)", self->cxx->getNGaussians(), self->cxx->getNInputs());
-  BOB_CATCH_MEMBER("shape could not be read", 0)
-}
-
-/***** MEAN *****/
-
-static auto means = bob::extension::VariableDoc(
-  "means",
-  "array_like <float, 2D>",
-  "The means of the gaussians",
-  ""
-);
-PyObject* PyBobLearnMiscGMMMachine_getMeans(PyBobLearnMiscGMMMachineObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getMeans());
-  BOB_CATCH_MEMBER("means could not be read", 0)
-}
-int PyBobLearnMiscGMMMachine_setMeans(PyBobLearnMiscGMMMachineObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, means.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "means");
-  if (!b) return -1;
-  self->cxx->setMeans(*b);
-  return 0;
-  BOB_CATCH_MEMBER("means could not be set", -1)
-}
-
-/***** Variance *****/
-static auto variances = bob::extension::VariableDoc(
-  "variances",
-  "array_like <float, 2D>",
-  "Variances of the gaussians",
-  ""
-);
-PyObject* PyBobLearnMiscGMMMachine_getVariances(PyBobLearnMiscGMMMachineObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getVariances());
-  BOB_CATCH_MEMBER("variances could not be read", 0)
-}
-int PyBobLearnMiscGMMMachine_setVariances(PyBobLearnMiscGMMMachineObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, variances.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "variances");
-  if (!b) return -1;
-  self->cxx->setVariances(*b);
-  return 0;
-  BOB_CATCH_MEMBER("variances could not be set", -1)
-}
-
-/***** Weights *****/
-static auto weights = bob::extension::VariableDoc(
-  "weights",
-  "array_like <float, 1D>",
-  "The weights (also known as \"mixing coefficients\")",
-  ""
-);
-PyObject* PyBobLearnMiscGMMMachine_getWeights(PyBobLearnMiscGMMMachineObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getWeights());
-  BOB_CATCH_MEMBER("weights could not be read", 0)
-}
-int PyBobLearnMiscGMMMachine_setWeights(PyBobLearnMiscGMMMachineObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, weights.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "weights");
-  if (!b) return -1;
-  self->cxx->setWeights(*b);
-  return 0;
-  BOB_CATCH_MEMBER("weights could not be set", -1)
-}
-
-
-/***** variance_supervector *****/
-static auto variance_supervector = bob::extension::VariableDoc(
-  "variance_supervector",
-  "array_like <float, 1D>",
-  "The variance supervector of the GMMMachine",
-  "Concatenation of the variance vectors of each Gaussian of the GMMMachine"
-);
-PyObject* PyBobLearnMiscGMMMachine_getVarianceSupervector(PyBobLearnMiscGMMMachineObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getVarianceSupervector());
-  BOB_CATCH_MEMBER("variance_supervector could not be read", 0)
-}
-int PyBobLearnMiscGMMMachine_setVarianceSupervector(PyBobLearnMiscGMMMachineObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, variance_supervector.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "variance_supervector");
-  if (!b) return -1;
-  self->cxx->setVarianceSupervector(*b);
-  return 0;
-  BOB_CATCH_MEMBER("variance_supervector could not be set", -1)
-}
-
-/***** mean_supervector *****/
-static auto mean_supervector = bob::extension::VariableDoc(
-  "mean_supervector",
-  "array_like <float, 1D>",
-  "The mean supervector of the GMMMachine",
-  "Concatenation of the mean vectors of each Gaussian of the GMMMachine"
-);
-PyObject* PyBobLearnMiscGMMMachine_getMeanSupervector(PyBobLearnMiscGMMMachineObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getMeanSupervector());
-  BOB_CATCH_MEMBER("mean_supervector could not be read", 0)
-}
-int PyBobLearnMiscGMMMachine_setMeanSupervector(PyBobLearnMiscGMMMachineObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, mean_supervector.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "mean_supervector");
-  if (!b) return -1;
-  self->cxx->setMeanSupervector(*b);
-  return 0;
-  BOB_CATCH_MEMBER("mean_supervector could not be set", -1)
-}
-
-
-
-/***** variance_thresholds *****/
-static auto variance_thresholds = bob::extension::VariableDoc(
-  "variance_thresholds",
-  "array_like <double, 2D>",
-  "Set the variance flooring thresholds in each dimension to the same vector for all Gaussian components if the argument is a 1D numpy arrray, and equal for all Gaussian components and dimensions if the parameter is a scalar. ",
-  ""
-);
-PyObject* PyBobLearnMiscGMMMachine_getVarianceThresholds(PyBobLearnMiscGMMMachineObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getVarianceThresholds());
-  BOB_CATCH_MEMBER("variance_thresholds could not be read", 0)
-}
-int PyBobLearnMiscGMMMachine_setVarianceThresholds(PyBobLearnMiscGMMMachineObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, variance_thresholds.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "variance_thresholds");
-  if (!b) return -1;
-  self->cxx->setVarianceThresholds(*b);
-  return 0;
-  BOB_CATCH_MEMBER("variance_thresholds could not be set", -1)  
-}
-
-
-
-
-static PyGetSetDef PyBobLearnMiscGMMMachine_getseters[] = { 
-  {
-   shape.name(),
-   (getter)PyBobLearnMiscGMMMachine_getShape,
-   0,
-   shape.doc(),
-   0
-  },
-  {
-   means.name(),
-   (getter)PyBobLearnMiscGMMMachine_getMeans,
-   (setter)PyBobLearnMiscGMMMachine_setMeans,
-   means.doc(),
-   0
-  },
-  {
-   variances.name(),
-   (getter)PyBobLearnMiscGMMMachine_getVariances,
-   (setter)PyBobLearnMiscGMMMachine_setVariances,
-   variances.doc(),
-   0
-  },
-  {
-   weights.name(),
-   (getter)PyBobLearnMiscGMMMachine_getWeights,
-   (setter)PyBobLearnMiscGMMMachine_setWeights,
-   weights.doc(),
-   0
-  },
-  {
-   variance_thresholds.name(),
-   (getter)PyBobLearnMiscGMMMachine_getVarianceThresholds,
-   (setter)PyBobLearnMiscGMMMachine_setVarianceThresholds,
-   variance_thresholds.doc(),
-   0
-  },
-  {
-   variance_supervector.name(),
-   (getter)PyBobLearnMiscGMMMachine_getVarianceSupervector,
-   (setter)PyBobLearnMiscGMMMachine_setVarianceSupervector,
-   variance_supervector.doc(),
-   0
-  },
-
-  {
-   mean_supervector.name(),
-   (getter)PyBobLearnMiscGMMMachine_getMeanSupervector,
-   (setter)PyBobLearnMiscGMMMachine_setMeanSupervector,
-   mean_supervector.doc(),
-   0
-  },
-  
-  {0}  // Sentinel
-};
-
-
-/******************************************************************/
-/************ Functions Section ***********************************/
-/******************************************************************/
-
-
-/*** save ***/
-static auto save = bob::extension::FunctionDoc(
-  "save",
-  "Save the configuration of the GMMMachine to a given HDF5 file"
-)
-.add_prototype("hdf5")
-.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing");
-static PyObject* PyBobLearnMiscGMMMachine_Save(PyBobLearnMiscGMMMachineObject* self,  PyObject* args, PyObject* kwargs) {
-
-  BOB_TRY
-  
-  // get list of arguments
-  char** kwlist = save.kwlist(0);  
-  PyBobIoHDF5FileObject* hdf5;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-
-  auto hdf5_ = make_safe(hdf5);
-  self->cxx->save(*hdf5->f);
-
-  BOB_CATCH_MEMBER("cannot save the data", 0)
-  Py_RETURN_NONE;
-}
-
-/*** load ***/
-static auto load = bob::extension::FunctionDoc(
-  "load",
-  "Load the configuration of the GMMMachine to a given HDF5 file"
-)
-.add_prototype("hdf5")
-.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
-static PyObject* PyBobLearnMiscGMMMachine_Load(PyBobLearnMiscGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = load.kwlist(0);  
-  PyBobIoHDF5FileObject* hdf5;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-  
-  auto hdf5_ = make_safe(hdf5);  
-  self->cxx->load(*hdf5->f);
-
-  BOB_CATCH_MEMBER("cannot load the data", 0)
-  Py_RETURN_NONE;
-}
-
-
-/*** is_similar_to ***/
-static auto is_similar_to = bob::extension::FunctionDoc(
-  "is_similar_to",
-  
-  "Compares this GMMMachine with the ``other`` one to be approximately the same.",
-  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
-  "relative and absolute precision for the ``weights``, ``biases`` "
-  "and any other values internal to this machine."
-)
-.add_prototype("other, [r_epsilon], [a_epsilon]","output")
-.add_parameter("other", ":py:class:`bob.learn.misc.GMMMachine`", "A GMMMachine object to be compared.")
-.add_parameter("r_epsilon", "float", "Relative precision.")
-.add_parameter("a_epsilon", "float", "Absolute precision.")
-.add_return("output","bool","True if it is similar, otherwise false.");
-static PyObject* PyBobLearnMiscGMMMachine_IsSimilarTo(PyBobLearnMiscGMMMachineObject* self, PyObject* args, PyObject* kwds) {
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = is_similar_to.kwlist(0);
-
-  //PyObject* other = 0;
-  PyBobLearnMiscGMMMachineObject* other = 0;
-  double r_epsilon = 1.e-5;
-  double a_epsilon = 1.e-8;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
-        &PyBobLearnMiscGMMMachine_Type, &other,
-        &r_epsilon, &a_epsilon)){
-
-        is_similar_to.print_usage(); 
-        return 0;        
-  }
-
-  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
-    Py_RETURN_TRUE;
-  else
-    Py_RETURN_FALSE;
-}
-
-
-/*** resize ***/
-static auto resize = bob::extension::FunctionDoc(
-  "resize",
-  "Allocates space for the statistics and resets to zero.",
-  0,
-  true
-)
-.add_prototype("n_gaussians,n_inputs")
-.add_parameter("n_gaussians", "int", "Number of gaussians")
-.add_parameter("n_inputs", "int", "Dimensionality of the feature vector");
-static PyObject* PyBobLearnMiscGMMMachine_resize(PyBobLearnMiscGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = resize.kwlist(0);
-
-  int n_gaussians = 0;
-  int n_inputs = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "ii", kwlist, &n_gaussians, &n_inputs)) Py_RETURN_NONE;
-
-  if (n_gaussians <= 0){
-    PyErr_Format(PyExc_TypeError, "n_gaussians must be greater than zero");
-    resize.print_usage();
-    return 0;
-  }
-  if (n_inputs <= 0){
-    PyErr_Format(PyExc_TypeError, "n_inputs must be greater than zero");
-    resize.print_usage();
-    return 0;
-  }
-
-  self->cxx->resize(n_gaussians, n_inputs);
-
-  BOB_CATCH_MEMBER("cannot perform the resize method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** log_likelihood ***/
-static auto log_likelihood = bob::extension::FunctionDoc(
-  "log_likelihood",
-  "Output the log likelihood of the sample, x, i.e. log(p(x|GMM)). Inputs are checked.",
-  ".. note:: The :py:meth:`__call__` function is an alias for this.", 
-  true
-)
-.add_prototype("input","output")
-.add_parameter("input", "array_like <float, 1D>", "Input vector")
-.add_return("output","float","The log likelihood");
-static PyObject* PyBobLearnMiscGMMMachine_loglikelihood(PyBobLearnMiscGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = log_likelihood.kwlist(0);
-
-  PyBlitzArrayObject* input = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBlitzArray_Converter, &input)) Py_RETURN_NONE;
-  //protects acquired resources through this scope
-  auto input_ = make_safe(input);
-
-  double value = self->cxx->logLikelihood(*PyBlitzArrayCxx_AsBlitz<double,1>(input));
-  return Py_BuildValue("d", value);
-
-  BOB_CATCH_MEMBER("cannot compute the likelihood", 0)
-}
-
-
-/*** log_likelihood_ ***/
-static auto log_likelihood_ = bob::extension::FunctionDoc(
-  "log_likelihood_",
-  "Output the log likelihood of the sample, x, i.e. log(p(x|GMM)). Inputs are NOT checked.",
-  "", 
-  true
-)
-.add_prototype("input","output")
-.add_parameter("input", "array_like <float, 1D>", "Input vector")
-.add_return("output","float","The log likelihood");
-static PyObject* PyBobLearnMiscGMMMachine_loglikelihood_(PyBobLearnMiscGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = log_likelihood_.kwlist(0);
-
-  PyBlitzArrayObject* input = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBlitzArray_Converter, &input)) Py_RETURN_NONE;
-  //protects acquired resources through this scope
-  auto input_ = make_safe(input);
-
-  double value = self->cxx->logLikelihood_(*PyBlitzArrayCxx_AsBlitz<double,1>(input));
-  return Py_BuildValue("d", value);
-
-  BOB_CATCH_MEMBER("cannot compute the likelihood", 0)
-}
-
-
-/*** acc_statistics ***/
-static auto acc_statistics = bob::extension::FunctionDoc(
-  "acc_statistics",
-  "Accumulate the GMM statistics for this sample(s). Inputs are checked.",
-  "", 
-  true
-)
-.add_prototype("input,stats")
-.add_parameter("input", "array_like <float, 2D>", "Input vector")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "Statistics of the GMM");
-static PyObject* PyBobLearnMiscGMMMachine_accStatistics(PyBobLearnMiscGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  char** kwlist = acc_statistics.kwlist(0);
-
-  PyBlitzArrayObject* input           = 0;
-  PyBobLearnMiscGMMStatsObject* stats = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O!", kwlist, &PyBlitzArray_Converter,&input, 
-                                                                 &PyBobLearnMiscGMMStats_Type, &stats))
-    Py_RETURN_NONE;
-
-  //protects acquired resources through this scope
-  auto input_ = make_safe(input);
-  self->cxx->accStatistics(*PyBlitzArrayCxx_AsBlitz<double,2>(input), *stats->cxx);
-
-  BOB_CATCH_MEMBER("cannot accumulate the statistics", 0)
-  Py_RETURN_NONE;
-}
-
-
-/*** acc_statistics_ ***/
-static auto acc_statistics_ = bob::extension::FunctionDoc(
-  "acc_statistics_",
-  "Accumulate the GMM statistics for this sample(s). Inputs are NOT checked.",
-  "", 
-  true
-)
-.add_prototype("input,stats")
-.add_parameter("input", "array_like <float, 2D>", "Input vector")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "Statistics of the GMM");
-static PyObject* PyBobLearnMiscGMMMachine_accStatistics_(PyBobLearnMiscGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = acc_statistics_.kwlist(0);
-
-  PyBlitzArrayObject* input = 0;
-  PyBobLearnMiscGMMStatsObject* stats = 0;
-
-
-
- if(!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O!", kwlist, &PyBlitzArray_Converter,&input, 
-                                                                 &PyBobLearnMiscGMMStats_Type, &stats))
-    Py_RETURN_NONE;
-
-  //protects acquired resources through this scope
-  auto input_ = make_safe(input);
-  self->cxx->accStatistics_(*PyBlitzArrayCxx_AsBlitz<double,2>(input), *stats->cxx);
-
-  BOB_CATCH_MEMBER("cannot accumulate the statistics", 0)
-  Py_RETURN_NONE;
-}
-
-
-
-/*** set_variance_thresholds ***/
-static auto set_variance_thresholds = bob::extension::FunctionDoc(
-  "set_variance_thresholds",
-  "Set the variance flooring thresholds in each dimension to the same vector for all Gaussian components if the argument is a 1D numpy arrray, and equal for all Gaussian components and dimensions if the parameter is a scalar.",
-  "",
-  true
-)
-.add_prototype("input")
-.add_parameter("input", "array_like <float, 1D>", "Input vector");
-static PyObject* PyBobLearnMiscGMMMachine_setVarianceThresholds_method(PyBobLearnMiscGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  char** kwlist = set_variance_thresholds.kwlist(0);
-
-  PyBlitzArrayObject* input_array = 0;
-  double input_number = 0;
-  if(PyArg_ParseTupleAndKeywords(args, kwargs, "d", kwlist, &input_number)){
-    self->cxx->setVarianceThresholds(input_number);
-  }
-  else if(PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBlitzArray_Converter,&input_array)) {
-    //protects acquired resources through this scope
-    auto input_ = make_safe(input_array);
-    self->cxx->setVarianceThresholds(*PyBlitzArrayCxx_AsBlitz<double,1>(input_array));
-  }
-  else
-    return 0;
-
-
-
-  BOB_CATCH_MEMBER("cannot accumulate set the variance threshold", 0)
-  Py_RETURN_NONE;
-}
-
-
-
-
-/*** get_gaussian ***/
-static auto get_gaussian = bob::extension::FunctionDoc(
-  "get_gaussian",
-  "Get the specified Gaussian component.",
-  ".. note:: An exception is thrown if i is out of range.", 
-  true
-)
-.add_prototype("i","gaussian")
-.add_parameter("i", "int", "Index of the gaussian")
-.add_return("gaussian",":py:class:`bob.learn.misc.Gaussian`","Gaussian object");
-static PyObject* PyBobLearnMiscGMMMachine_get_gaussian(PyBobLearnMiscGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = get_gaussian.kwlist(0);
-
-  int i = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) Py_RETURN_NONE;
- 
-  boost::shared_ptr<bob::learn::misc::Gaussian> gaussian = self->cxx->getGaussian(i);
-
-  //Allocating the correspondent python object
-  PyBobLearnMiscGaussianObject* retval =
-    (PyBobLearnMiscGaussianObject*)PyBobLearnMiscGaussian_Type.tp_alloc(&PyBobLearnMiscGaussian_Type, 0);
-
-  retval->cxx = gaussian;
-   
-  //return reinterpret_cast<PyObject*>(retval);
-  return Py_BuildValue("O",retval);
-
-  BOB_CATCH_MEMBER("cannot compute the likelihood", 0)
-}
-
-
-
-static PyMethodDef PyBobLearnMiscGMMMachine_methods[] = {
-  {
-    save.name(),
-    (PyCFunction)PyBobLearnMiscGMMMachine_Save,
-    METH_VARARGS|METH_KEYWORDS,
-    save.doc()
-  },
-  {
-    load.name(),
-    (PyCFunction)PyBobLearnMiscGMMMachine_Load,
-    METH_VARARGS|METH_KEYWORDS,
-    load.doc()
-  },
-  {
-    is_similar_to.name(),
-    (PyCFunction)PyBobLearnMiscGMMMachine_IsSimilarTo,
-    METH_VARARGS|METH_KEYWORDS,
-    is_similar_to.doc()
-  },
-  {
-    resize.name(),
-    (PyCFunction)PyBobLearnMiscGMMMachine_resize,
-    METH_VARARGS|METH_KEYWORDS,
-    resize.doc()
-  },
-  {
-    log_likelihood.name(),
-    (PyCFunction)PyBobLearnMiscGMMMachine_loglikelihood,
-    METH_VARARGS|METH_KEYWORDS,
-    log_likelihood.doc()
-  },
-  {
-    log_likelihood_.name(),
-    (PyCFunction)PyBobLearnMiscGMMMachine_loglikelihood_,
-    METH_VARARGS|METH_KEYWORDS,
-    log_likelihood_.doc()
-  },
-  {
-    acc_statistics.name(),
-    (PyCFunction)PyBobLearnMiscGMMMachine_accStatistics,
-    METH_VARARGS|METH_KEYWORDS,
-    acc_statistics.doc()
-  },
-  {
-    acc_statistics_.name(),
-    (PyCFunction)PyBobLearnMiscGMMMachine_accStatistics_,
-    METH_VARARGS|METH_KEYWORDS,
-    acc_statistics_.doc()
-  },
- 
-  {
-    get_gaussian.name(),
-    (PyCFunction)PyBobLearnMiscGMMMachine_get_gaussian,
-    METH_VARARGS|METH_KEYWORDS,
-    get_gaussian.doc()
-  },
-
-  {
-    set_variance_thresholds.name(),
-    (PyCFunction)PyBobLearnMiscGMMMachine_setVarianceThresholds_method,
-    METH_VARARGS|METH_KEYWORDS,
-    set_variance_thresholds.doc()
-  },
-  
-  {0} /* Sentinel */
-};
-
-
-/******************************************************************/
-/************ Module Section **************************************/
-/******************************************************************/
-
-// Define the Gaussian type struct; will be initialized later
-PyTypeObject PyBobLearnMiscGMMMachine_Type = {
-  PyVarObject_HEAD_INIT(0,0)
-  0
-};
-
-bool init_BobLearnMiscGMMMachine(PyObject* module)
-{
-  // initialize the type struct
-  PyBobLearnMiscGMMMachine_Type.tp_name = GMMMachine_doc.name();
-  PyBobLearnMiscGMMMachine_Type.tp_basicsize = sizeof(PyBobLearnMiscGMMMachineObject);
-  PyBobLearnMiscGMMMachine_Type.tp_flags = Py_TPFLAGS_DEFAULT;
-  PyBobLearnMiscGMMMachine_Type.tp_doc = GMMMachine_doc.doc();
-
-  // set the functions
-  PyBobLearnMiscGMMMachine_Type.tp_new = PyType_GenericNew;
-  PyBobLearnMiscGMMMachine_Type.tp_init = reinterpret_cast<initproc>(PyBobLearnMiscGMMMachine_init);
-  PyBobLearnMiscGMMMachine_Type.tp_dealloc = reinterpret_cast<destructor>(PyBobLearnMiscGMMMachine_delete);
-  PyBobLearnMiscGMMMachine_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnMiscGMMMachine_RichCompare);
-  PyBobLearnMiscGMMMachine_Type.tp_methods = PyBobLearnMiscGMMMachine_methods;
-  PyBobLearnMiscGMMMachine_Type.tp_getset = PyBobLearnMiscGMMMachine_getseters;
-  PyBobLearnMiscGMMMachine_Type.tp_call = reinterpret_cast<ternaryfunc>(PyBobLearnMiscGMMMachine_loglikelihood);
-
-
-  // check that everything is fine
-  if (PyType_Ready(&PyBobLearnMiscGMMMachine_Type) < 0) return false;
-
-  // add the type to the module
-  Py_INCREF(&PyBobLearnMiscGMMMachine_Type);
-  return PyModule_AddObject(module, "GMMMachine", (PyObject*)&PyBobLearnMiscGMMMachine_Type) >= 0;
-}
-
diff --git a/bob/learn/misc/gmm_stats.cpp b/bob/learn/misc/gmm_stats.cpp
deleted file mode 100644
index 9f859ffbebe56bd0e367082d9e4a47fdbd074056..0000000000000000000000000000000000000000
--- a/bob/learn/misc/gmm_stats.cpp
+++ /dev/null
@@ -1,623 +0,0 @@
-/**
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- * @date Wed 03 Dec 14:38:48 2014
- *
- * @brief Python API for bob::learn::em
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "main.h"
-
-/******************************************************************/
-/************ Constructor Section *********************************/
-/******************************************************************/
-
-static auto GMMStats_doc = bob::extension::ClassDoc(
-  BOB_EXT_MODULE_PREFIX ".GMMStats",
-  "A container for GMM statistics",
-  "With respect to [Reynolds2000]_ the class computes: \n\n"
-  "* Eq (8) is :py:class:`bob.learn.misc.GMMStats.n`: :math:`n_i=\\sum\\limits_{t=1}^T Pr(i | x_t)`\n\n"
-  "* Eq (9) is :py:class:`bob.learn.misc.GMMStats.sum_px`:  :math:`E_i(x)=\\frac{1}{n(i)}\\sum\\limits_{t=1}^T Pr(i | x_t)x_t`\n\n"
-  "* Eq (10) is :py:class:`bob.learn.misc.GMMStats.sum_pxx`: :math:`E_i(x^2)=\\frac{1}{n(i)}\\sum\\limits_{t=1}^T Pr(i | x_t)x_t^2`\n\n"
-).add_constructor(
-  bob::extension::FunctionDoc(
-    "__init__",
-    "A container for GMM statistics.",
-    "",
-    true
-  )
-  .add_prototype("n_gaussians,n_inputs","")
-  .add_prototype("other","")
-  .add_prototype("hdf5","")
-  .add_prototype("","")
-
-  .add_parameter("n_gaussians", "int", "Number of gaussians")
-  .add_parameter("n_inputs", "int", "Dimension of the feature vector")
-  .add_parameter("other", ":py:class:`bob.learn.misc.GMMStats`", "A GMMStats object to be copied.")
-  .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
-
-);
-
-
-static int PyBobLearnMiscGMMStats_init_number(PyBobLearnMiscGMMStatsObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = GMMStats_doc.kwlist(0);
-  int n_inputs    = 1;
-  int n_gaussians = 1;
-  //Parsing the input argments
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "ii", kwlist, &n_gaussians, &n_inputs))
-    return -1;
-
-  if(n_gaussians < 0){
-    PyErr_Format(PyExc_TypeError, "gaussians argument must be greater than or equal to zero");
-    GMMStats_doc.print_usage();
-    return -1;
-  }
-
-  if(n_inputs < 0){
-    PyErr_Format(PyExc_TypeError, "input argument must be greater than or equal to zero");
-    GMMStats_doc.print_usage();
-    return -1;
-   }
-
-  self->cxx.reset(new bob::learn::misc::GMMStats(n_gaussians, n_inputs));
-  return 0;
-}
-
-
-static int PyBobLearnMiscGMMStats_init_copy(PyBobLearnMiscGMMStatsObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = GMMStats_doc.kwlist(1);
-  PyBobLearnMiscGMMStatsObject* tt;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscGMMStats_Type, &tt)){
-    GMMStats_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::GMMStats(*tt->cxx));
-  return 0;
-}
-
-
-static int PyBobLearnMiscGMMStats_init_hdf5(PyBobLearnMiscGMMStatsObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = GMMStats_doc.kwlist(2);
-
-  PyBobIoHDF5FileObject* config = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBobIoHDF5File_Converter, &config)){
-    GMMStats_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::GMMStats(*(config->f)));
-
-  return 0;
-}
-
-
-
-static int PyBobLearnMiscGMMStats_init(PyBobLearnMiscGMMStatsObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // get the number of command line arguments
-  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-
-  switch (nargs) {
-
-    case 0: //default initializer ()
-      self->cxx.reset(new bob::learn::misc::GMMStats());
-      return 0;
-
-    case 1:{
-      //Reading the input argument
-      PyObject* arg = 0;
-      if (PyTuple_Size(args))
-        arg = PyTuple_GET_ITEM(args, 0);
-      else {
-        PyObject* tmp = PyDict_Values(kwargs);
-        auto tmp_ = make_safe(tmp);
-        arg = PyList_GET_ITEM(tmp, 0);
-      }
-
-      /**If the constructor input is Gaussian object**/	
-     if (PyBobLearnMiscGMMStats_Check(arg))
-       return PyBobLearnMiscGMMStats_init_copy(self, args, kwargs);
-      /**If the constructor input is a HDF5**/
-     else if (PyBobIoHDF5File_Check(arg))
-       return PyBobLearnMiscGMMStats_init_hdf5(self, args, kwargs);
-    }
-    case 2:
-      return PyBobLearnMiscGMMStats_init_number(self, args, kwargs);
-    default:
-      PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires 0, 1 or 2 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
-      GMMStats_doc.print_usage();
-      return -1;
-  }
-  BOB_CATCH_MEMBER("cannot create GMMStats", 0)
-  return 0;
-}
-
-
-
-static void PyBobLearnMiscGMMStats_delete(PyBobLearnMiscGMMStatsObject* self) {
-  self->cxx.reset();
-  Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-static PyObject* PyBobLearnMiscGMMStats_RichCompare(PyBobLearnMiscGMMStatsObject* self, PyObject* other, int op) {
-  BOB_TRY
-
-  if (!PyBobLearnMiscGMMStats_Check(other)) {
-    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
-    return 0;
-  }
-  auto other_ = reinterpret_cast<PyBobLearnMiscGMMStatsObject*>(other);
-  switch (op) {
-    case Py_EQ:
-      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
-    case Py_NE:
-      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
-    default:
-      Py_INCREF(Py_NotImplemented);
-      return Py_NotImplemented;
-  }
-  BOB_CATCH_MEMBER("cannot compare GMMStats objects", 0)
-}
-
-int PyBobLearnMiscGMMStats_Check(PyObject* o) {
-  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnMiscGMMStats_Type));
-}
-
-
-/******************************************************************/
-/************ Variables Section ***********************************/
-/******************************************************************/
-
-/***** n *****/
-static auto n = bob::extension::VariableDoc(
-  "n",
-  "array_like <float, 1D>",
-  "For each Gaussian, the accumulated sum of responsibilities, i.e. the sum of :math:`P(gaussian_i|x)`"
-);
-PyObject* PyBobLearnMiscGMMStats_getN(PyBobLearnMiscGMMStatsObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->n);
-  BOB_CATCH_MEMBER("n could not be read", 0)
-}
-int PyBobLearnMiscGMMStats_setN(PyBobLearnMiscGMMStatsObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, n.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "n");
-  if (!b) return -1;
-  self->cxx->n = *b;
-  return 0;
-  BOB_CATCH_MEMBER("n could not be set", -1)  
-}
-
-
-/***** sum_px *****/
-static auto sum_px = bob::extension::VariableDoc(
-  "sum_px",
-  "array_like <float, 2D>",
-  "For each Gaussian, the accumulated sum of responsibility times the sample"
-);
-PyObject* PyBobLearnMiscGMMStats_getSum_px(PyBobLearnMiscGMMStatsObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->sumPx);
-  BOB_CATCH_MEMBER("sum_px could not be read", 0)
-}
-int PyBobLearnMiscGMMStats_setSum_px(PyBobLearnMiscGMMStatsObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, sum_px.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "sum_px");
-  if (!b) return -1;
-  self->cxx->sumPx = *b;
-  return 0;
-  BOB_CATCH_MEMBER("sum_px could not be set", -1)  
-}
-
-
-/***** sum_pxx *****/
-static auto sum_pxx = bob::extension::VariableDoc(
-  "sum_pxx",
-  "array_like <float, 2D>",
-  "For each Gaussian, the accumulated sum of responsibility times the sample squared"
-);
-PyObject* PyBobLearnMiscGMMStats_getSum_pxx(PyBobLearnMiscGMMStatsObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->sumPxx);
-  BOB_CATCH_MEMBER("sum_pxx could not be read", 0)
-}
-int PyBobLearnMiscGMMStats_setSum_pxx(PyBobLearnMiscGMMStatsObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, sum_pxx.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "sum_pxx");
-  if (!b) return -1;
-  self->cxx->sumPxx = *b;
-  return 0;
-  BOB_CATCH_MEMBER("sum_pxx could not be set", -1)  
-}
-
-
-/***** t *****/
-static auto t = bob::extension::VariableDoc(
-  "t",
-  "int",
-  "The number of samples"
-);
-PyObject* PyBobLearnMiscGMMStats_getT(PyBobLearnMiscGMMStatsObject* self, void*){
-  BOB_TRY
-  return Py_BuildValue("i", self->cxx->T);
-  BOB_CATCH_MEMBER("t could not be read", 0)
-}
-int PyBobLearnMiscGMMStats_setT(PyBobLearnMiscGMMStatsObject* self, PyObject* value, void*){
-  BOB_TRY
-
-  if (!PyInt_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects an int", Py_TYPE(self)->tp_name, t.name());
-    return -1;
-  }
-
-  if (PyInt_AS_LONG(value) < 0){
-    PyErr_Format(PyExc_TypeError, "t must be greater than or equal to zero");
-    return -1;
-  }
-
-  self->cxx->T = PyInt_AS_LONG(value);
-  BOB_CATCH_MEMBER("t could not be set", -1)
-  return 0;
-}
-
-
-/***** log_likelihood *****/
-static auto log_likelihood = bob::extension::VariableDoc(
-  "log_likelihood",
-  "double",
-  "The accumulated log likelihood of all samples"
-);
-PyObject* PyBobLearnMiscGMMStats_getLog_likelihood(PyBobLearnMiscGMMStatsObject* self, void*){
-  BOB_TRY
-  return Py_BuildValue("d","log_likelihood", self->cxx->log_likelihood);
-  BOB_CATCH_MEMBER("log_likelihood could not be read", 0)
-}
-int PyBobLearnMiscGMMStats_setLog_likelihood(PyBobLearnMiscGMMStatsObject* self, PyObject* value, void*){
-  BOB_TRY
-
-  if (!PyNumber_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects an double", Py_TYPE(self)->tp_name, t.name());
-    return -1;
-  }
-
-  self->cxx->log_likelihood = PyFloat_AsDouble(value);
-  return 0;
-  BOB_CATCH_MEMBER("log_likelihood could not be set", -1)
-}
-
-
-/***** shape *****/
-static auto shape = bob::extension::VariableDoc(
-  "shape",
-  "(int,int)",
-  "A tuple that represents the number of gaussians and dimensionality of each Gaussian ``(n_gaussians, dim)``.",
-  ""
-);
-PyObject* PyBobLearnMiscGMMStats_getShape(PyBobLearnMiscGMMStatsObject* self, void*) {
-  BOB_TRY
-  return Py_BuildValue("(i,i)", self->cxx->sumPx.shape()[0], self->cxx->sumPx.shape()[1]);
-  BOB_CATCH_MEMBER("shape could not be read", 0)
-}
-
-
-
-static PyGetSetDef PyBobLearnMiscGMMStats_getseters[] = {
-  {
-    n.name(),
-    (getter)PyBobLearnMiscGMMStats_getN,
-    (setter)PyBobLearnMiscGMMStats_setN,
-    n.doc(),
-    0
-  },
-  {
-    sum_px.name(),
-    (getter)PyBobLearnMiscGMMStats_getSum_px,
-    (setter)PyBobLearnMiscGMMStats_setSum_px,
-    sum_px.doc(),
-    0
-  },
-  {
-    sum_pxx.name(),
-    (getter)PyBobLearnMiscGMMStats_getSum_pxx,
-    (setter)PyBobLearnMiscGMMStats_setSum_pxx,
-    sum_pxx.doc(),
-    0
-  },
-  {
-    t.name(),
-    (getter)PyBobLearnMiscGMMStats_getT,
-    (setter)PyBobLearnMiscGMMStats_setT,
-    t.doc(),
-    0
-  },
-  {
-    log_likelihood.name(),
-    (getter)PyBobLearnMiscGMMStats_getLog_likelihood,
-    (setter)PyBobLearnMiscGMMStats_setLog_likelihood,
-    log_likelihood.doc(),
-    0
-  },  
-  {
-   shape.name(),
-   (getter)PyBobLearnMiscGMMStats_getShape,
-   0,
-   shape.doc(),
-   0
-  },
-
-
-  {0}  // Sentinel
-};
-
-
-/******************************************************************/
-/************ Functions Section ***********************************/
-/******************************************************************/
-
-
-/*** save ***/
-static auto save = bob::extension::FunctionDoc(
-  "save",
-  "Save the configuration of the GMMStats to a given HDF5 file"
-)
-.add_prototype("hdf5")
-.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing");
-static PyObject* PyBobLearnMiscGMMStats_Save(PyBobLearnMiscGMMStatsObject* self,  PyObject* args, PyObject* kwargs) {
-
-  BOB_TRY
-  
-  // get list of arguments
-  char** kwlist = save.kwlist(0);  
-  PyBobIoHDF5FileObject* hdf5;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-
-  auto hdf5_ = make_safe(hdf5);
-  self->cxx->save(*hdf5->f);
-
-  BOB_CATCH_MEMBER("cannot save the data", 0)
-  Py_RETURN_NONE;
-}
-
-/*** load ***/
-static auto load = bob::extension::FunctionDoc(
-  "load",
-  "Load the configuration of the GMMStats to a given HDF5 file"
-)
-.add_prototype("hdf5")
-.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
-static PyObject* PyBobLearnMiscGMMStats_Load(PyBobLearnMiscGMMStatsObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = load.kwlist(0);  
-  PyBobIoHDF5FileObject* hdf5;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-  
-  auto hdf5_ = make_safe(hdf5);  
-  self->cxx->load(*hdf5->f);
-
-  BOB_CATCH_MEMBER("cannot load the data", 0)
-  Py_RETURN_NONE;
-}
-
-
-/*** is_similar_to ***/
-static auto is_similar_to = bob::extension::FunctionDoc(
-  "is_similar_to",
-  
-  "Compares this GMMStats with the ``other`` one to be approximately the same.",
-  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
-  "relative and absolute precision for the ``weights``, ``biases`` "
-  "and any other values internal to this machine."
-)
-.add_prototype("other, [r_epsilon], [a_epsilon]","output")
-.add_parameter("other", ":py:class:`bob.learn.misc.GMMStats`", "A GMMStats object to be compared.")
-.add_parameter("r_epsilon", "float", "Relative precision.")
-.add_parameter("a_epsilon", "float", "Absolute precision.")
-.add_return("output","bool","True if it is similar, otherwise false.");
-static PyObject* PyBobLearnMiscGMMStats_IsSimilarTo(PyBobLearnMiscGMMStatsObject* self, PyObject* args, PyObject* kwds) {
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = is_similar_to.kwlist(0);
-
-  //PyObject* other = 0;
-  PyBobLearnMiscGMMStatsObject* other = 0;
-  double r_epsilon = 1.e-5;
-  double a_epsilon = 1.e-8;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
-        &PyBobLearnMiscGMMStats_Type, &other,
-        &r_epsilon, &a_epsilon)){
-
-        is_similar_to.print_usage(); 
-        return 0;        
-  }
-
-  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
-    Py_RETURN_TRUE;
-  else
-    Py_RETURN_FALSE;
-}
-
-
-/*** resize ***/
-static auto resize = bob::extension::FunctionDoc(
-  "resize",
-  "Allocates space for the statistics and resets to zero.",
-  0,
-  true
-)
-.add_prototype("n_gaussians,n_inputs")
-.add_parameter("n_gaussians", "int", "Number of gaussians")
-.add_parameter("n_inputs", "int", "Dimensionality of the feature vector");
-static PyObject* PyBobLearnMiscGMMStats_resize(PyBobLearnMiscGMMStatsObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = resize.kwlist(0);
-
-  int n_gaussians = 0;
-  int n_inputs = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "ii", kwlist, &n_gaussians, &n_inputs)) Py_RETURN_NONE;
-
-  if (n_gaussians <= 0){
-    PyErr_Format(PyExc_TypeError, "n_gaussians must be greater than zero");
-    resize.print_usage();
-    return 0;
-  }
-  if (n_inputs <= 0){
-    PyErr_Format(PyExc_TypeError, "n_inputs must be greater than zero");
-    resize.print_usage();
-    return 0;
-  }
-
-
-  self->cxx->resize(n_gaussians, n_inputs);
-
-  BOB_CATCH_MEMBER("cannot perform the resize method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** init ***/
-static auto init = bob::extension::FunctionDoc(
-  "init",
-  " Resets statistics to zero."
-)
-.add_prototype("");
-static PyObject* PyBobLearnMiscGMMStats_init_method(PyBobLearnMiscGMMStatsObject* self) {
-  BOB_TRY
-
-  self->cxx->init();
-
-  BOB_CATCH_MEMBER("cannot perform the init method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-
-static PyMethodDef PyBobLearnMiscGMMStats_methods[] = {
-  {
-    save.name(),
-    (PyCFunction)PyBobLearnMiscGMMStats_Save,
-    METH_VARARGS|METH_KEYWORDS,
-    save.doc()
-  },
-  {
-    load.name(),
-    (PyCFunction)PyBobLearnMiscGMMStats_Load,
-    METH_VARARGS|METH_KEYWORDS,
-    load.doc()
-  },
-  {
-    is_similar_to.name(),
-    (PyCFunction)PyBobLearnMiscGMMStats_IsSimilarTo,
-    METH_VARARGS|METH_KEYWORDS,
-    is_similar_to.doc()
-  },
-  {
-    resize.name(),
-    (PyCFunction)PyBobLearnMiscGMMStats_resize,
-    METH_VARARGS|METH_KEYWORDS,
-    resize.doc()
-  },
-  {
-    init.name(),
-    (PyCFunction)PyBobLearnMiscGMMStats_init_method,
-    METH_NOARGS,
-    init.doc()
-  },
-
-  {0} /* Sentinel */
-};
-
-
-/******************************************************************/
-/************ Operators *******************************************/
-/******************************************************************/
-
-static PyBobLearnMiscGMMStatsObject* PyBobLearnMiscGMMStats_inplaceadd(PyBobLearnMiscGMMStatsObject* self, PyObject* other) {
-  BOB_TRY
-
-  if (!PyBobLearnMiscGMMStats_Check(other)){
-    PyErr_Format(PyExc_TypeError, "expected bob.learn.misc.GMMStats object");
-    return 0;
-  }
-
-  auto other_ = reinterpret_cast<PyBobLearnMiscGMMStatsObject*>(other);
-
-  self->cxx->operator+=(*other_->cxx);
-
-  BOB_CATCH_MEMBER("it was not possible to process the operator +=", 0)
-
-  Py_INCREF(self);
-  return self;
-}
-
-static PyNumberMethods PyBobLearnMiscGMMStats_operators = {0};
-
-/******************************************************************/
-/************ Module Section **************************************/
-/******************************************************************/
-
-// Define the Gaussian type struct; will be initialized later
-PyTypeObject PyBobLearnMiscGMMStats_Type = {
-  PyVarObject_HEAD_INIT(0,0)
-  0
-};
-
-bool init_BobLearnMiscGMMStats(PyObject* module)
-{
-  // initialize the type struct
-  PyBobLearnMiscGMMStats_Type.tp_name = GMMStats_doc.name();
-  PyBobLearnMiscGMMStats_Type.tp_basicsize = sizeof(PyBobLearnMiscGMMStatsObject);
-  PyBobLearnMiscGMMStats_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_INPLACEOPS;
-  PyBobLearnMiscGMMStats_Type.tp_doc = GMMStats_doc.doc();
-
-  // set the functions
-  PyBobLearnMiscGMMStats_Type.tp_new = PyType_GenericNew;
-  PyBobLearnMiscGMMStats_Type.tp_init = reinterpret_cast<initproc>(PyBobLearnMiscGMMStats_init);
-  PyBobLearnMiscGMMStats_Type.tp_dealloc = reinterpret_cast<destructor>(PyBobLearnMiscGMMStats_delete);
-  PyBobLearnMiscGMMStats_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnMiscGMMStats_RichCompare);
-  PyBobLearnMiscGMMStats_Type.tp_methods = PyBobLearnMiscGMMStats_methods;
-  PyBobLearnMiscGMMStats_Type.tp_getset = PyBobLearnMiscGMMStats_getseters;
-  PyBobLearnMiscGMMStats_Type.tp_call = 0;
-  PyBobLearnMiscGMMStats_Type.tp_as_number = &PyBobLearnMiscGMMStats_operators;
-
-  //set operators
-  PyBobLearnMiscGMMStats_operators.nb_inplace_add = reinterpret_cast<binaryfunc>(PyBobLearnMiscGMMStats_inplaceadd);
-
-  // check that everything is fine
-  if (PyType_Ready(&PyBobLearnMiscGMMStats_Type) < 0) return false;
-
-  // add the type to the module
-  Py_INCREF(&PyBobLearnMiscGMMStats_Type);
-  return PyModule_AddObject(module, "GMMStats", (PyObject*)&PyBobLearnMiscGMMStats_Type) >= 0;
-}
-
diff --git a/bob/learn/misc/include/bob.learn.misc/EMPCATrainer.h b/bob/learn/misc/include/bob.learn.misc/EMPCATrainer.h
deleted file mode 100644
index 753c78cfb9b8af361edb66e4dce5214a6c2464cb..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/EMPCATrainer.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/**
- * @date Tue Oct 11 12:18:23 2011 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * @brief Expectation Maximization Algorithm for Principal Component
- * Analysis
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#ifndef BOB_LEARN_MISC_EMPCA_TRAINER_H
-#define BOB_LEARN_MISC_EMPCA_TRAINER_H
-
-#include <bob.learn.linear/machine.h>
-#include <blitz/array.h>
-
-namespace bob { namespace learn { namespace misc {
-
-/**
- * @brief Trains a linear machine using an Expectation-Maximization algorithm
- * on the given dataset.\n
- * References:\n
- *  1. "Probabilistic Principal Component Analysis",
- *     Michael Tipping and Christopher Bishop,
- *     Journal of the Royal Statistical Society,
- *      Series B, 61, Part 3, pp. 611–622\n
- *  2. "EM Algorithms for PCA and SPCA",
- *     Sam Roweis, Neural Information Processing Systems 10 (NIPS'97),
- *     pp.626-632 (Sensible Principal Component Analysis part)\n
- *
- * Notations used are the ones from reference 1.\n
- * The probabilistic model is given by: \f$t = W x + \mu + \epsilon\f$\n
- *  - \f$t\f$ is the observed data (dimension \f$f\f$)\n
- *  - \f$W\f$ is a  projection matrix (dimension \f$f \times d\f$)\n
- *  - \f$x\f$ is the projected data (dimension \f$d < f\f$)\n
- *  - \f$\mu\f$ is the mean of the data (dimension \f$f\f$)\n
- *  - \f$\epsilon\f$ is the noise of the data (dimension \f$f\f$)
- *      Gaussian with zero-mean and covariance matrix \f$\sigma^2 Id\f$
- */
-class EMPCATrainer
-{
-  public: //api
-    /**
-     * @brief Initializes a new EM PCA trainer. The training stage will place the
-     * resulting components in the linear machine and set it up to
-     * extract the variable means automatically.
-     */
-    EMPCATrainer(bool compute_likelihood=true);
-
-    /**
-     * @brief Copy constructor
-     */
-    EMPCATrainer(const EMPCATrainer& other);
-
-    /**
-     * @brief (virtual) Destructor
-     */
-    virtual ~EMPCATrainer();
-
-    /**
-     * @brief Assignment operator
-     */
-    EMPCATrainer& operator=(const EMPCATrainer& other);
-
-    /**
-     * @brief Equal to
-     */
-    bool operator==(const EMPCATrainer& b) const;
-
-    /**
-     * @brief Not equal to
-     */
-    bool operator!=(const EMPCATrainer& b) const;
-
-    /**
-     * @brief Similar to
-     */
-    bool is_similar_to(const EMPCATrainer& b, const double r_epsilon=1e-5,
-      const double a_epsilon=1e-8) const;
-
-    /**
-     * @brief This methods performs some initialization before the EM loop.
-     */
-    virtual void initialize(bob::learn::linear::Machine& machine,
-      const blitz::Array<double,2>& ar);
-
-    /**
-     * @brief Calculates and saves statistics across the dataset, and saves
-     * these as m_z_{first,second}_order.
-     *
-     * The statistics will be used in the mStep() that follows.
-     */
-    virtual void eStep(bob::learn::linear::Machine& machine,
-      const blitz::Array<double,2>& ar);
-
-    /**
-     * @brief Performs a maximization step to update the parameters of the
-     * factor analysis model.
-     */
-    virtual void mStep(bob::learn::linear::Machine& machine,
-       const blitz::Array<double,2>& ar);
-
-    /**
-     * @brief Computes the average log likelihood using the current estimates
-     * of the latent variables.
-     */
-    virtual double computeLikelihood(bob::learn::linear::Machine& machine);
-
-    /**
-     * @brief Sets \f$\sigma^2\f$ (Mostly for test purpose)
-     */
-    void setSigma2(double sigma2) { m_sigma2 = sigma2; }
-
-    /**
-     * @brief Gets \f$\sigma^2\f$ (Mostly for test purpose)
-     */
-    double getSigma2() const { return m_sigma2; }
-
-    /**
-     * @brief Sets the Random Number Generator
-     */
-    void setRng(const boost::shared_ptr<boost::mt19937> rng)
-    { m_rng = rng; }
-
-    /**
-     * @brief Gets the Random Number Generator
-     */
-    const boost::shared_ptr<boost::mt19937> getRng() const
-    { return m_rng; }
-
-
-  private: //representation
-
-    bool m_compute_likelihood;
-    boost::shared_ptr<boost::mt19937> m_rng;
-
-    blitz::Array<double,2> m_S; /// Covariance of the training data (required only if we need to compute the log likelihood)
-    blitz::Array<double,2> m_z_first_order; /// Current mean of the \f$z_{n}\f$ latent variable
-    blitz::Array<double,3> m_z_second_order; /// Current covariance of the \f$z_{n}\f$ latent variable
-    blitz::Array<double,2> m_inW; /// The matrix product \f$W^T W\f$
-    blitz::Array<double,2> m_invM; /// The matrix \f$inv(M)\f$, where \f$M = W^T W + \sigma^2 Id\f$
-    double m_sigma2; /// The variance \f$sigma^2\f$ of the noise epsilon of the probabilistic model
-    double m_f_log2pi; /// The constant \f$n_{features} log(2*\pi)\f$ used during the likelihood computation
-
-    // Working arrays
-    mutable blitz::Array<double,2> m_tmp_dxf; /// size dimensionality x n_features
-    mutable blitz::Array<double,1> m_tmp_d; /// size dimensionality
-    mutable blitz::Array<double,1> m_tmp_f; /// size n_features
-    mutable blitz::Array<double,2> m_tmp_dxd_1; /// size dimensionality x dimensionality
-    mutable blitz::Array<double,2> m_tmp_dxd_2; /// size dimensionality x dimensionality
-    mutable blitz::Array<double,2> m_tmp_fxd_1; /// size n_features x dimensionality
-    mutable blitz::Array<double,2> m_tmp_fxd_2; /// size n_features x dimensionality
-    mutable blitz::Array<double,2> m_tmp_fxf_1; /// size n_features x n_features
-    mutable blitz::Array<double,2> m_tmp_fxf_2; /// size n_features x n_features
-
-
-    /**
-     * @brief Initializes/resizes the (array) members
-     */
-    void initMembers(const bob::learn::linear::Machine& machine,
-      const blitz::Array<double,2>& ar);
-    /**
-     * @brief Computes the mean and the variance (if required) of the training
-     * data
-     */
-    void computeMeanVariance(bob::learn::linear::Machine& machine,
-      const blitz::Array<double,2>& ar);
-    /**
-     * @brief Random initialization of \f$W\f$ and \f$sigma^2\f$.
-     * W is the projection matrix (from the LinearMachine)
-     */
-    void initRandomWSigma2(bob::learn::linear::Machine& machine);
-    /**
-     * @brief Computes the product \f$W^T W\f$.
-     * \f$W\f$ is the projection matrix (from the LinearMachine)
-     */
-    void computeWtW(bob::learn::linear::Machine& machine);
-    /**
-     * @brief Computes the inverse of \f$M\f$ matrix, where
-     *   \f$M = W^T W + \sigma^2 Id\f$.
-     *   \f$W\f$ is the projection matrix (from the LinearMachine)
-     */
-    void computeInvM();
-    /**
-     * @brief M-Step (part 1): Computes the new estimate of \f$W\f$ using the
-     * new estimated statistics.
-     */
-    void updateW(bob::learn::linear::Machine& machine,
-       const blitz::Array<double,2>& ar);
-    /**
-     * @brief M-Step (part 2): Computes the new estimate of \f$\sigma^2\f$ using
-     * the new estimated statistics.
-     */
-    void updateSigma2(bob::learn::linear::Machine& machine,
-       const blitz::Array<double,2>& ar);
-};
-
-} } } // namespaces
-
-#endif /* BOB_LEARN_MISC_EMPCA_TRAINER_H */
diff --git a/bob/learn/misc/include/bob.learn.misc/FABase.h b/bob/learn/misc/include/bob.learn.misc/FABase.h
deleted file mode 100644
index 35b1667823dcb40ca4f4d059b9ec9d2a8bf62462..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/FABase.h
+++ /dev/null
@@ -1,293 +0,0 @@
-/**
- * @date Tue Jan 27 15:51:15 2015 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- *
- * @brief A base class for Factor Analysis
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#ifndef BOB_LEARN_MISC_FABASE_H
-#define BOB_LEARN_MISC_FABASE_H
-
-#include <stdexcept>
-
-#include <bob.learn.misc/GMMMachine.h>
-#include <boost/shared_ptr.hpp>
-
-namespace bob { namespace learn { namespace misc {
-
-/**
- * @brief A FA Base class which contains U, V and D matrices
- * TODO: add a reference to the journal articles
- */
-class FABase
-{
-  public:
-    /**
-     * @brief Default constructor. Builds an otherwise invalid 0 x 0 FABase
-     * The Universal Background Model and the matrices U, V and diag(d) are
-     * not initialized.
-     */
-    FABase();
-
-    /**
-     * @brief Constructor. Builds a new FABase.
-     * The Universal Background Model and the matrices U, V and diag(d) are
-     * not initialized.
-     *
-     * @param ubm The Universal Background Model
-     * @param ru size of U (CD x ru)
-     * @param rv size of U (CD x rv)
-     * @warning ru and rv SHOULD BE  >= 1. Just set U/V/D to zero if you want
-     *   to ignore one subspace. This is the case for ISV.
-     */
-    FABase(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm, const size_t ru=1, const size_t rv=1);
-
-    /**
-     * @brief Copy constructor
-     */
-    FABase(const FABase& other);
-
-    /**
-     * @brief Just to virtualise the destructor
-     */
-    virtual ~FABase();
-
-    /**
-     * @brief Assigns from a different JFA machine
-     */
-    FABase& operator=(const FABase &other);
-
-    /**
-     * @brief Equal to
-     */
-    bool operator==(const FABase& b) const;
-
-    /**
-     * @brief Not equal to
-     */
-    bool operator!=(const FABase& b) const;
-
-    /**
-     * @brief Similar to
-     */
-    bool is_similar_to(const FABase& b, const double r_epsilon=1e-5,
-      const double a_epsilon=1e-8) const;
-
-    /**
-     * @brief Returns the UBM
-     */
-    const boost::shared_ptr<bob::learn::misc::GMMMachine> getUbm() const
-    { return m_ubm; }
-
-    /**
-     * @brief Returns the U matrix
-     */
-    const blitz::Array<double,2>& getU() const
-    { return m_U; }
-
-    /**
-     * @brief Returns the V matrix
-     */
-    const blitz::Array<double,2>& getV() const
-    { return m_V; }
-
-    /**
-     * @brief Returns the diagonal matrix diag(d) (as a 1D vector)
-     */
-    const blitz::Array<double,1>& getD() const
-    { return m_d; }
-
-    /**
-     * @brief Returns the UBM mean supervector (as a 1D vector)
-     */
-    const blitz::Array<double,1>& getUbmMean() const
-    { return m_cache_mean; }
-
-    /**
-     * @brief Returns the UBM variance supervector (as a 1D vector)
-     */
-    const blitz::Array<double,1>& getUbmVariance() const
-    { return m_cache_sigma; }
-
-    /**
-     * @brief Returns the number of Gaussian components C
-     * @warning An exception is thrown if no Universal Background Model has
-     *   been set yet.
-     */
-    const size_t getNGaussians() const
-    { if(!m_ubm) throw std::runtime_error("No UBM was set in the JFA machine.");
-      return m_ubm->getNGaussians(); }
-
-    /**
-     * @brief Returns the feature dimensionality D
-     * @warning An exception is thrown if no Universal Background Model has
-     *   been set yet.
-     */
-    const size_t getNInputs() const
-    { if(!m_ubm) throw std::runtime_error("No UBM was set in the JFA machine.");
-      return m_ubm->getNInputs(); }
-
-    /**
-     * @brief Returns the supervector length CD
-     * (CxD: Number of Gaussian components by the feature dimensionality)
-     * @warning An exception is thrown if no Universal Background Model has
-     *   been set yet.
-     */
-    const size_t getSupervectorLength() const
-    { if(!m_ubm) throw std::runtime_error("No UBM was set in the JFA machine.");
-      return m_ubm->getNInputs()*m_ubm->getNGaussians(); }
-
-    /**
-     * @brief Returns the size/rank ru of the U matrix
-     */
-    const size_t getDimRu() const
-    { return m_ru; }
-
-    /**
-     * @brief Returns the size/rank rv of the V matrix
-     */
-    const size_t getDimRv() const
-    { return m_rv; }
-
-    /**
-     * @brief Resets the dimensionality of the subspace U and V
-     * U and V are hence uninitialized.
-     */
-    void resize(const size_t ru, const size_t rv);
-
-    /**
-     * @brief Resets the dimensionality of the subspace U and V,
-     * assuming that no UBM has yet been set
-     * U and V are hence uninitialized.
-     */
-    void resize(const size_t ru, const size_t rv, const size_t cd);
-
-    /**
-     * @brief Returns the U matrix in order to update it
-     * @warning Should only be used by the trainer for efficiency reason,
-     *   or for testing purpose.
-     */
-    blitz::Array<double,2>& updateU()
-    { return m_U; }
-
-    /**
-     * @brief Returns the V matrix in order to update it
-     * @warning Should only be used by the trainer for efficiency reason,
-     *   or for testing purpose.
-     */
-    blitz::Array<double,2>& updateV()
-    { return m_V; }
-
-    /**
-     * @brief Returns the diagonal matrix diag(d) (as a 1D vector) in order
-     * to update it
-     * @warning Should only be used by the trainer for efficiency reason,
-     *   or for testing purpose.
-     */
-    blitz::Array<double,1>& updateD()
-    { return m_d; }
-
-
-    /**
-     * @brief Sets (the mean supervector of) the Universal Background Model
-     * U, V and d are uninitialized in case of dimensions update (C or D)
-     */
-    void setUbm(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm);
-
-    /**
-     * @brief Sets the U matrix
-     */
-    void setU(const blitz::Array<double,2>& U);
-
-    /**
-     * @brief Sets the V matrix
-     */
-    void setV(const blitz::Array<double,2>& V);
-
-    /**
-     * @brief Sets the diagonal matrix diag(d)
-     * (a 1D vector is expected as an argument)
-     */
-    void setD(const blitz::Array<double,1>& d);
-
-
-    /**
-     * @brief Estimates x from the GMM statistics considering the LPT
-     * assumption, that is the latent session variable x is approximated
-     * using the UBM
-     */
-    void estimateX(const bob::learn::misc::GMMStats& gmm_stats, blitz::Array<double,1>& x) const;
-
-    /**
-     * @brief Compute and put U^{T}.Sigma^{-1} matrix in cache
-     * @warning Should only be used by the trainer for efficiency reason,
-     *   or for testing purpose.
-     */
-    void updateCacheUbmUVD();
-
-
-  private:
-    /**
-     * @brief Update cache arrays/variables
-     */
-    void updateCache();
-    /**
-     * @brief Put GMM mean/variance supervector in cache
-     */
-    void updateCacheUbm();
-    /**
-     * @brief Resize working arrays
-     */
-    void resizeTmp();
-    /**
-     * @brief Computes (Id + U^T.Sigma^-1.U.N_{i,h}.U)^-1 =
-     *   (Id + sum_{c=1..C} N_{i,h}.U_{c}^T.Sigma_{c}^-1.U_{c})^-1
-     */
-    void computeIdPlusUSProdInv(const bob::learn::misc::GMMStats& gmm_stats,
-      blitz::Array<double,2>& out) const;
-    /**
-     * @brief Computes Fn_x = sum_{sessions h}(N*(o - m))
-     * (Normalised first order statistics)
-     */
-    void computeFn_x(const bob::learn::misc::GMMStats& gmm_stats,
-      blitz::Array<double,1>& out) const;
-    /**
-     * @brief Estimates the value of x from the passed arguments
-     * (IdPlusUSProdInv and Fn_x), considering the LPT assumption
-     */
-    void estimateX(const blitz::Array<double,2>& IdPlusUSProdInv,
-      const blitz::Array<double,1>& Fn_x, blitz::Array<double,1>& x) const;
-
-
-    // UBM
-    boost::shared_ptr<bob::learn::misc::GMMMachine> m_ubm;
-
-    // dimensionality
-    size_t m_ru; // size of U (CD x ru)
-    size_t m_rv; // size of V (CD x rv)
-
-    // U, V, D matrices
-    // D is assumed to be diagonal, and only the diagonal is stored
-    blitz::Array<double,2> m_U;
-    blitz::Array<double,2> m_V;
-    blitz::Array<double,1> m_d;
-
-    // Vectors/Matrices precomputed in cache
-    blitz::Array<double,1> m_cache_mean;
-    blitz::Array<double,1> m_cache_sigma;
-    blitz::Array<double,2> m_cache_UtSigmaInv;
-
-    mutable blitz::Array<double,2> m_tmp_IdPlusUSProdInv;
-    mutable blitz::Array<double,1> m_tmp_Fn_x;
-    mutable blitz::Array<double,1> m_tmp_ru;
-    mutable blitz::Array<double,2> m_tmp_ruD;
-    mutable blitz::Array<double,2> m_tmp_ruru;
-};
-
-
-} } } // namespaces
-
-#endif // BOB_LEARN_MISC_FABASE_H
diff --git a/bob/learn/misc/include/bob.learn.misc/FABaseTrainer.h b/bob/learn/misc/include/bob.learn.misc/FABaseTrainer.h
deleted file mode 100644
index c5b2734257fc47e9659f3f81d3245a19ccb4a087..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/FABaseTrainer.h
+++ /dev/null
@@ -1,350 +0,0 @@
-/**
- * @date Sat Jan 31 17:16:17 2015 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- *
- * @brief FABaseTrainer functions
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#ifndef BOB_LEARN_MISC_FABASETRAINER_H
-#define BOB_LEARN_MISC_FABASETRAINER_H
-
-#include <blitz/array.h>
-#include <bob.learn.misc/GMMStats.h>
-#include <bob.learn.misc/JFAMachine.h>
-#include <vector>
-
-#include <map>
-#include <string>
-#include <bob.core/array_copy.h>
-#include <boost/shared_ptr.hpp>
-#include <boost/random.hpp>
-#include <bob.core/logging.h>
-
-namespace bob { namespace learn { namespace misc {
-
-class FABaseTrainer
-{
-  public:
-    /**
-     * @brief Constructor
-     */
-    FABaseTrainer();
-
-    /**
-     * @brief Copy constructor
-     */
-    FABaseTrainer(const FABaseTrainer& other);
-
-    /**
-     * @brief Destructor
-     */
-    ~FABaseTrainer();
-
-    /**
-     * @brief Check that the dimensionality of the statistics match.
-     */
-    void checkStatistics(const bob::learn::misc::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
-
-    /**
-     * @brief Initialize the dimensionality, the UBM, the sums of the
-     * statistics and the number of identities.
-     */
-    void initUbmNidSumStatistics(const bob::learn::misc::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
-
-    /**
-     * @brief Precomputes the sums of the zeroth order statistics over the
-     * sessions for each client
-     */
-    void precomputeSumStatisticsN(const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
-    /**
-     * @brief Precomputes the sums of the first order statistics over the
-     * sessions for each client
-     */
-    void precomputeSumStatisticsF(const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
-
-    /**
-     * @brief Initializes (allocates and sets to zero) the x, y, z speaker
-     * factors
-     */
-    void initializeXYZ(const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
-
-    /**
-     * @brief Resets the x, y, z speaker factors to zero values
-     */
-    void resetXYZ();
-
-
-    /**** Y and V functions ****/
-    /**
-     * @brief Computes Vt * diag(sigma)^-1
-     */
-    void computeVtSigmaInv(const bob::learn::misc::FABase& m);
-    /**
-     * @brief Computes Vt_{c} * diag(sigma)^-1 * V_{c} for each Gaussian c
-     */
-    void computeVProd(const bob::learn::misc::FABase& m);
-    /**
-     * @brief Computes (I+Vt*diag(sigma)^-1*Ni*V)^-1 which occurs in the y
-     * estimation for the given person
-     */
-    void computeIdPlusVProd_i(const size_t id);
-    /**
-     * @brief Computes sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i} - U*x_{i,h})
-     * which occurs in the y estimation of the given person
-     */
-    void computeFn_y_i(const bob::learn::misc::FABase& m,
-      const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& stats,
-      const size_t id);
-    /**
-     * @brief Updates y_i (of the current person) and the accumulators to
-     * compute V with the cache values m_cache_IdPlusVprod_i, m_VtSigmaInv and
-     * m_cache_Fn_y_i
-     */
-    void updateY_i(const size_t id);
-    /**
-     * @brief Updates y and the accumulators to compute V
-     */
-    void updateY(const bob::learn::misc::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
-    /**
-     * @brief Computes the accumulators m_acc_V_A1 and m_acc_V_A2 for V
-     * V = A2 * A1^-1
-     */
-    void computeAccumulatorsV(const bob::learn::misc::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
-    /**
-     * @brief Updates V from the accumulators m_acc_V_A1 and m_acc_V_A2
-     */
-    void updateV(blitz::Array<double,2>& V);
-
-
-    /**** X and U functions ****/
-    /**
-     * @brief Computes Ut * diag(sigma)^-1
-     */
-    void computeUtSigmaInv(const bob::learn::misc::FABase& m);
-    /**
-     * @brief Computes Ut_{c} * diag(sigma)^-1 * U_{c} for each Gaussian c
-     */
-    void computeUProd(const bob::learn::misc::FABase& m);
-    /**
-     * @brief Computes (I+Ut*diag(sigma)^-1*Ni*U)^-1 which occurs in the x
-     * estimation
-     */
-    void computeIdPlusUProd_ih(const boost::shared_ptr<bob::learn::misc::GMMStats>& stats);
-    /**
-     * @brief Computes sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i} - U*x_{i,h})
-     * which occurs in the y estimation of the given person
-     */
-    void computeFn_x_ih(const bob::learn::misc::FABase& m,
-      const boost::shared_ptr<bob::learn::misc::GMMStats>& stats, const size_t id);
-    /**
-     * @brief Updates x_ih (of the current person/session) and the
-     * accumulators to compute U with the cache values m_cache_IdPlusVprod_i,
-     * m_VtSigmaInv and m_cache_Fn_y_i
-     */
-    void updateX_ih(const size_t id, const size_t h);
-    /**
-     * @brief Updates x
-     */
-    void updateX(const bob::learn::misc::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
-    /**
-     * @brief Computes the accumulators m_acc_U_A1 and m_acc_U_A2 for U
-     * U = A2 * A1^-1
-     */
-    void computeAccumulatorsU(const bob::learn::misc::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
-    /**
-     * @brief Updates U from the accumulators m_acc_U_A1 and m_acc_U_A2
-     */
-    void updateU(blitz::Array<double,2>& U);
-
-
-    /**** z and D functions ****/
-    /**
-     * @brief Computes diag(D) * diag(sigma)^-1
-     */
-    void computeDtSigmaInv(const bob::learn::misc::FABase& m);
-    /**
-     * @brief Computes Dt_{c} * diag(sigma)^-1 * D_{c} for each Gaussian c
-     */
-    void computeDProd(const bob::learn::misc::FABase& m);
-    /**
-     * @brief Computes (I+diag(d)t*diag(sigma)^-1*Ni*diag(d))^-1 which occurs
-     * in the z estimation for the given person
-     */
-    void computeIdPlusDProd_i(const size_t id);
-    /**
-     * @brief Computes sum_{sessions h}(N_{i,h}*(o_{i,h} - m - V*y_{i} - U*x_{i,h})
-     * which occurs in the y estimation of the given person
-     */
-    void computeFn_z_i(const bob::learn::misc::FABase& m,
-      const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& stats, const size_t id);
-    /**
-     * @brief Updates z_i (of the current person) and the accumulators to
-     * compute D with the cache values m_cache_IdPlusDProd_i, m_VtSigmaInv
-     * and m_cache_Fn_z_i
-     */
-    void updateZ_i(const size_t id);
-    /**
-     * @brief Updates z and the accumulators to compute D
-     */
-    void updateZ(const bob::learn::misc::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
-    /**
-     * @brief Computes the accumulators m_acc_D_A1 and m_acc_D_A2 for d
-     * d = A2 * A1^-1
-     */
-    void computeAccumulatorsD(const bob::learn::misc::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
-    /**
-     * @brief Updates d from the accumulators m_acc_D_A1 and m_acc_D_A2
-     */
-    void updateD(blitz::Array<double,1>& d);
-
-
-    /**
-     * @brief Get the zeroth order statistics
-     */
-    const std::vector<blitz::Array<double,1> >& getNacc() const
-    { return m_Nacc; }
-    /**
-     * @brief Get the first order statistics
-     */
-    const std::vector<blitz::Array<double,1> >& getFacc() const
-    { return m_Facc; }
-    /**
-     * @brief Get the x speaker factors
-     */
-    const std::vector<blitz::Array<double,2> >& getX() const
-    { return m_x; }
-    /**
-     * @brief Get the y speaker factors
-     */
-    const std::vector<blitz::Array<double,1> >& getY() const
-    { return m_y; }
-    /**
-     * @brief Get the z speaker factors
-     */
-    const std::vector<blitz::Array<double,1> >& getZ() const
-    { return m_z; }
-    /**
-     * @brief Set the x speaker factors
-     */
-    void setX(const std::vector<blitz::Array<double,2> >& X)
-    { m_x = X; }
-    /**
-     * @brief Set the y speaker factors
-     */
-    void setY(const std::vector<blitz::Array<double,1> >& y)
-    { m_y = y; }
-    /**
-     * @brief Set the z speaker factors
-     */
-    void setZ(const std::vector<blitz::Array<double,1> >& z)
-    { m_z = z; }
-
-    /**
-     * @brief Initializes the cache to process the given statistics
-     */
-    void initCache();
-
-    /**
-     * @brief Getters for the accumulators
-     */
-    const blitz::Array<double,3>& getAccVA1() const
-    { return m_acc_V_A1; }
-    const blitz::Array<double,2>& getAccVA2() const
-    { return m_acc_V_A2; }
-    const blitz::Array<double,3>& getAccUA1() const
-    { return m_acc_U_A1; }
-    const blitz::Array<double,2>& getAccUA2() const
-    { return m_acc_U_A2; }
-    const blitz::Array<double,1>& getAccDA1() const
-    { return m_acc_D_A1; }
-    const blitz::Array<double,1>& getAccDA2() const
-    { return m_acc_D_A2; }
-
-    /**
-     * @brief Setters for the accumulators, Very useful if the e-Step needs
-     * to be parallelized.
-     */
-    void setAccVA1(const blitz::Array<double,3>& acc)
-    { bob::core::array::assertSameShape(acc, m_acc_V_A1);
-      m_acc_V_A1 = acc; }
-    void setAccVA2(const blitz::Array<double,2>& acc)
-    { bob::core::array::assertSameShape(acc, m_acc_V_A2);
-      m_acc_V_A2 = acc; }
-    void setAccUA1(const blitz::Array<double,3>& acc)
-    { bob::core::array::assertSameShape(acc, m_acc_U_A1);
-      m_acc_U_A1 = acc; }
-    void setAccUA2(const blitz::Array<double,2>& acc)
-    { bob::core::array::assertSameShape(acc, m_acc_U_A2);
-      m_acc_U_A2 = acc; }
-    void setAccDA1(const blitz::Array<double,1>& acc)
-    { bob::core::array::assertSameShape(acc, m_acc_D_A1);
-      m_acc_D_A1 = acc; }
-    void setAccDA2(const blitz::Array<double,1>& acc)
-    { bob::core::array::assertSameShape(acc, m_acc_D_A2);
-      m_acc_D_A2 = acc; }
-
-
-  private:
-    size_t m_Nid; // Number of identities
-    size_t m_dim_C; // Number of Gaussian components of the UBM GMM
-    size_t m_dim_D; // Dimensionality of the feature space
-    size_t m_dim_ru; // Rank of the U subspace
-    size_t m_dim_rv; // Rank of the V subspace
-
-    std::vector<blitz::Array<double,2> > m_x; // matrix x of speaker factors for eigenchannels U, for each client
-    std::vector<blitz::Array<double,1> > m_y; // vector y of spealer factors for eigenvoices V, for each client
-    std::vector<blitz::Array<double,1> > m_z; // vector z of spealer factors for eigenvoices Z, for each client
-
-    std::vector<blitz::Array<double,1> > m_Nacc; // Sum of the zeroth order statistics over the sessions for each client, dimension C
-    std::vector<blitz::Array<double,1> > m_Facc; // Sum of the first order statistics over the sessions for each client, dimension CD
-
-    // Accumulators for the M-step
-    blitz::Array<double,3> m_acc_V_A1;
-    blitz::Array<double,2> m_acc_V_A2;
-    blitz::Array<double,3> m_acc_U_A1;
-    blitz::Array<double,2> m_acc_U_A2;
-    blitz::Array<double,1> m_acc_D_A1;
-    blitz::Array<double,1> m_acc_D_A2;
-
-    // Cache/Precomputation
-    blitz::Array<double,2> m_cache_VtSigmaInv; // Vt * diag(sigma)^-1
-    blitz::Array<double,3> m_cache_VProd; // first dimension is the Gaussian id
-    blitz::Array<double,2> m_cache_IdPlusVProd_i;
-    blitz::Array<double,1> m_cache_Fn_y_i;
-
-    blitz::Array<double,2> m_cache_UtSigmaInv; // Ut * diag(sigma)^-1
-    blitz::Array<double,3> m_cache_UProd; // first dimension is the Gaussian id
-    blitz::Array<double,2> m_cache_IdPlusUProd_ih;
-    blitz::Array<double,1> m_cache_Fn_x_ih;
-
-    blitz::Array<double,1> m_cache_DtSigmaInv; // Dt * diag(sigma)^-1
-    blitz::Array<double,1> m_cache_DProd; // supervector length dimension
-    blitz::Array<double,1> m_cache_IdPlusDProd_i;
-    blitz::Array<double,1> m_cache_Fn_z_i;
-
-    // Working arrays
-    mutable blitz::Array<double,2> m_tmp_ruru;
-    mutable blitz::Array<double,2> m_tmp_ruD;
-    mutable blitz::Array<double,2> m_tmp_rvrv;
-    mutable blitz::Array<double,2> m_tmp_rvD;
-    mutable blitz::Array<double,1> m_tmp_rv;
-    mutable blitz::Array<double,1> m_tmp_ru;
-    mutable blitz::Array<double,1> m_tmp_CD;
-    mutable blitz::Array<double,1> m_tmp_CD_b;
-};
-
-
-} } } // namespaces
-
-#endif /* BOB_LEARN_MISC_FABASETRAINER_H */
diff --git a/bob/learn/misc/include/bob.learn.misc/GMMBaseTrainer.h b/bob/learn/misc/include/bob.learn.misc/GMMBaseTrainer.h
deleted file mode 100644
index 401505588399b680f9b1378c2bd269c28c8552a2..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/GMMBaseTrainer.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/**
- * @date Tue May 10 11:35:58 2011 +0200
- * @author Francois Moulin <Francois.Moulin@idiap.ch>
- *
- * @brief This class implements the E-step of the expectation-maximisation algorithm for a GMM Machine.
- * @details See Section 9.2.2 of Bishop, "Pattern recognition and machine learning", 2006
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#ifndef BOB_LEARN_MISC_GMMBASETRAINER_H
-#define BOB_LEARN_MISC_GMMBASETRAINER_H
-
-#include <bob.learn.misc/GMMMachine.h>
-#include <bob.learn.misc/GMMStats.h>
-#include <limits>
-
-namespace bob { namespace learn { namespace misc {
-
-/**
- * @brief This class implements the E-step of the expectation-maximisation
- * algorithm for a GMM Machine.
- * @details See Section 9.2.2 of Bishop,
- *   "Pattern recognition and machine learning", 2006
- */
-class GMMBaseTrainer
-{
-  public:
-    /**
-     * @brief Default constructor
-     */
-    GMMBaseTrainer(const bool update_means=true,
-                   const bool update_variances=false, 
-                   const bool update_weights=false,
-                   const double mean_var_update_responsibilities_threshold = std::numeric_limits<double>::epsilon());
-
-    /**
-     * @brief Copy constructor
-     */
-    GMMBaseTrainer(const GMMBaseTrainer& other);
-
-    /**
-     * @brief Destructor
-     */
-    virtual ~GMMBaseTrainer();
-
-    /**
-     * @brief Initialization before the EM steps
-     */
-    void initialize(bob::learn::misc::GMMMachine& gmm);
-
-    /**
-     * @brief Calculates and saves statistics across the dataset,
-     * and saves these as m_ss. Calculates the average
-     * log likelihood of the observations given the GMM,
-     * and returns this in average_log_likelihood.
-     *
-     * The statistics, m_ss, will be used in the mStep() that follows.
-     * Implements EMTrainer::eStep(double &)
-     */
-     void eStep(bob::learn::misc::GMMMachine& gmm,
-      const blitz::Array<double,2>& data);
-
-    /**
-     * @brief Computes the likelihood using current estimates of the latent
-     * variables
-     */
-    double computeLikelihood(bob::learn::misc::GMMMachine& gmm);
-
-
-    /**
-     * @brief Assigns from a different GMMBaseTrainer
-     */
-    GMMBaseTrainer& operator=(const GMMBaseTrainer &other);
-
-    /**
-     * @brief Equal to
-     */
-    bool operator==(const GMMBaseTrainer& b) const;
-
-    /**
-     * @brief Not equal to
-     */
-    bool operator!=(const GMMBaseTrainer& b) const;
-
-    /**
-     * @brief Similar to
-     */
-    bool is_similar_to(const GMMBaseTrainer& b, const double r_epsilon=1e-5,
-      const double a_epsilon=1e-8) const;
-
-    /**
-     * @brief Returns the internal GMM statistics. Useful to parallelize the
-     * E-step
-     */
-    const bob::learn::misc::GMMStats getGMMStats() const
-    { return m_ss; }
-
-    /**
-     * @brief Sets the internal GMM statistics. Useful to parallelize the
-     * E-step
-     */
-    void setGMMStats(const bob::learn::misc::GMMStats& stats);
-    
-    /**
-     * update means on each iteration
-     */    
-    bool getUpdateMeans()
-    {return m_update_means;}
-    
-    /**
-     * update variances on each iteration
-     */
-    bool getUpdateVariances()
-    {return m_update_variances;}
-
-
-    bool getUpdateWeights()
-    {return m_update_weights;}
-    
-    
-    double getMeanVarUpdateResponsibilitiesThreshold()
-    {return m_mean_var_update_responsibilities_threshold;}
-    
-
-  private:
-  
-    /**
-     * These are the sufficient statistics, calculated during the
-     * E-step and used during the M-step
-     */
-    bob::learn::misc::GMMStats m_ss;
-
-
-    /**
-     * update means on each iteration
-     */
-    bool m_update_means;
-
-    /**
-     * update variances on each iteration
-     */
-    bool m_update_variances;
-
-    /**
-     * update weights on each iteration
-     */
-    bool m_update_weights;
-
-    /**
-     * threshold over the responsibilities of the Gaussians
-     * Equations 9.24, 9.25 of Bishop, "Pattern recognition and machine learning", 2006
-     * require a division by the responsibilities, which might be equal to zero
-     * because of numerical issue. This threshold is used to avoid such divisions.
-     */
-    double m_mean_var_update_responsibilities_threshold;
-};
-
-} } } // namespaces
-
-#endif // BOB_LEARN_MISC_GMMBASETRAINER_H
diff --git a/bob/learn/misc/include/bob.learn.misc/GMMMachine.h b/bob/learn/misc/include/bob.learn.misc/GMMMachine.h
deleted file mode 100644
index bf3808abf4d9290fce33c0ce2cb732e6870d2c1a..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/GMMMachine.h
+++ /dev/null
@@ -1,371 +0,0 @@
-/**
- * @date Tue May 10 11:35:58 2011 +0200
- * @author Francois Moulin <Francois.Moulin@idiap.ch>
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * @brief This class implements a multivariate diagonal Gaussian distribution.
- * @details See Section 2.3.9 of Bishop, "Pattern recognition and machine learning", 2006
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#ifndef BOB_LEARN_MISC_GMMMACHINE_H
-#define BOB_LEARN_MISC_GMMMACHINE_H
-
-#include <bob.learn.misc/Gaussian.h>
-#include <bob.learn.misc/GMMStats.h>
-#include <bob.io.base/HDF5File.h>
-#include <iostream>
-#include <boost/shared_ptr.hpp>
-#include <vector>
-
-namespace bob { namespace learn { namespace misc {
-
-/**
- * @brief This class implements a multivariate diagonal Gaussian distribution.
- * @details See Section 2.3.9 of Bishop, "Pattern recognition and machine learning", 2006
- */
-class GMMMachine
-{
-  public:
-    /**
-     * Default constructor
-     */
-    GMMMachine();
-
-    /**
-     * Constructor
-     * @param[in] n_gaussians  The number of Gaussian components
-     * @param[in] n_inputs     The feature dimensionality
-     */
-    GMMMachine(const size_t n_gaussians, const size_t n_inputs);
-
-    /**
-     * Copy constructor
-     * (Needed because the GMM points to its constituent Gaussian members)
-     */
-    GMMMachine(const GMMMachine& other);
-
-    /**
-     * Constructor from a Configuration
-     */
-    GMMMachine(bob::io::base::HDF5File& config);
-
-    /**
-     * Assignment
-     */
-    GMMMachine& operator=(const GMMMachine &other);
-
-    /**
-     * Equal to
-     */
-    bool operator==(const GMMMachine& b) const;
-
-    /**
-     * Not equal to
-     */
-    bool operator!=(const GMMMachine& b) const;
-
-    /**
-     * @brief Similar to
-     */
-    bool is_similar_to(const GMMMachine& b, const double r_epsilon=1e-5,
-      const double a_epsilon=1e-8) const;
-
-    /**
-     * Destructor
-     */
-    virtual ~GMMMachine();
-
-
-    /**
-     * Reset the input dimensionality, and the number of Gaussian components.
-     * Initialises the weights to uniform distribution.
-     * @param n_gaussians The number of Gaussian components
-     * @param n_inputs    The feature dimensionality
-     */
-    void resize(const size_t n_gaussians, const size_t n_inputs);
-
-
-    /////////////////////////
-    // Getters
-    ////////////////////////
-
-    /**
-     * Get number of inputs
-     */
-    size_t getNInputs() const
-    { return m_n_inputs; }
-
-    /**
-     * Get the weights ("mixing coefficients") of the Gaussian components
-     */
-    const blitz::Array<double,1>& getWeights() const
-    { return m_weights; }
-
-    /**
-     * Get the logarithm of the weights of the Gaussian components
-     */
-    inline const blitz::Array<double,1>& getLogWeights() const
-    { return m_cache_log_weights; }
-
-
-    /**
-     * Get the means
-     */    
-    const blitz::Array<double,2> getMeans() const;
-    
-    /**
-     * Get the mean supervector
-     */
-    void getMeanSupervector(blitz::Array<double,1> &mean_supervector) const;
-    
-     /**
-     * Returns a const reference to the supervector (Put in cache)
-     */
-    const blitz::Array<double,1>& getMeanSupervector() const;
-        
-    /**
-     * Get the variances
-     */
-    const blitz::Array<double,2> getVariances() const;
-    
-    /**
-     * Returns a const reference to the supervector (Put in cache)
-     */
-    const blitz::Array<double,1>& getVarianceSupervector() const;
-    
-
-    /**
-     * Get the variance flooring thresholds for each Gaussian in each dimension
-     */
-    const blitz::Array<double,2> getVarianceThresholds() const;
-
-
-
-    ///////////////////////
-    // Setters
-    ///////////////////////
-
-    /**
-     * Set the weights
-     */
-    void setWeights(const blitz::Array<double,1> &weights);
-
-    /**
-     * Set the means
-     */
-    void setMeans(const blitz::Array<double,2> &means);
-    /**
-     * Set the means from a supervector
-     */
-    void setMeanSupervector(const blitz::Array<double,1> &mean_supervector);
-
-    /**
-     * Set the variances
-     */
-    void setVariances(const blitz::Array<double,2> &variances);
-    /**
-     * Set the variances from a supervector
-     */
-    void setVarianceSupervector(const blitz::Array<double,1> &variance_supervector);
-
-    /**
-     * Set the variance flooring thresholds in each dimension
-     */
-    void setVarianceThresholds(const double value);
-    /**
-     * Set the variance flooring thresholds in each dimension
-     * (equal for all Gaussian components)
-     */
-    void setVarianceThresholds(blitz::Array<double,1> variance_thresholds);
-    /**
-     * Set the variance flooring thresholds for each Gaussian in each dimension
-     */
-    void setVarianceThresholds(const blitz::Array<double,2> &variance_thresholds);
-
-
-    ////////////////
-    // Methods
-    /////////////////
-
-    /**
-     * Get the weights in order to be updated
-     * ("mixing coefficients") of the Gaussian components
-     * @warning Only trainers should use this function for efficiency reason
-     */
-    inline blitz::Array<double,1>& updateWeights()
-    { return m_weights; }
-
-
-    /**
-     * Update the log of the weights in cache
-     * @warning Should be used by trainer only when using updateWeights()
-     */
-    void recomputeLogWeights() const;
-
-
-
-    /**
-     * Output the log likelihood of the sample, x, i.e. log(p(x|GMMMachine))
-     * @param[in]  x                                 The sample
-     * @param[out] log_weighted_gaussian_likelihoods For each Gaussian, i: log(weight_i*p(x|Gaussian_i))
-     * @return     The GMMMachine log likelihood, i.e. log(p(x|GMMMachine))
-     * Dimensions of the parameters are checked
-     */
-    double logLikelihood(const blitz::Array<double, 1> &x, blitz::Array<double,1> &log_weighted_gaussian_likelihoods) const;
-
-    /**
-     * Output the log likelihood of the sample, x, i.e. log(p(x|GMMMachine))
-     * @param[in]  x                                 The sample
-     * @param[out] log_weighted_gaussian_likelihoods For each Gaussian, i: log(weight_i*p(x|Gaussian_i))
-     * @return     The GMMMachine log likelihood, i.e. log(p(x|GMMMachine))
-     * @warning Dimensions of the parameters are not checked
-     */
-    double logLikelihood_(const blitz::Array<double, 1> &x, blitz::Array<double,1> &log_weighted_gaussian_likelihoods) const;
-
-    /**
-     * Output the log likelihood of the sample, x, i.e. log(p(x|GMM))
-     * @param[in]  x The sample
-     * Dimension of the input is checked
-     */
-    double logLikelihood(const blitz::Array<double, 1> &x) const;
-
-    /**
-     * Output the log likelihood of the sample, x, i.e. log(p(x|GMM))
-     * @param[in]  x The sample
-     * @warning Dimension of the input is not checked
-     */
-    double logLikelihood_(const blitz::Array<double, 1> &x) const;
-
-    /**
-     * Accumulates the GMM statistics over a set of samples.
-     * @see bool accStatistics(const blitz::Array<double,1> &x, GMMStats stats)
-     * Dimensions of the parameters are checked
-     */
-    void accStatistics(const blitz::Array<double,2>& input, GMMStats &stats) const;
-
-    /**
-     * Accumulates the GMM statistics over a set of samples.
-     * @see bool accStatistics(const blitz::Array<double,1> &x, GMMStats stats)
-     * @warning Dimensions of the parameters are not checked
-     */
-    void accStatistics_(const blitz::Array<double,2>& input, GMMStats &stats) const;
-
-    /**
-     * Accumulate the GMM statistics for this sample.
-     *
-     * @param[in]  x     The current sample
-     * @param[out] stats The accumulated statistics
-     * Dimensions of the parameters are checked
-     */
-    void accStatistics(const blitz::Array<double,1> &x, GMMStats &stats) const;
-
-    /**
-     * Accumulate the GMM statistics for this sample.
-     *
-     * @param[in]  x     The current sample
-     * @param[out] stats The accumulated statistics
-     * @warning Dimensions of the parameters are not checked
-     */
-    void accStatistics_(const blitz::Array<double,1> &x, GMMStats &stats) const;
-
-
-    /**
-     * Get a pointer to a particular Gaussian component
-     * @param[in] i The index of the Gaussian component
-     * @return A smart pointer to the i'th Gaussian component
-     *         if it exists, otherwise throws an exception
-     */
-    boost::shared_ptr<bob::learn::misc::Gaussian> getGaussian(const size_t i);
-
-
-    /**
-     * Return the number of Gaussian components
-     */
-    inline size_t getNGaussians() const
-    { return m_n_gaussians; }
-
-    /**
-     * Save to a Configuration
-     */
-    void save(bob::io::base::HDF5File& config) const;
-
-    /**
-     * Load from a Configuration
-     */
-    void load(bob::io::base::HDF5File& config);
-
-    /**
-     * Load/Reload mean/variance supervector in cache
-     */
-    void reloadCacheSupervectors() const;
-
-    friend std::ostream& operator<<(std::ostream& os, const GMMMachine& machine);
-
-
-  private:
-    /**
-     * Copy another GMMMachine
-     */
-    void copy(const GMMMachine&);
-
-    /**
-     * The number of Gaussian components
-     */
-    size_t m_n_gaussians;
-
-    /**
-     * The feature dimensionality
-     */
-    size_t m_n_inputs;
-
-    /**
-     * The Gaussian components
-     */
-    std::vector<boost::shared_ptr<Gaussian> > m_gaussians;
-
-    /**
-     * The weights (also known as "mixing coefficients")
-     */
-    blitz::Array<double,1> m_weights;
-
-    /**
-     * Update the mean and variance supervectors
-     * in cache (into a 1D blitz array)
-     */
-    void updateCacheSupervectors() const;
-
-    /**
-     * Initialise the cache members (allocate arrays)
-     */
-    void initCache() const;
-
-    /**
-     * Accumulate the GMM statistics for this sample.
-     * Called by accStatistics() and accStatistics_()
-     *
-     * @param[in]  x     The current sample
-     * @param[out] stats The accumulated statistics
-     * @param[in]  log_likelihood  The current log_likelihood
-     * @warning Dimensions of the parameters are not checked
-     */
-    void accStatisticsInternal(const blitz::Array<double,1> &x,
-      GMMStats &stats, const double log_likelihood) const;
-
-
-    /// Some cache arrays to avoid re-allocation when computing log-likelihoods
-    mutable blitz::Array<double,1> m_cache_log_weights;
-    mutable blitz::Array<double,1> m_cache_log_weighted_gaussian_likelihoods;
-    mutable blitz::Array<double,1> m_cache_P;
-    mutable blitz::Array<double,2> m_cache_Px;
-
-    mutable blitz::Array<double,1> m_cache_mean_supervector;
-    mutable blitz::Array<double,1> m_cache_variance_supervector;
-    mutable bool m_cache_supervector;
-
-};
-
-} } } // namespaces
-
-#endif // BOB_LEARN_MISC_GMMMACHINE_H
diff --git a/bob/learn/misc/include/bob.learn.misc/GMMStats.h b/bob/learn/misc/include/bob.learn.misc/GMMStats.h
deleted file mode 100644
index 4dbef6cc756c8958c6b1e63812ff10256d06be37..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/GMMStats.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/**
- * @date Tue May 10 11:35:58 2011 +0200
- * @author Francois Moulin <Francois.Moulin@idiap.ch>
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#ifndef BOB_LEARN_MISC_GMMSTATS_H
-#define BOB_LEARN_MISC_GMMSTATS_H
-
-#include <blitz/array.h>
-#include <bob.io.base/HDF5File.h>
-
-namespace bob { namespace learn { namespace misc {
-
-/**
- * @brief A container for GMM statistics.
- * @see GMMMachine
- *
- * With respect to Reynolds, "Speaker Verification Using Adapted
- * Gaussian Mixture Models", DSP, 2000:
- * Eq (8) is n(i)
- * Eq (9) is sumPx(i) / n(i)
- * Eq (10) is sumPxx(i) / n(i)
- */
-class GMMStats {
-  public:
-
-    /**
-     * Default constructor.
-     */
-    GMMStats();
-
-    /**
-     * Constructor.
-     * @param n_gaussians Number of Gaussians in the mixture model.
-     * @param n_inputs    Feature dimensionality.
-     */
-    GMMStats(const size_t n_gaussians, const size_t n_inputs);
-
-    /**
-     * Copy constructor
-     */
-    GMMStats(const GMMStats& other);
-
-    /**
-     * Constructor (from a Configuration)
-     */
-    GMMStats(bob::io::base::HDF5File& config);
-
-    /**
-     * Assigment
-     */
-    GMMStats& operator=(const GMMStats& other);
-
-    /**
-     * Equal to
-     */
-    bool operator==(const GMMStats& b) const;
-
-    /**
-     * Not Equal to
-     */
-    bool operator!=(const GMMStats& b) const;
-
-    /**
-     * @brief Similar to
-     */
-    bool is_similar_to(const GMMStats& b, const double r_epsilon=1e-5,
-      const double a_epsilon=1e-8) const;
-
-    /**
-     * Updates a GMMStats with another GMMStats
-     */
-    void operator+=(const GMMStats& b);
-
-    /**
-     * Destructor
-     */
-    ~GMMStats();
-
-    /**
-     * Allocates space for the statistics and resets to zero.
-     * @param n_gaussians Number of Gaussians in the mixture model.
-     * @param n_inputs    Feature dimensionality.
-     */
-    void resize(const size_t n_gaussians, const size_t n_inputs);
-
-    /**
-     * Resets statistics to zero.
-     */
-    void init();
-
-    /**
-     * The accumulated log likelihood of all samples
-     */
-    double log_likelihood;
-
-    /**
-     * The accumulated number of samples
-     */
-    size_t T;
-
-    /**
-     * For each Gaussian, the accumulated sum of responsibilities, i.e. the sum of P(gaussian_i|x)
-     */
-    blitz::Array<double,1> n;
-
-    /**
-     * For each Gaussian, the accumulated sum of responsibility times the sample
-     */
-    blitz::Array<double,2> sumPx;
-
-    /**
-     * For each Gaussian, the accumulated sum of responsibility times the sample squared
-     */
-    blitz::Array<double,2> sumPxx;
-
-    /**
-     * Save to a Configuration
-     */
-    void save(bob::io::base::HDF5File& config) const;
-
-    /**
-     * Load from a Configuration
-     */
-    void load(bob::io::base::HDF5File& config);
-
-    friend std::ostream& operator<<(std::ostream& os, const GMMStats& g);
-
-  private:
-    /**
-     * Copy another GMMStats
-     */
-    void copy(const GMMStats&);
-};
-
-} } } // namespaces
-
-#endif // BOB_LEARN_MISC_GMMSTATS_H
diff --git a/bob/learn/misc/include/bob.learn.misc/Gaussian.h b/bob/learn/misc/include/bob.learn.misc/Gaussian.h
deleted file mode 100644
index acb08fc086110266f76f52099fcc30dca69e2f6c..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/Gaussian.h
+++ /dev/null
@@ -1,247 +0,0 @@
-/**
- * @date Tue May 10 11:35:58 2011 +0200
- * @author Francois Moulin <Francois.Moulin@idiap.ch>
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#ifndef BOB_LEARN_MISC_GAUSSIAN_H
-#define BOB_LEARN_MISC_GAUSSIAN_H
-
-#include <bob.io.base/HDF5File.h>
-#include <blitz/array.h>
-#include <limits>
-
-namespace bob { namespace learn { namespace misc {
-
-/**
- * @brief This class implements a multivariate diagonal Gaussian distribution.
- */
-class Gaussian
-{
-  public:
-    /**
-     * Default constructor
-     */
-    Gaussian();
-
-    /**
-     * Constructor
-     * @param[in] n_inputs The feature dimensionality
-     */
-    Gaussian(const size_t n_inputs);
-
-    /**
-     * Destructor
-     */
-    virtual ~Gaussian();
-
-    /**
-     * Copy constructor
-     */
-    Gaussian(const Gaussian& other);
-
-    /**
-     * Constructs from a configuration file
-     */
-    Gaussian(bob::io::base::HDF5File& config);
-
-    /**
-     * Assignment
-     */
-    Gaussian& operator=(const Gaussian &other);
-
-    /**
-     * Equal to
-     */
-    bool operator==(const Gaussian& b) const;
-    /**
-     * Not equal to
-     */
-    bool operator!=(const Gaussian& b) const;
-    /**
-     * @brief Similar to
-     */
-    bool is_similar_to(const Gaussian& b, const double r_epsilon=1e-5,
-      const double a_epsilon=1e-8) const;
-
-    /**
-     * Set the input dimensionality, reset the mean to zero
-     * and the variance to one.
-     * @see resize()
-     * @param n_inputs The feature dimensionality
-     * @warning The mean and variance are not initialized
-     */
-    void setNInputs(const size_t n_inputs);
-
-    /**
-     * Get the input dimensionality
-     */
-    size_t getNInputs() const
-    { return m_n_inputs; }
-
-    /**
-     * Set the input dimensionality, reset the mean to zero
-     * and the variance to one.
-     * @see setNInputs()
-     * @param n_inputs The feature dimensionality
-     */
-    void resize(const size_t n_inputs);
-
-    /**
-     * Get the mean
-     */
-    inline const blitz::Array<double,1>& getMean() const
-    { return m_mean; }
-
-    /**
-     * Get the mean in order to be updated
-     * @warning Only trainers should use this function for efficiency reason
-     */
-    inline blitz::Array<double,1>& updateMean()
-    { return m_mean; }
-
-    /**
-     * Set the mean
-     */
-    void setMean(const blitz::Array<double,1> &mean);
-
-    /**
-     * Get the variance (the diagonal of the covariance matrix)
-     */
-    inline const blitz::Array<double,1>& getVariance() const
-    { return m_variance; }
-
-    /**
-     * Get the variance in order to be updated
-     * @warning Only trainers should use this function for efficiency reason
-     */
-    inline blitz::Array<double,1>& updateVariance()
-    { return m_variance; }
-
-    /**
-     * Set the variance
-     */
-    void setVariance(const blitz::Array<double,1> &variance);
-
-    /**
-     * Get the variance flooring thresholds
-     */
-    const blitz::Array<double,1>& getVarianceThresholds() const
-    { return m_variance_thresholds; }
-
-    /**
-     * Get the variance thresholds in order to be updated
-     * @warning Only trainers should use this function for efficiency reason
-     */
-    inline blitz::Array<double,1>& updateVarianceThreshods()
-    { return m_variance_thresholds; }
-
-    /**
-     * Set the variance flooring thresholds
-     */
-    void setVarianceThresholds(const blitz::Array<double,1> &variance_thresholds);
-
-    /**
-     * Set the variance flooring thresholds
-     */
-    void setVarianceThresholds(const double value);
-
-    /**
-     * Apply the variance flooring thresholds
-     * This method is called when using setVarianceThresholds()
-     * @warning It is only useful when using updateVarianceThreshods(),
-     * and should mostly be done by trainers
-     */
-    void applyVarianceThresholds();
-
-    /**
-     * Output the log likelihood of the sample, x
-     * @param x The data sample (feature vector)
-     */
-    double logLikelihood(const blitz::Array<double,1>& x) const;
-
-    /**
-     * Output the log likelihood of the sample, x
-     * @param x The data sample (feature vector)
-     * @warning The input is NOT checked
-     */
-    double logLikelihood_(const blitz::Array<double,1>& x) const;
-
-    /**
-     * Saves to a Configuration
-     */
-    void save(bob::io::base::HDF5File& config) const;
-
-    /**
-     * Loads from a Configuration
-     */
-    void load(bob::io::base::HDF5File& config);
-
-    /**
-     * Prints a Gaussian in the output stream
-     */
-    friend std::ostream& operator<<(std::ostream& os, const bob::learn::misc::Gaussian& g);
-
-
-  private:
-    /**
-     * Copies another Gaussian
-     */
-    void copy(const Gaussian& other);
-
-    /**
-     * Computes n_inputs * log(2*pi)
-     */
-    void preComputeNLog2Pi();
-
-    /**
-     * Computes and stores the value of g_norm,
-     * to later speed up evaluation of logLikelihood()
-     * Note: g_norm is defined as follows:
-     * log(Gaussian pdf) = log(1/((2pi)^(k/2)(det)^(1/2)) * exp(...))
-     *                   = -1/2 * g_norm * (...)
-     */
-    void preComputeConstants();
-
-    /**
-     * The mean vector of the Gaussian
-     */
-    blitz::Array<double,1> m_mean;
-
-    /**
-     * The diagonal of the covariance matrix (assumed to be diagonal)
-     */
-    blitz::Array<double,1> m_variance;
-
-    /**
-     * The variance flooring thresholds, i.e. the minimum allowed
-     * value of variance in each dimension.
-     * The variance will be set to this value if an attempt is made
-     * to set it to a smaller value.
-     */
-    blitz::Array<double,1> m_variance_thresholds;
-
-    /**
-     * A constant that depends only on the feature dimensionality
-     * m_n_log2pi = n_inputs * log(2*pi) (used to compute m_gnorm)
-     */
-    double m_n_log2pi;
-
-    /**
-     * A constant that depends only on the feature dimensionality
-     * (m_n_inputs) and the variance
-     * @see bool preComputeConstants()
-     */
-    double m_g_norm;
-
-    /**
-     * The number of inputs (feature dimensionality)
-     */
-    size_t m_n_inputs;
-};
-
-} } } // namespaces
-
-#endif // BOB_LEARN_MISC_GAUSSIAN_H
diff --git a/bob/learn/misc/include/bob.learn.misc/ISVBase.h b/bob/learn/misc/include/bob.learn.misc/ISVBase.h
deleted file mode 100644
index c062076362582d878b2827b8aea0e924ba4811c8..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/ISVBase.h
+++ /dev/null
@@ -1,228 +0,0 @@
-/**
- * @date Tue Jan 27 16:02:00 2015 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- *
- * @brief A base class for Joint Factor Analysis-like machines
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#ifndef BOB_LEARN_MISC_ISVBASE_H
-#define BOB_LEARN_MISC_ISVBASE_H
-
-#include <stdexcept>
-
-#include <bob.learn.misc/GMMMachine.h>
-#include <bob.learn.misc/FABase.h>
-
-#include <bob.io.base/HDF5File.h>
-#include <boost/shared_ptr.hpp>
-
-namespace bob { namespace learn { namespace misc {
-
-
-/**
- * @brief An ISV Base class which contains U and D matrices
- * TODO: add a reference to the journal articles
- */
-class ISVBase
-{
-  public:
-    /**
-     * @brief Default constructor. Builds an otherwise invalid 0 x 0 ISVBase
-     * The Universal Background Model and the matrices U, V and diag(d) are
-     * not initialized.
-     */
-    ISVBase();
-
-    /**
-     * @brief Constructor. Builds a new ISVBase.
-     * The Universal Background Model and the matrices U, V and diag(d) are
-     * not initialized.
-     *
-     * @param ubm The Universal Background Model
-     * @param ru size of U (CD x ru)
-     * @warning ru SHOULD BE >= 1.
-     */
-    ISVBase(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm, const size_t ru=1);
-
-    /**
-     * @brief Copy constructor
-     */
-    ISVBase(const ISVBase& other);
-
-    /**
-     * @deprecated Starts a new JFAMachine from an existing Configuration object.
-     */
-    ISVBase(bob::io::base::HDF5File& config);
-
-    /**
-     * @brief Just to virtualise the destructor
-     */
-    virtual ~ISVBase();
-
-    /**
-     * @brief Assigns from a different JFA machine
-     */
-    ISVBase& operator=(const ISVBase &other);
-
-    /**
-     * @brief Equal to
-     */
-    bool operator==(const ISVBase& b) const
-    { return m_base.operator==(b.m_base); }
-
-    /**
-     * @brief Not equal to
-     */
-    bool operator!=(const ISVBase& b) const
-    { return m_base.operator!=(b.m_base); }
-
-    /**
-     * @brief Similar to
-     */
-    bool is_similar_to(const ISVBase& b, const double r_epsilon=1e-5,
-      const double a_epsilon=1e-8) const
-    { return m_base.is_similar_to(b.m_base, r_epsilon, a_epsilon); }
-
-    /**
-     * @brief Saves machine to an HDF5 file
-     */
-    void save(bob::io::base::HDF5File& config) const;
-
-    /**
-     * @brief Loads data from an existing configuration object. Resets
-     * the current state.
-     */
-    void load(bob::io::base::HDF5File& config);
-
-    /**
-     * @brief Returns the UBM
-     */
-    const boost::shared_ptr<bob::learn::misc::GMMMachine> getUbm() const
-    { return m_base.getUbm(); }
-
-    /**
-     * @brief Returns the U matrix
-     */
-    const blitz::Array<double,2>& getU() const
-    { return m_base.getU(); }
-
-    /**
-     * @brief Returns the diagonal matrix diag(d) (as a 1D vector)
-     */
-    const blitz::Array<double,1>& getD() const
-    { return m_base.getD(); }
-
-    /**
-     * @brief Returns the number of Gaussian components C
-     * @warning An exception is thrown if no Universal Background Model has
-     *   been set yet.
-     */
-    const size_t getNGaussians() const
-    { return m_base.getNGaussians(); }
-
-    /**
-     * @brief Returns the feature dimensionality D
-     * @warning An exception is thrown if no Universal Background Model has
-     *   been set yet.
-     */
-    const size_t getNInputs() const
-    { return m_base.getNInputs(); }
-
-    /**
-     * @brief Returns the supervector length CD
-     * (CxD: Number of Gaussian components by the feature dimensionality)
-     * @warning An exception is thrown if no Universal Background Model has
-     *   been set yet.
-     */
-    const size_t getSupervectorLength() const
-    { return m_base.getSupervectorLength(); }
-
-    /**
-     * @brief Returns the size/rank ru of the U matrix
-     */
-    const size_t getDimRu() const
-    { return m_base.getDimRu(); }
-
-    /**
-     * @brief Resets the dimensionality of the subspace U
-     * U is hence uninitialized.
-     */
-    void resize(const size_t ru)
-    { m_base.resize(ru, 1);
-      blitz::Array<double,2>& V = m_base.updateV();
-      V = 0;
-     }
-
-    /**
-     * @brief Returns the U matrix in order to update it
-     * @warning Should only be used by the trainer for efficiency reason,
-     *   or for testing purpose.
-     */
-    blitz::Array<double,2>& updateU()
-    { return m_base.updateU(); }
-
-    /**
-     * @brief Returns the diagonal matrix diag(d) (as a 1D vector) in order
-     * to update it
-     * @warning Should only be used by the trainer for efficiency reason,
-     *   or for testing purpose.
-     */
-    blitz::Array<double,1>& updateD()
-    { return m_base.updateD(); }
-
-
-    /**
-     * @brief Sets (the mean supervector of) the Universal Background Model
-     * U, V and d are uninitialized in case of dimensions update (C or D)
-     */
-    void setUbm(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm)
-    { m_base.setUbm(ubm); }
-
-    /**
-     * @brief Sets the U matrix
-     */
-    void setU(const blitz::Array<double,2>& U)
-    { m_base.setU(U); }
-
-    /**
-     * @brief Sets the diagonal matrix diag(d)
-     * (a 1D vector is expected as an argument)
-     */
-    void setD(const blitz::Array<double,1>& d)
-    { m_base.setD(d); }
-
-    /**
-     * @brief Estimates x from the GMM statistics considering the LPT
-     * assumption, that is the latent session variable x is approximated
-     * using the UBM
-     */
-    void estimateX(const bob::learn::misc::GMMStats& gmm_stats, blitz::Array<double,1>& x) const
-    { m_base.estimateX(gmm_stats, x); }
-
-    /**
-     * @brief Precompute (put U^{T}.Sigma^{-1} matrix in cache)
-     * @warning Should only be used by the trainer for efficiency reason,
-     *   or for testing purpose.
-     */
-    void precompute()
-    { m_base.updateCacheUbmUVD(); }
-
-    /**
-     * @brief Returns the FABase member
-     */
-    const bob::learn::misc::FABase& getBase() const
-    { return m_base; }
-
-
-  private:
-    // FABase
-    bob::learn::misc::FABase m_base;
-};
-
-
-} } } // namespaces
-
-#endif // BOB_LEARN_MISC_JFABASE_H
diff --git a/bob/learn/misc/include/bob.learn.misc/ISVMachine.h b/bob/learn/misc/include/bob.learn.misc/ISVMachine.h
deleted file mode 100644
index 16adfb92f5c22bdb08dc3f596ae7fecfafaab54e..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/ISVMachine.h
+++ /dev/null
@@ -1,230 +0,0 @@
-/**
- * @date Tue Jan 27 16:06:00 2015 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- *
- * @brief A base class for Joint Factor Analysis-like machines
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#ifndef BOB_LEARN_MISC_ISVMACHINE_H
-#define BOB_LEARN_MISC_ISVMACHINE_H
-
-#include <stdexcept>
-
-#include <bob.learn.misc/ISVBase.h>
-#include <bob.learn.misc/GMMMachine.h>
-#include <bob.learn.misc/LinearScoring.h>
-
-#include <bob.io.base/HDF5File.h>
-#include <boost/shared_ptr.hpp>
-
-namespace bob { namespace learn { namespace misc {
-
-
-/**
- * @brief A ISVMachine which is associated to a ISVBase that contains
- *   U D matrices.
- * TODO: add a reference to the journal articles
- */
-class ISVMachine
-{
-  public:
-    /**
-     * @brief Default constructor. Builds an otherwise invalid 0 x 0 ISVMachine
-     * The Universal Background Model and the matrices U, V and diag(d) are
-     * not initialized.
-     */
-    ISVMachine();
-
-    /**
-     * @brief Constructor. Builds a new ISVMachine.
-     *
-     * @param isv_base The ISVBase associated with this machine
-     */
-    ISVMachine(const boost::shared_ptr<bob::learn::misc::ISVBase> isv_base);
-
-    /**
-     * @brief Copy constructor
-     */
-    ISVMachine(const ISVMachine& other);
-
-    /**
-     * @brief Starts a new ISVMachine from an existing Configuration object.
-     */
-    ISVMachine(bob::io::base::HDF5File& config);
-
-    /**
-     * @brief Just to virtualise the destructor
-     */
-    virtual ~ISVMachine();
-
-    /**
-     * @brief Assigns from a different ISV machine
-     */
-    ISVMachine& operator=(const ISVMachine &other);
-
-    /**
-     * @brief Equal to
-     */
-    bool operator==(const ISVMachine& b) const;
-
-    /**
-     * @brief Not equal to
-     */
-    bool operator!=(const ISVMachine& b) const;
-
-    /**
-     * @brief Similar to
-     */
-    bool is_similar_to(const ISVMachine& b, const double r_epsilon=1e-5,
-      const double a_epsilon=1e-8) const;
-
-    /**
-     * @brief Saves machine to an HDF5 file
-     */
-    void save(bob::io::base::HDF5File& config) const;
-
-    /**
-     * @brief Loads data from an existing configuration object. Resets
-     * the current state.
-     */
-    void load(bob::io::base::HDF5File& config);
-
-
-    /**
-     * @brief Returns the number of Gaussian components C
-     * @warning An exception is thrown if no Universal Background Model has
-     *   been set yet.
-     */
-    const size_t getNGaussians() const
-    { return m_isv_base->getNGaussians(); }
-
-    /**
-     * @brief Returns the feature dimensionality D
-     * @warning An exception is thrown if no Universal Background Model has
-     *   been set yet.
-     */
-    const size_t getNInputs() const
-    { return m_isv_base->getNInputs(); }
-
-    /**
-     * @brief Returns the supervector length CD
-     * (CxD: Number of Gaussian components by the feature dimensionality)
-     * @warning An exception is thrown if no Universal Background Model has
-     *   been set yet.
-     */
-    const size_t getSupervectorLength() const
-    { return m_isv_base->getSupervectorLength(); }
-
-    /**
-     * @brief Returns the size/rank ru of the U matrix
-     */
-    const size_t getDimRu() const
-    { return m_isv_base->getDimRu(); }
-
-    /**
-     * @brief Returns the x session factor
-     */
-    const blitz::Array<double,1>& getX() const
-    { return m_cache_x; }
-
-    /**
-     * @brief Returns the z speaker factor
-     */
-    const blitz::Array<double,1>& getZ() const
-    { return m_z; }
-
-    /**
-     * @brief Returns the z speaker factors in order to update it
-     */
-    blitz::Array<double,1>& updateZ()
-    { return m_z; }
-
-    /**
-     * @brief Returns the V matrix
-     */
-    void setZ(const blitz::Array<double,1>& z);
-
-    /**
-     * @brief Returns the ISVBase
-     */
-    const boost::shared_ptr<bob::learn::misc::ISVBase> getISVBase() const
-    { return m_isv_base; }
-
-    /**
-     * @brief Sets the ISVBase
-     */
-    void setISVBase(const boost::shared_ptr<bob::learn::misc::ISVBase> isv_base);
-
-
-    /**
-     * @brief Estimates x from the GMM statistics considering the LPT
-     * assumption, that is the latent session variable x is approximated
-     * using the UBM
-     */
-    void estimateX(const bob::learn::misc::GMMStats& gmm_stats, blitz::Array<double,1>& x) const
-    { m_isv_base->estimateX(gmm_stats, x); }
-    /**
-     * @brief Estimates Ux from the GMM statistics considering the LPT
-     * assumption, that is the latent session variable x is approximated
-     * using the UBM
-     */
-    void estimateUx(const bob::learn::misc::GMMStats& gmm_stats, blitz::Array<double,1>& Ux);
-
-   /**
-    * @brief Execute the machine
-    *
-    * @param input input data used by the machine
-    * @warning Inputs are checked
-    * @return score value computed by the machine    
-    */
-    double forward(const bob::learn::misc::GMMStats& input);
-    /**
-     * @brief Computes a score for the given UBM statistics and given the
-     * Ux vector
-     */
-    double forward(const bob::learn::misc::GMMStats& gmm_stats,
-      const blitz::Array<double,1>& Ux);
-
-    /**
-     * @brief Execute the machine
-     *
-     * @param input input data used by the machine
-     * @warning Inputs are NOT checked
-     * @return score value computed by the machine     
-     */
-    double forward_(const bob::learn::misc::GMMStats& input);
-
-  private:
-    /**
-     * @brief Resize latent variable according to the ISVBase
-     */
-    void resize();
-    /**
-     * @ Update cache
-     */
-    void updateCache();
-    /**
-     * @brief Resize working arrays
-     */
-    void resizeTmp();
-
-    // UBM
-    boost::shared_ptr<bob::learn::misc::ISVBase> m_isv_base;
-
-    // y and z vectors/factors learned during the enrolment procedure
-    blitz::Array<double,1> m_z;
-
-    // cache
-    blitz::Array<double,1> m_cache_mDz;
-    mutable blitz::Array<double,1> m_cache_x;
-
-    // x vector/factor in cache when computing scores
-    mutable blitz::Array<double,1> m_tmp_Ux;
-};
-
-} } } // namespaces
-
-#endif // BOB_LEARN_MISC_ISVMACHINE_H
diff --git a/bob/learn/misc/include/bob.learn.misc/ISVTrainer.h b/bob/learn/misc/include/bob.learn.misc/ISVTrainer.h
deleted file mode 100644
index 1f041419f40190fab36f614e012bb25be9904cd4..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/ISVTrainer.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/**
- * @date Tue Jul 19 12:16:17 2011 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * @brief JFA functions
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#ifndef BOB_LEARN_MISC_ISVTRAINER_H
-#define BOB_LEARN_MISC_ISVTRAINER_H
-
-#include <blitz/array.h>
-#include <bob.learn.misc/GMMStats.h>
-#include <bob.learn.misc/FABaseTrainer.h>
-#include <bob.learn.misc/ISVMachine.h>
-#include <vector>
-
-#include <map>
-#include <string>
-#include <bob.core/array_copy.h>
-#include <boost/shared_ptr.hpp>
-#include <boost/random.hpp>
-#include <bob.core/logging.h>
-
-namespace bob { namespace learn { namespace misc {
-
-class ISVTrainer
-{
-  public:
-    /**
-     * @brief Constructor
-     */
-    ISVTrainer(const double relevance_factor=4.);
-
-    /**
-     * @brief Copy onstructor
-     */
-    ISVTrainer(const ISVTrainer& other);
-
-    /**
-     * @brief Destructor
-     */
-    virtual ~ISVTrainer();
-
-    /**
-     * @brief Assignment operator
-     */
-    ISVTrainer& operator=(const ISVTrainer& other);
-
-    /**
-     * @brief Equal to
-     */
-    bool operator==(const ISVTrainer& b) const;
-
-    /**
-     * @brief Not equal to
-     */
-    bool operator!=(const ISVTrainer& b) const;
-
-    /**
-     * @brief Similar to
-     */
-    bool is_similar_to(const ISVTrainer& b, const double r_epsilon=1e-5,
-      const double a_epsilon=1e-8) const;
-
-    /**
-     * @brief This methods performs some initialization before the EM loop.
-     */
-    virtual void initialize(bob::learn::misc::ISVBase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
-
-    /**
-     * @brief Calculates and saves statistics across the dataset
-     * The statistics will be used in the mStep() that follows.
-     */
-    virtual void eStep(bob::learn::misc::ISVBase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
-
-    /**
-     * @brief Performs a maximization step to update the parameters of the
-     * factor analysis model.
-     */
-    virtual void mStep(bob::learn::misc::ISVBase& machine);
-
-    /**
-     * @brief Computes the average log likelihood using the current estimates
-     * of the latent variables.
-     */
-    virtual double computeLikelihood(bob::learn::misc::ISVBase& machine);
-
-    /**
-     * @brief Enrol a client
-     */
-    void enrol(bob::learn::misc::ISVMachine& machine,
-      const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& features,
-      const size_t n_iter);
-
-    /**
-     * @brief Get the x speaker factors
-     */
-    const std::vector<blitz::Array<double,2> >& getX() const
-    { return m_base_trainer.getX(); }
-    /**
-     * @brief Get the z speaker factors
-     */
-    const std::vector<blitz::Array<double,1> >& getZ() const
-    { return m_base_trainer.getZ(); }
-    /**
-     * @brief Set the x speaker factors
-     */
-    void setX(const std::vector<blitz::Array<double,2> >& X)
-    { m_base_trainer.setX(X); }
-    /**
-     * @brief Set the z speaker factors
-     */
-    void setZ(const std::vector<blitz::Array<double,1> >& z)
-    { m_base_trainer.setZ(z); }
-
-    /**
-     * @brief Getters for the accumulators
-     */
-    const blitz::Array<double,3>& getAccUA1() const
-    { return m_base_trainer.getAccUA1(); }
-    const blitz::Array<double,2>& getAccUA2() const
-    { return m_base_trainer.getAccUA2(); }
-
-    /**
-     * @brief Setters for the accumulators, Very useful if the e-Step needs
-     * to be parallelized.
-     */
-    void setAccUA1(const blitz::Array<double,3>& acc)
-    { m_base_trainer.setAccUA1(acc); }
-    void setAccUA2(const blitz::Array<double,2>& acc)
-    { m_base_trainer.setAccUA2(acc); }
-
-
-  private:
-    /**
-     * @brief Initialize D to sqrt(ubm_var/relevance_factor)
-     */
-    void initializeD(bob::learn::misc::ISVBase& machine) const;
-
-    // Attributes
-    bob::learn::misc::FABaseTrainer m_base_trainer;
-
-    double m_relevance_factor;
-
-    boost::shared_ptr<boost::mt19937> m_rng; ///< The random number generator for the inialization};
-};
-
-} } } // namespaces
-
-#endif /* BOB_LEARN_MISC_ISVTRAINER_H */
diff --git a/bob/learn/misc/include/bob.learn.misc/IVectorMachine.h b/bob/learn/misc/include/bob.learn.misc/IVectorMachine.h
deleted file mode 100644
index 88dd5c1f6f26e683edd76610a9fdc2434e504665..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/IVectorMachine.h
+++ /dev/null
@@ -1,274 +0,0 @@
-/**
- * @date Sat Mar 30 20:55:00 2013 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#ifndef BOB_LEARN_MISC_IVECTOR_MACHINE_H
-#define BOB_LEARN_MISC_IVECTOR_MACHINE_H
-
-#include <blitz/array.h>
-#include <bob.learn.misc/GMMMachine.h>
-#include <bob.learn.misc/GMMStats.h>
-#include <bob.io.base/HDF5File.h>
-
-namespace bob { namespace learn { namespace misc {
-
-/**
- * @brief An IVectorMachine consists of a Total Variability subspace \f$T\f$
- *   and allows the extraction of IVector\n
- * Reference:\n
- * "Front-End Factor Analysis For Speaker Verification",
- *    N. Dehak, P. Kenny, R. Dehak, P. Dumouchel, P. Ouellet,
- *   IEEE Trans. on Audio, Speech and Language Processing
- */
-class IVectorMachine
-{
-  public:
-    /**
-     * @brief Default constructor. Builds an IVectorMachine.
-     * The Universal Background Model and the matrices \f$T\f$ and
-     * \f$diag(\Sigma)\f$ are not initialized.
-     */
-    IVectorMachine();
-
-    /**
-     * @brief Constructor. Builds a new IVectorMachine.
-     * The Universal Background Model and the matrices \f$T\f$ and
-     * \f$diag(\Sigma)\f$ are not initialized.
-     *
-     * @param ubm The Universal Background Model
-     * @param rt size of \f$T\f$ (CD x rt)
-     * @param variance_threshold variance flooring threshold for the
-     *   \f$\Sigma\f$ (diagonal) matrix
-     * @warning rt SHOULD BE >= 1.
-     */
-    IVectorMachine(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm,
-      const size_t rt=1, const double variance_threshold=1e-10);
-
-    /**
-     * @brief Copy constructor
-     */
-    IVectorMachine(const IVectorMachine& other);
-
-    /**
-     * @brief Starts a new IVectorMachine from an existing Configuration object.
-     */
-    IVectorMachine(bob::io::base::HDF5File& config);
-
-    /**
-     * @brief Destructor
-     */
-    virtual ~IVectorMachine();
-
-    /**
-     * @brief Assigns from a different IVectorMachine
-     */
-    IVectorMachine& operator=(const IVectorMachine &other);
-
-    /**
-     * @brief Equal to
-     */
-    bool operator==(const IVectorMachine& b) const;
-
-    /**
-     * @brief Not equal to
-     */
-    bool operator!=(const IVectorMachine& b) const;
-
-    /**
-     * @brief Similar to
-     */
-    bool is_similar_to(const IVectorMachine& b, const double r_epsilon=1e-5,
-      const double a_epsilon=1e-8) const;
-
-    /**
-     * @brief Saves model to an HDF5 file
-     */
-    void save(bob::io::base::HDF5File& config) const;
-
-    /**
-     * @brief Loads data from an existing configuration object. Resets
-     * the current state.
-     */
-    void load(bob::io::base::HDF5File& config);
-
-    /**
-     * @brief Returns the UBM
-     */
-    const boost::shared_ptr<bob::learn::misc::GMMMachine> getUbm() const
-    { return m_ubm; }
-
-    /**
-     * @brief Returns the \f$T\f$ matrix
-     */
-    const blitz::Array<double,2>& getT() const
-    { return m_T; }
-
-    /**
-     * @brief Returns the \f$\Sigma\f$ (diagonal) matrix as a 1D array
-     */
-    const blitz::Array<double,1>& getSigma() const
-    { return m_sigma; }
-
-    /**
-     * @brief Gets the variance flooring threshold
-     */
-    const double getVarianceThreshold() const
-    { return m_variance_threshold; }
-
-    /**
-     * @brief Returns the number of Gaussian components C.
-     * @warning An exception is thrown if no Universal Background Model has
-     *   been set yet.
-     */
-    const size_t getNGaussians() const
-    { return m_ubm->getNGaussians(); }
-
-    /**
-     * @brief Returns the feature dimensionality D.
-     * @warning An exception is thrown if no Universal Background Model has
-     *   been set yet.
-     */
-    const size_t getNInputs() const
-    { return m_ubm->getNInputs(); }
-
-    /**
-     * @brief Returns the supervector length CD.
-     * (CxD: Number of Gaussian components by the feature dimensionality)
-     * @warning An exception is thrown if no Universal Background Model has
-     *   been set yet.
-     */
-    const size_t getSupervectorLength() const
-    { return m_ubm->getNGaussians()*m_ubm->getNInputs(); }
-
-    /**
-     * @brief Returns the size/rank rt of the \f$T\f$ matrix
-     */
-    const size_t getDimRt() const
-    { return m_rt; }
-
-    /**
-     * @brief Resets the dimensionality of the subspace \f$T\f$.
-     * \f$T\f$ is hence uninitialized.
-     */
-    void resize(const size_t rt);
-
-    /**
-     * @brief Returns the \f$T\f$ matrix in order to update it.
-     * @warning Should only be used by the trainer for efficiency reason,
-     *   or for testing purpose.
-     */
-    blitz::Array<double,2>& updateT()
-    { return m_T; }
-
-    /**
-     * @brief Returns the \f$\Sigma\f$ (diagonal) matrix in order to update it.
-     * @warning Should only be used by the trainer for efficiency reason,
-     *   or for testing purpose.
-     */
-    blitz::Array<double,1>& updateSigma()
-    { return m_sigma; }
-
-    /**
-     * @brief Sets (the mean supervector of) the Universal Background Model.
-     * \f$T\f$ and \f$\Sigma\f$ are uninitialized in case of dimensions update (C or D)
-     */
-    void setUbm(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm);
-
-    /**
-     * @brief Sets the \f$T\f$ matrix
-     */
-    void setT(const blitz::Array<double,2>& T);
-
-    /**
-     * @brief Sets the \f$\Sigma\f$ (diagonal) matrix
-     */
-    void setSigma(const blitz::Array<double,1>& sigma);
-
-    /**
-     * @brief Set the variance flooring threshold
-     */
-    void setVarianceThreshold(const double value);
-
-    /**
-     * @brief Update arrays in cache
-     * @warning It is only useful when using updateT() or updateSigma()
-     * and should mostly be done by trainers
-     */
-    void precompute();
-
-    /**
-     * @brief Computes \f$(Id + \sum_{c=1}^{C} N_{i,j,c} T^{T} \Sigma_{c}^{-1} T)\f$
-     * @warning No check is perform
-     */
-    void computeIdTtSigmaInvT(const bob::learn::misc::GMMStats& input, blitz::Array<double,2>& output) const;
-
-    /**
-     * @brief Computes \f$T^{T} \Sigma^{-1} \sum_{c=1}^{C} (F_c - N_c ubmmean_{c})\f$
-     * @warning No check is perform
-     */
-    void computeTtSigmaInvFnorm(const bob::learn::misc::GMMStats& input, blitz::Array<double,1>& output) const;
-
-    /**
-     * @brief Extracts an ivector from the input GMM statistics
-     *
-     * @param input GMM statistics to be used by the machine
-     * @param output I-vector computed by the machine
-     */
-    void forward(const bob::learn::misc::GMMStats& input, blitz::Array<double,1>& output) const;
-
-    /**
-     * @brief Extracts an ivector from the input GMM statistics
-     *
-     * @param input GMM statistics to be used by the machine
-     * @param output I-vector computed by the machine
-     * @warning Inputs are NOT checked
-     */
-    void forward_(const bob::learn::misc::GMMStats& input, blitz::Array<double,1>& output) const;
-
-  private:
-    /**
-     * @brief Apply the variance flooring thresholds.
-     * This method is called when using setVarianceThresholds()
-     */
-    void applyVarianceThreshold();
-
-    /**
-     * @brief Resize cache
-     */
-    void resizeCache();
-    /**
-     * @brief Resize working arrays
-     */
-    void resizeTmp();
-    /**
-     * @brief Resize cache and working arrays before updating cache
-     */
-    void resizePrecompute();
-
-    // UBM
-    boost::shared_ptr<bob::learn::misc::GMMMachine> m_ubm;
-
-    // dimensionality
-    size_t m_rt; ///< size of \f$T\f$ (CD x rt)
-
-    ///< \f$T\f$ and \f$Sigma\f$ matrices.
-    ///< \f$Sigma\f$ is assumed to be diagonal, and only the diagonal is stored
-    blitz::Array<double,2> m_T; ///< The total variability matrix \f$T\f$
-    blitz::Array<double,1> m_sigma; ///< The diagonal covariance matrix \f$\Sigma\f$
-    double m_variance_threshold; ///< The variance flooring threshold
-
-    blitz::Array<double,3> m_cache_Tct_sigmacInv;
-    blitz::Array<double,3> m_cache_Tct_sigmacInv_Tc;
-
-    mutable blitz::Array<double,1> m_tmp_d;
-    mutable blitz::Array<double,1> m_tmp_t1;
-    mutable blitz::Array<double,1> m_tmp_t2;
-    mutable blitz::Array<double,2> m_tmp_tt;
-};
-
-} } } // namespaces
-
-#endif // BOB_LEARN_MISC_IVECTOR_MACHINE_H
diff --git a/bob/learn/misc/include/bob.learn.misc/IVectorTrainer.h b/bob/learn/misc/include/bob.learn.misc/IVectorTrainer.h
deleted file mode 100644
index 4f496d659859f00ff1012b56f8094b4005fbafbb..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/IVectorTrainer.h
+++ /dev/null
@@ -1,152 +0,0 @@
-/**
- * @date Sat Mar 30 20:55:00 2013 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#ifndef BOB_LEARN_MISC_IVECTOR_TRAINER_H
-#define BOB_LEARN_MISC_IVECTOR_TRAINER_H
-
-#include <blitz/array.h>
-#include <bob.learn.misc/IVectorMachine.h>
-#include <bob.learn.misc/GMMStats.h>
-#include <boost/shared_ptr.hpp>
-#include <vector>
-#include <bob.core/array_copy.h>
-#include <boost/random.hpp>
-
-#include <boost/random/mersenne_twister.hpp>
-
-namespace bob { namespace learn { namespace misc {
-
-/**
- * @brief An IVectorTrainer to learn a Total Variability subspace \f$T\f$
- *  (and eventually a covariance matrix \f$\Sigma\f$).\n
- * Reference:\n
- * "Front-End Factor Analysis For Speaker Verification",
- *    N. Dehak, P. Kenny, R. Dehak, P. Dumouchel, P. Ouellet,
- *   IEEE Trans. on Audio, Speech and Language Processing
- */
-class IVectorTrainer
-{
-  public:
-    /**
-     * @brief Default constructor. Builds an IVectorTrainer
-     */
-    IVectorTrainer(const bool update_sigma=false);
-
-    /**
-     * @brief Copy constructor
-     */
-    IVectorTrainer(const IVectorTrainer& other);
-
-    /**
-     * @brief Destructor
-     */
-    virtual ~IVectorTrainer();
-
-    /**
-     * @brief Initialization before the EM loop
-     */
-    virtual void initialize(bob::learn::misc::IVectorMachine& ivector);
-
-    /**
-     * @brief Calculates statistics across the dataset,
-     * and saves these as:
-     * - m_acc_Nij_wij2
-     * - m_acc_Fnormij_wij
-     * - m_acc_Nij (only if update_sigma is enabled)
-     * - m_acc_Snormij (only if update_sigma is enabled)
-     *
-     * These statistics will be used in the mStep() that follows.
-     */
-    virtual void eStep(bob::learn::misc::IVectorMachine& ivector,
-      const std::vector<bob::learn::misc::GMMStats>& data);
-
-    /**
-     * @brief Maximisation step: Update the Total Variability matrix \f$T\f$
-     * and \f$\Sigma\f$ if update_sigma is enabled.
-     */
-    virtual void mStep(bob::learn::misc::IVectorMachine& ivector);
-
-
-    /**
-     * @brief Assigns from a different IVectorTrainer
-     */
-    IVectorTrainer& operator=(const IVectorTrainer &other);
-
-    /**
-     * @brief Equal to
-     */
-    bool operator==(const IVectorTrainer& b) const;
-
-    /**
-     * @brief Not equal to
-     */
-    bool operator!=(const IVectorTrainer& b) const;
-
-    /**
-     * @brief Similar to
-     */
-    bool is_similar_to(const IVectorTrainer& b, const double r_epsilon=1e-5,
-      const double a_epsilon=1e-8) const;
-
-    /**
-     * @brief Getters for the accumulators
-     */
-    const blitz::Array<double,3>& getAccNijWij2() const
-    { return m_acc_Nij_wij2; }
-    const blitz::Array<double,3>& getAccFnormijWij() const
-    { return m_acc_Fnormij_wij; }
-    const blitz::Array<double,1>& getAccNij() const
-    { return m_acc_Nij; }
-    const blitz::Array<double,2>& getAccSnormij() const
-    { return m_acc_Snormij; }
-
-    /**
-     * @brief Setters for the accumulators, Very useful if the e-Step needs
-     * to be parallelized.
-     */
-    void setAccNijWij2(const blitz::Array<double,3>& acc)
-    { bob::core::array::assertSameShape(acc, m_acc_Nij_wij2);
-      m_acc_Nij_wij2 = acc; }
-    void setAccFnormijWij(const blitz::Array<double,3>& acc)
-    { bob::core::array::assertSameShape(acc, m_acc_Fnormij_wij);
-      m_acc_Fnormij_wij = acc; }
-    void setAccNij(const blitz::Array<double,1>& acc)
-    { bob::core::array::assertSameShape(acc, m_acc_Nij);
-      m_acc_Nij = acc; }
-    void setAccSnormij(const blitz::Array<double,2>& acc)
-    { bob::core::array::assertSameShape(acc, m_acc_Snormij);
-      m_acc_Snormij = acc; }
-
-  protected:
-    // Attributes
-    bool m_update_sigma;
-
-    // Acccumulators
-    blitz::Array<double,3> m_acc_Nij_wij2;
-    blitz::Array<double,3> m_acc_Fnormij_wij;
-    blitz::Array<double,1> m_acc_Nij;
-    blitz::Array<double,2> m_acc_Snormij;
-
-    // Working arrays
-    mutable blitz::Array<double,1> m_tmp_wij;
-    mutable blitz::Array<double,2> m_tmp_wij2;
-    mutable blitz::Array<double,1> m_tmp_d1;
-    mutable blitz::Array<double,1> m_tmp_t1;
-    mutable blitz::Array<double,2> m_tmp_dd1;
-    mutable blitz::Array<double,2> m_tmp_dt1;
-    mutable blitz::Array<double,2> m_tmp_tt1;
-    mutable blitz::Array<double,2> m_tmp_tt2;
-    
-    /**
-     * @brief The random number generator for the inialization
-     */
-    boost::shared_ptr<boost::mt19937> m_rng;    
-};
-
-} } } // namespaces
-
-#endif // BOB_LEARN_MISC_IVECTOR_TRAINER_H
diff --git a/bob/learn/misc/include/bob.learn.misc/JFABase.h b/bob/learn/misc/include/bob.learn.misc/JFABase.h
deleted file mode 100644
index 7fbc669238363b83066f17e60aa7c18f4c2459b3..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/JFABase.h
+++ /dev/null
@@ -1,253 +0,0 @@
-/**
- * @date Tue Jan 27 15:54:00 2015 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- *
- * @brief A base class for Joint Factor Analysis-like machines
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#ifndef BOB_LEARN_MISC_JFABASE_H
-#define BOB_LEARN_MISC_JFABASE_H
-
-#include <stdexcept>
-
-#include <bob.learn.misc/GMMMachine.h>
-#include <bob.learn.misc/FABase.h>
-//#include <bob.learn.misc/LinearScoring.h>
-
-#include <bob.io.base/HDF5File.h>
-#include <boost/shared_ptr.hpp>
-
-namespace bob { namespace learn { namespace misc {
-
-
-/**
- * @brief A JFA Base class which contains U, V and D matrices
- * TODO: add a reference to the journal articles
- */
-class JFABase
-{
-  public:
-    /**
-     * @brief Default constructor. Builds a 1 x 1 JFABase
-     * The Universal Background Model and the matrices U, V and diag(d) are
-     * not initialized.
-     */
-    JFABase();
-
-    /**
-     * @brief Constructor. Builds a new JFABase.
-     * The Universal Background Model and the matrices U, V and diag(d) are
-     * not initialized.
-     *
-     * @param ubm The Universal Background Model
-     * @param ru size of U (CD x ru)
-     * @param rv size of U (CD x rv)
-     * @warning ru and rv SHOULD BE  >= 1.
-     */
-    JFABase(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm, const size_t ru=1, const size_t rv=1);
-
-    /**
-     * @brief Copy constructor
-     */
-    JFABase(const JFABase& other);
-
-    /**
-     * @deprecated Starts a new JFAMachine from an existing Configuration object.
-     */
-    JFABase(bob::io::base::HDF5File& config);
-
-    /**
-     * @brief Just to virtualise the destructor
-     */
-    virtual ~JFABase();
-
-    /**
-     * @brief Assigns from a different JFA machine
-     */
-    JFABase& operator=(const JFABase &other);
-
-    /**
-     * @brief Equal to
-     */
-    bool operator==(const JFABase& b) const
-    { return m_base.operator==(b.m_base); }
-
-    /**
-     * @brief Not equal to
-     */
-    bool operator!=(const JFABase& b) const
-    { return m_base.operator!=(b.m_base); }
-
-    /**
-     * @brief Similar to
-     */
-    bool is_similar_to(const JFABase& b, const double r_epsilon=1e-5,
-      const double a_epsilon=1e-8) const
-    { return m_base.is_similar_to(b.m_base, r_epsilon, a_epsilon); }
-
-    /**
-     * @brief Saves model to an HDF5 file
-     */
-    void save(bob::io::base::HDF5File& config) const;
-
-    /**
-     * @brief Loads data from an existing configuration object. Resets
-     * the current state.
-     */
-    void load(bob::io::base::HDF5File& config);
-
-    /**
-     * @brief Returns the UBM
-     */
-    const boost::shared_ptr<bob::learn::misc::GMMMachine> getUbm() const
-    { return m_base.getUbm(); }
-
-    /**
-     * @brief Returns the U matrix
-     */
-    const blitz::Array<double,2>& getU() const
-    { return m_base.getU(); }
-
-    /**
-     * @brief Returns the V matrix
-     */
-    const blitz::Array<double,2>& getV() const
-    { return m_base.getV(); }
-
-    /**
-     * @brief Returns the diagonal matrix diag(d) (as a 1D vector)
-     */
-    const blitz::Array<double,1>& getD() const
-    { return m_base.getD(); }
-
-    /**
-     * @brief Returns the number of Gaussian components
-     * @warning An exception is thrown if no Universal Background Model has
-     *   been set yet.
-     */
-    const size_t getNGaussians() const
-    { return m_base.getNGaussians();}
-
-    /**
-     * @brief Returns the feature dimensionality D
-     * @warning An exception is thrown if no Universal Background Model has
-     *   been set yet.
-     */
-    const size_t getNInputs() const
-    { return m_base.getNInputs(); }
-
-    /**
-     * @brief Returns the supervector length CD
-     * (CxD: Number of Gaussian components by the feature dimensionality)
-     * @warning An exception is thrown if no Universal Background Model has
-     *   been set yet.
-     */
-    const size_t getSupervectorLength() const
-    { return m_base.getSupervectorLength(); }
-
-    /**
-     * @brief Returns the size/rank ru of the U matrix
-     */
-    const size_t getDimRu() const
-    { return m_base.getDimRu(); }
-
-    /**
-     * @brief Returns the size/rank rv of the V matrix
-     */
-    const size_t getDimRv() const
-    { return m_base.getDimRv(); }
-
-    /**
-     * @brief Resets the dimensionality of the subspace U and V
-     * U and V are hence uninitialized.
-     */
-    void resize(const size_t ru, const size_t rv)
-    { m_base.resize(ru, rv); }
-
-    /**
-     * @brief Returns the U matrix in order to update it
-     * @warning Should only be used by the trainer for efficiency reason,
-     *   or for testing purpose.
-     */
-    blitz::Array<double,2>& updateU()
-    { return m_base.updateU(); }
-
-    /**
-     * @brief Returns the V matrix in order to update it
-     * @warning Should only be used by the trainer for efficiency reason,
-     *   or for testing purpose.
-     */
-    blitz::Array<double,2>& updateV()
-    { return m_base.updateV(); }
-
-    /**
-     * @brief Returns the diagonal matrix diag(d) (as a 1D vector) in order
-     * to update it
-     * @warning Should only be used by the trainer for efficiency reason,
-     *   or for testing purpose.
-     */
-    blitz::Array<double,1>& updateD()
-    { return m_base.updateD(); }
-
-
-    /**
-     * @brief Sets (the mean supervector of) the Universal Background Model
-     * U, V and d are uninitialized in case of dimensions update (C or D)
-     */
-    void setUbm(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm)
-    { m_base.setUbm(ubm); }
-
-    /**
-     * @brief Sets the U matrix
-     */
-    void setU(const blitz::Array<double,2>& U)
-    { m_base.setU(U); }
-
-    /**
-     * @brief Sets the V matrix
-     */
-    void setV(const blitz::Array<double,2>& V)
-    { m_base.setV(V); }
-
-    /**
-     * @brief Sets the diagonal matrix diag(d)
-     * (a 1D vector is expected as an argument)
-     */
-    void setD(const blitz::Array<double,1>& d)
-    { m_base.setD(d); }
-
-    /**
-     * @brief Estimates x from the GMM statistics considering the LPT
-     * assumption, that is the latent session variable x is approximated
-     * using the UBM
-     */
-    void estimateX(const bob::learn::misc::GMMStats& gmm_stats, blitz::Array<double,1>& x) const
-    { m_base.estimateX(gmm_stats, x); }
-
-    /**
-     * @brief Precompute (put U^{T}.Sigma^{-1} matrix in cache)
-     * @warning Should only be used by the trainer for efficiency reason,
-     *   or for testing purpose.
-     */
-    void precompute()
-    { m_base.updateCacheUbmUVD(); }
-
-    /**
-     * @brief Returns the FABase member
-     */
-    const bob::learn::misc::FABase& getBase() const
-    { return m_base; }
-
-
-  private:
-    // FABase
-    bob::learn::misc::FABase m_base;
-};
-
-
-} } } // namespaces
-
-#endif // BOB_LEARN_MISC_JFABASE_H
diff --git a/bob/learn/misc/include/bob.learn.misc/JFAMachine.h b/bob/learn/misc/include/bob.learn.misc/JFAMachine.h
deleted file mode 100644
index 54c93b9ef4e85b99218f730341e908de00876837..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/JFAMachine.h
+++ /dev/null
@@ -1,254 +0,0 @@
-/**
- * @date Tue Jan 27 16:47:00 2015 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- *
- * @brief A base class for Joint Factor Analysis-like machines
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#ifndef BOB_LEARN_MISC_JFAMACHINE_H
-#define BOB_LEARN_MISC_JFAMACHINE_H
-
-#include <stdexcept>
-
-#include <bob.learn.misc/JFABase.h>
-#include <bob.learn.misc/GMMMachine.h>
-#include <bob.learn.misc/LinearScoring.h>
-
-#include <bob.io.base/HDF5File.h>
-#include <boost/shared_ptr.hpp>
-
-namespace bob { namespace learn { namespace misc {
-
-
-/**
- * @brief A JFAMachine which is associated to a JFABase that contains
- *   U, V and D matrices. The JFAMachine describes the identity part
- *   (latent variables y and z)
- * TODO: add a reference to the journal articles
- */
-class JFAMachine
-{
-  public:
-    /**
-     * @brief Default constructor. Builds an otherwise invalid 0 x 0 JFAMachine
-     * The Universal Background Model and the matrices U, V and diag(d) are
-     * not initialized.
-     */
-    JFAMachine();
-
-    /**
-     * @brief Constructor. Builds a new JFAMachine.
-     *
-     * @param jfa_base The JFABase associated with this machine
-     */
-    JFAMachine(const boost::shared_ptr<bob::learn::misc::JFABase> jfa_base);
-
-    /**
-     * @brief Copy constructor
-     */
-    JFAMachine(const JFAMachine& other);
-
-    /**
-     * @deprecated Starts a new JFAMachine from an existing Configuration object.
-     */
-    JFAMachine(bob::io::base::HDF5File& config);
-
-    /**
-     * @brief Just to virtualise the destructor
-     */
-    virtual ~JFAMachine();
-
-    /**
-     * @brief Assigns from a different JFA machine
-     */
-    JFAMachine& operator=(const JFAMachine &other);
-
-    /**
-     * @brief Equal to
-     */
-    bool operator==(const JFAMachine& b) const;
-
-    /**
-     * @brief Not equal to
-     */
-    bool operator!=(const JFAMachine& b) const;
-
-    /**
-     * @brief Similar to
-     */
-    bool is_similar_to(const JFAMachine& b, const double r_epsilon=1e-5,
-      const double a_epsilon=1e-8) const;
-
-    /**
-     * @brief Saves machine to an HDF5 file
-     */
-    void save(bob::io::base::HDF5File& config) const;
-
-    /**
-     * @brief Loads data from an existing configuration object. Resets
-     * the current state.
-     */
-    void load(bob::io::base::HDF5File& config);
-
-    /**
-     * @brief Returns the number of Gaussian components C
-     * @warning An exception is thrown if no Universal Background Model has
-     *   been set yet.
-     */
-    const size_t getNGaussians() const
-    { return m_jfa_base->getNGaussians(); }
-
-    /**
-     * @brief Returns the feature dimensionality D
-     * @warning An exception is thrown if no Universal Background Model has
-     *   been set yet.
-     */
-    const size_t getNInputs() const
-    { return m_jfa_base->getNInputs(); }
-
-    /**
-     * @brief Returns the supervector length CD
-     * (CxD: Number of Gaussian components by the feature dimensionality)
-     * @warning An exception is thrown if no Universal Background Model has
-     *   been set yet.
-     */
-    const size_t getSupervectorLength() const
-    { return m_jfa_base->getSupervectorLength(); }
-
-    /**
-     * @brief Returns the size/rank ru of the U matrix
-     */
-    const size_t getDimRu() const
-    { return m_jfa_base->getDimRu(); }
-
-    /**
-     * @brief Returns the size/rank rv of the V matrix
-     */
-    const size_t getDimRv() const
-    { return m_jfa_base->getDimRv(); }
-
-    /**
-     * @brief Returns the x session factor
-     */
-    const blitz::Array<double,1>& getX() const
-    { return m_cache_x; }
-
-    /**
-     * @brief Returns the y speaker factor
-     */
-    const blitz::Array<double,1>& getY() const
-    { return m_y; }
-
-    /**
-     * @brief Returns the z speaker factor
-     */
-    const blitz::Array<double,1>& getZ() const
-    { return m_z; }
-
-    /**
-     * @brief Returns the y speaker factors in order to update it
-     */
-    blitz::Array<double,1>& updateY()
-    { return m_y; }
-
-    /**
-     * @brief Returns the z speaker factors in order to update it
-     */
-    blitz::Array<double,1>& updateZ()
-    { return m_z; }
-
-    /**
-     * @brief Returns the y speaker factors
-     */
-    void setY(const blitz::Array<double,1>& y);
-
-    /**
-     * @brief Returns the V matrix
-     */
-    void setZ(const blitz::Array<double,1>& z);
-
-    /**
-     * @brief Returns the JFABase
-     */
-    const boost::shared_ptr<bob::learn::misc::JFABase> getJFABase() const
-    { return m_jfa_base; }
-
-    /**
-     * @brief Sets the JFABase
-     */
-    void setJFABase(const boost::shared_ptr<bob::learn::misc::JFABase> jfa_base);
-
-
-    /**
-     * @brief Estimates x from the GMM statistics considering the LPT
-     * assumption, that is the latent session variable x is approximated
-     * using the UBM
-     */
-    void estimateX(const bob::learn::misc::GMMStats& gmm_stats, blitz::Array<double,1>& x) const
-    { m_jfa_base->estimateX(gmm_stats, x); }
-    /**
-     * @brief Estimates Ux from the GMM statistics considering the LPT
-     * assumption, that is the latent session variable x is approximated
-     * using the UBM
-     */
-    void estimateUx(const bob::learn::misc::GMMStats& gmm_stats, blitz::Array<double,1>& Ux);
-
-   /**
-    * @brief Execute the machine
-    *
-    * @param input input data used by the machine
-    * @warning Inputs are checked
-    * @return score value computed by the machine
-    */
-    double forward(const bob::learn::misc::GMMStats& input);
-    /**
-     * @brief Computes a score for the given UBM statistics and given the
-     * Ux vector
-     */
-    double forward(const bob::learn::misc::GMMStats& gmm_stats,
-      const blitz::Array<double,1>& Ux);
-
-    /**
-     * @brief Execute the machine
-     *
-     * @param input input data used by the machine
-     * @param score value computed by the machine
-     * @warning Inputs are NOT checked
-     */
-    double forward_(const bob::learn::misc::GMMStats& input);
-
-  private:
-    /**
-     * @brief Resize latent variable according to the JFABase
-     */
-    void resize();
-    /**
-     * @brief Resize working arrays
-     */
-    void resizeTmp();
-    /**
-     * @brief Update the cache
-     */
-    void updateCache();
-
-    // UBM
-    boost::shared_ptr<bob::learn::misc::JFABase> m_jfa_base;
-
-    // y and z vectors/factors learned during the enrolment procedure
-    blitz::Array<double,1> m_y;
-    blitz::Array<double,1> m_z;
-
-    // cache
-    blitz::Array<double,1> m_cache_mVyDz;
-    mutable blitz::Array<double,1> m_cache_x;
-
-    // x vector/factor in cache when computing scores
-    mutable blitz::Array<double,1> m_tmp_Ux;
-};
-
-} } } // namespaces
-
-#endif // BOB_LEARN_MISC_JFAMACHINE_H
diff --git a/bob/learn/misc/include/bob.learn.misc/JFATrainer.h b/bob/learn/misc/include/bob.learn.misc/JFATrainer.h
deleted file mode 100644
index 99070b5b2ae6299c7ce2354e8f84783e456bd46b..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/JFATrainer.h
+++ /dev/null
@@ -1,238 +0,0 @@
-/**
- * @date Tue Jul 19 12:16:17 2011 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * @brief JFA functions
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#ifndef BOB_LEARN_MISC_JFATRAINER_H
-#define BOB_LEARN_MISC_JFATRAINER_H
-
-#include <blitz/array.h>
-#include <bob.learn.misc/GMMStats.h>
-#include <bob.learn.misc/FABaseTrainer.h>
-#include <bob.learn.misc/JFAMachine.h>
-#include <vector>
-
-#include <map>
-#include <string>
-#include <bob.core/array_copy.h>
-#include <boost/shared_ptr.hpp>
-#include <boost/random.hpp>
-#include <bob.core/logging.h>
-
-namespace bob { namespace learn { namespace misc {
-
-class JFATrainer
-{
-  public:
-    /**
-     * @brief Constructor
-     */
-    JFATrainer();
-
-    /**
-     * @brief Copy onstructor
-     */
-    JFATrainer(const JFATrainer& other);
-
-    /**
-     * @brief Destructor
-     */
-    virtual ~JFATrainer();
-
-    /**
-     * @brief Assignment operator
-     */
-    JFATrainer& operator=(const JFATrainer& other);
-
-    /**
-     * @brief Equal to
-     */
-    bool operator==(const JFATrainer& b) const;
-
-    /**
-     * @brief Not equal to
-     */
-    bool operator!=(const JFATrainer& b) const;
-
-    /**
-     * @brief Similar to
-     */
-    bool is_similar_to(const JFATrainer& b, const double r_epsilon=1e-5,
-      const double a_epsilon=1e-8) const;
-
-    /**
-     * @brief Sets the maximum number of EM-like iterations (for each subspace)
-     */
-    //void setMaxIterations(const size_t max_iterations)
-    //{ m_max_iterations = max_iterations; }
-
-    /**
-     * @brief Gets the maximum number of EM-like iterations (for each subspace)
-     */
-    //size_t getMaxIterations() const
-    //{ return m_max_iterations; }
-
-    /**
-     * @brief This methods performs some initialization before the EM loop.
-     */
-    virtual void initialize(bob::learn::misc::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
-
-    /**
-     * @brief This methods performs the e-Step to train the first subspace V
-     */
-    virtual void eStep1(bob::learn::misc::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
-    /**
-     * @brief This methods performs the m-Step to train the first subspace V
-     */
-    virtual void mStep1(bob::learn::misc::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
-    /**
-     * @brief This methods performs the finalization after training the first
-     * subspace V
-     */
-    virtual void finalize1(bob::learn::misc::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
-    /**
-     * @brief This methods performs the e-Step to train the second subspace U
-     */
-    virtual void eStep2(bob::learn::misc::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
-    /**
-     * @brief This methods performs the m-Step to train the second subspace U
-     */
-    virtual void mStep2(bob::learn::misc::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
-    /**
-     * @brief This methods performs the finalization after training the second
-     * subspace U
-     */
-    virtual void finalize2(bob::learn::misc::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
-    /**
-     * @brief This methods performs the e-Step to train the third subspace d
-     */
-    virtual void eStep3(bob::learn::misc::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
-    /**
-     * @brief This methods performs the m-Step to train the third subspace d
-     */
-    virtual void mStep3(bob::learn::misc::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
-    /**
-     * @brief This methods performs the finalization after training the third
-     * subspace d
-     */
-    virtual void finalize3(bob::learn::misc::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
-
-    /**
-     * @brief This methods performs the main loops to train the subspaces U, V and d
-     */
-    //virtual void train_loop(bob::learn::misc::JFABase& machine,
-      //const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
-    /**
-     * @brief This methods trains the subspaces U, V and d
-     */
-    //virtual void train(bob::learn::misc::JFABase& machine,
-      //const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
-
-    /**
-     * @brief Enrol a client
-     */
-    void enrol(bob::learn::misc::JFAMachine& machine,
-      const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& features,
-      const size_t n_iter);
-
-    /**
-     * @brief Sets the Random Number Generator
-     */
-    void setRng(const boost::shared_ptr<boost::mt19937> rng)
-    { m_rng = rng; }
-
-    /**
-     * @brief Gets the Random Number Generator
-     */
-    const boost::shared_ptr<boost::mt19937> getRng() const
-    { return m_rng; }
-
-    /**
-     * @brief Get the x speaker factors
-     */
-    const std::vector<blitz::Array<double,2> >& getX() const
-    { return m_base_trainer.getX(); }
-    /**
-     * @brief Get the y speaker factors
-     */
-    const std::vector<blitz::Array<double,1> >& getY() const
-    { return m_base_trainer.getY(); }
-    /**
-     * @brief Get the z speaker factors
-     */
-    const std::vector<blitz::Array<double,1> >& getZ() const
-    { return m_base_trainer.getZ(); }
-    /**
-     * @brief Set the x speaker factors
-     */
-    void setX(const std::vector<blitz::Array<double,2> >& X)
-    { m_base_trainer.setX(X); }
-    /**
-     * @brief Set the y speaker factors
-     */
-    void setY(const std::vector<blitz::Array<double,1> >& y)
-    { m_base_trainer.setY(y); }
-    /**
-     * @brief Set the z speaker factors
-     */
-    void setZ(const std::vector<blitz::Array<double,1> >& z)
-    { m_base_trainer.setZ(z); }
-
-    /**
-     * @brief Getters for the accumulators
-     */
-    const blitz::Array<double,3>& getAccVA1() const
-    { return m_base_trainer.getAccVA1(); }
-    const blitz::Array<double,2>& getAccVA2() const
-    { return m_base_trainer.getAccVA2(); }
-    const blitz::Array<double,3>& getAccUA1() const
-    { return m_base_trainer.getAccUA1(); }
-    const blitz::Array<double,2>& getAccUA2() const
-    { return m_base_trainer.getAccUA2(); }
-    const blitz::Array<double,1>& getAccDA1() const
-    { return m_base_trainer.getAccDA1(); }
-    const blitz::Array<double,1>& getAccDA2() const
-    { return m_base_trainer.getAccDA2(); }
-
-    /**
-     * @brief Setters for the accumulators, Very useful if the e-Step needs
-     * to be parallelized.
-     */
-    void setAccVA1(const blitz::Array<double,3>& acc)
-    { m_base_trainer.setAccVA1(acc); }
-    void setAccVA2(const blitz::Array<double,2>& acc)
-    { m_base_trainer.setAccVA2(acc); }
-    void setAccUA1(const blitz::Array<double,3>& acc)
-    { m_base_trainer.setAccUA1(acc); }
-    void setAccUA2(const blitz::Array<double,2>& acc)
-    { m_base_trainer.setAccUA2(acc); }
-    void setAccDA1(const blitz::Array<double,1>& acc)
-    { m_base_trainer.setAccDA1(acc); }
-    void setAccDA2(const blitz::Array<double,1>& acc)
-    { m_base_trainer.setAccDA2(acc); }
-
-
-  private:
-    // Attributes
-    //size_t m_max_iterations;
-    boost::shared_ptr<boost::mt19937> m_rng; ///< The random number generator for the inialization
-    bob::learn::misc::FABaseTrainer m_base_trainer;
-};
-
-} } } // namespaces
-
-#endif /* BOB_LEARN_MISC_JFATRAINER_H */
diff --git a/bob/learn/misc/include/bob.learn.misc/KMeansMachine.h b/bob/learn/misc/include/bob.learn.misc/KMeansMachine.h
deleted file mode 100644
index 5f8f5bf27a35ce2eb6fb464c3db3f252b6928e9b..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/KMeansMachine.h
+++ /dev/null
@@ -1,244 +0,0 @@
-/**
- * @date Tue May 10 11:35:58 2011 +0200
- * @author Francois Moulin <Francois.Moulin@idiap.ch>
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-#ifndef BOB_LEARN_MISC_KMEANSMACHINE_H
-#define BOB_LEARN_MISC_KMEANSMACHINE_H
-
-#include <blitz/array.h>
-#include <cfloat>
-
-#include <bob.io.base/HDF5File.h>
-
-namespace bob { namespace learn { namespace misc {
-
-/**
- * @brief This class implements a k-means classifier.
- * @details See Section 9.1 of Bishop, "Pattern recognition and machine learning", 2006
- */
-class KMeansMachine {
-  public:
-    /**
-     * Default constructor. Builds an otherwise invalid 0 x 0 k-means
-     * machine. This is equivalent to construct a LinearMachine with two
-     * size_t parameters set to 0, as in LinearMachine(0, 0).
-     */
-    KMeansMachine();
-
-    /**
-     * Constructor
-     * @param[in] n_means  The number of means
-     * @param[in] n_inputs The feature dimensionality
-     */
-    KMeansMachine(const size_t n_means, const size_t n_inputs);
-
-    /**
-     * Builds a new machine with the given means. Each row of the means
-     * matrix should represent a mean.
-     */
-    KMeansMachine(const blitz::Array<double,2>& means);
-
-    /**
-     * Copies another machine (copy constructor)
-     */
-    KMeansMachine(const KMeansMachine& other);
-
-    /**
-     * Starts a new KMeansMachine from an existing Configuration object.
-     */
-    KMeansMachine(bob::io::base::HDF5File& config);
-
-    /**
-     * Destructor
-     */
-    virtual ~KMeansMachine();
-
-    /**
-     * Assigns from a different machine
-     */
-    KMeansMachine& operator=(const KMeansMachine& other);
-
-    /**
-     * Equal to
-     */
-    bool operator==(const KMeansMachine& b) const;
-
-    /**
-     * Not equal to
-     */
-    bool operator!=(const KMeansMachine& b) const;
-
-    /**
-     * @brief Similar to
-     */
-    bool is_similar_to(const KMeansMachine& b, const double r_epsilon=1e-5,
-      const double a_epsilon=1e-8) const;
-
-    /**
-     * Loads data from an existing configuration object. Resets the current
-     * state.
-     */
-    void load(bob::io::base::HDF5File& config);
-
-    /**
-     * Saves an existing machine to a Configuration object.
-     */
-    void save(bob::io::base::HDF5File& config) const;
-
-    /**
-     * Output the minimum (Square Euclidean) distance between the input and
-     * one of the means (overrides Machine::forward)
-     */
-    void forward(const blitz::Array<double,1>& input, double& output) const;
-
-    /**
-     * Output the minimum (Square Euclidean) distance between the input and
-     * one of the means (overrides Machine::forward_)
-     * @warning Inputs are NOT checked
-     */
-    void forward_(const blitz::Array<double,1>& input, double& output) const;
-
-
-    /**
-     * Set the means
-     */
-    void setMeans(const blitz::Array<double,2>& means);
-
-    /**
-     * Set the i'th mean
-     */
-    void setMean(const size_t i, const blitz::Array<double,1>& mean);
-
-    /**
-     * Get a mean
-     * @param[in]   i    The index of the mean
-     * @param[out] mean The mean, a 1D array, with a length equal to the number of feature dimensions.
-     */
-    const blitz::Array<double,1> getMean(const size_t i) const;
-
-    /**
-     * Get the means (i.e. a 2D array, with as many rows as means, and as
-     * many columns as feature dimensions.)
-     */
-    const blitz::Array<double,2>& getMeans() const
-    { return m_means; }
-
-     /**
-     * Get the means in order to be updated (i.e. a 2D array, with as many
-     * rows as means, and as many columns as feature dimensions.)
-     * @warning Only trainers should use this function for efficiency reasons
-     */
-    blitz::Array<double,2>& updateMeans()
-    { return m_means; }
-
-    /**
-     * Return the power of two of the (Square Euclidean) distance of the
-     * sample, x, to the i'th mean
-     * @param x The data sample (feature vector)
-     * @param i The index of the mean
-     */
-    double getDistanceFromMean(const blitz::Array<double,1>& x,
-      const size_t i) const;
-
-    /**
-     * Calculate the index of the mean that is closest
-     * (in terms of Square Euclidean distance) to the data sample, x
-     * @param x The data sample (feature vector)
-     * @param closest_mean (output) The index of the mean closest to the sample
-     * @param min_distance (output) The distance of the sample from the closest mean
-     */
-    void getClosestMean(const blitz::Array<double,1>& x,
-      size_t &closest_mean, double &min_distance) const;
-
-    /**
-     * Output the minimum (Square Euclidean) distance between the input and
-     * one of the means
-     */
-    double getMinDistance(const blitz::Array<double,1>& input) const;
-
-    /**
-     * For each mean, find the subset of the samples
-     * that is closest to that mean, and calculate
-     * 1) the variance of that subset (the cluster variance)
-     * 2) the proportion of the samples represented by that subset (the cluster weight)
-     * @param[in]  data      The data
-     * @param[out] variances The cluster variances (one row per cluster),
-     *                       with as many columns as feature dimensions.
-     * @param[out] weights   A vector of weights, one per cluster
-     */
-    void getVariancesAndWeightsForEachCluster(const blitz::Array<double,2> &data, blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const;
-    /**
-     * Methods consecutively called by getVariancesAndWeightsForEachCluster()
-     * This should help for the parallelization on several nodes by splitting the data and calling
-     * getVariancesAndWeightsForEachClusterAcc() for each split. In this case, there is a need to sum
-     * with the m_cache_means, variances, and weights variables before performing the merge on one
-     * node using getVariancesAndWeightsForEachClusterFin().
-     */
-    void getVariancesAndWeightsForEachClusterInit(blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const;
-    void getVariancesAndWeightsForEachClusterAcc(const blitz::Array<double,2> &data, blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const;
-    void getVariancesAndWeightsForEachClusterFin(blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const;
-
-    /**
-     * Get the m_cache_means array.
-     * @warning This variable should only be used in the case you want to parallelize the
-     * getVariancesAndWeightsForEachCluster() method!
-     */
-    const blitz::Array<double,2>& getCacheMeans() const
-    { return m_cache_means; }
-
-    /**
-     * Set the m_cache_means array.
-     * @warning This variable should only be used in the case you want to parallelize the
-     * getVariancesAndWeightsForEachCluster() method!
-     */
-    void setCacheMeans(const blitz::Array<double,2>& cache_means);
-
-    /**
-     * Resize the means
-     */
-    void resize(const size_t n_means, const size_t n_inputs);
-
-    /**
-     * Return the number of means
-     */
-    size_t getNMeans() const { return m_n_means; }
-
-    /**
-     * Return the number of inputs
-     */
-    size_t getNInputs() const { return m_n_inputs; }
-
-    /**
-     * Prints a KMeansMachine in the output stream
-     */
-    friend std::ostream& operator<<(std::ostream& os, const KMeansMachine& km);
-
-
-  private:
-     /**
-     * The number of means
-     */
-    size_t m_n_means;
-
-    /**
-     * The number of inputs
-     */
-    size_t m_n_inputs;
-
-    /**
-     * The means (each row is a mean)
-     */
-    blitz::Array<double,2> m_means;
-
-    /**
-     * cache to avoid re-allocation
-     */
-    mutable blitz::Array<double,2> m_cache_means;
-};
-
-} } } // namespaces
-
-#endif // BOB_LEARN_MISC_KMEANSMACHINE_H
diff --git a/bob/learn/misc/include/bob.learn.misc/KMeansTrainer.h b/bob/learn/misc/include/bob.learn.misc/KMeansTrainer.h
deleted file mode 100644
index 432c8296134c16c09154d5cee891dd94769c1cf3..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/KMeansTrainer.h
+++ /dev/null
@@ -1,187 +0,0 @@
-/**
- * @date Tue May 10 11:35:58 2011 +0200
- * @author Francois Moulin <Francois.Moulin@idiap.ch>
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-#ifndef BOB_LEARN_MISC_KMEANSTRAINER_H
-#define BOB_LEARN_MISC_KMEANSTRAINER_H
-
-#include <bob.learn.misc/KMeansMachine.h>
-#include <boost/version.hpp>
-#include <boost/random/mersenne_twister.hpp>
-
-namespace bob { namespace learn { namespace misc {
-
-/**
- * Trains a KMeans machine.
- * @brief This class implements the expectation-maximisation algorithm for a k-means machine.
- * @details See Section 9.1 of Bishop, "Pattern recognition and machine learning", 2006
- *          It uses a random initialisation of the means followed by the expectation-maximization algorithm
- */
-class KMeansTrainer
-{
-  public:
-    /**
-     * @brief This enumeration defines different initialization methods for
-     * K-means
-     */
-    typedef enum {
-      RANDOM=0,
-      RANDOM_NO_DUPLICATE
-#if BOOST_VERSION >= 104700
-      ,
-      KMEANS_PLUS_PLUS
-#endif
-    }
-    InitializationMethod;
-
-    /**
-     * @brief Constructor
-     */
-    KMeansTrainer(InitializationMethod=RANDOM);
-
-    /*     
-    KMeansTrainer(double convergence_threshold=0.001,
-      size_t max_iterations=10, bool compute_likelihood=true,
-      InitializationMethod=RANDOM);*/
-      
-
-    /**
-     * @brief Virtualize destructor
-     */
-    virtual ~KMeansTrainer() {}
-
-    /**
-     * @brief Copy constructor
-     */
-    KMeansTrainer(const KMeansTrainer& other);
-
-    /**
-     * @brief Assigns from a different machine
-     */
-    KMeansTrainer& operator=(const KMeansTrainer& other);
-
-    /**
-     * @brief Equal to
-     */
-    bool operator==(const KMeansTrainer& b) const;
-
-    /**
-     * @brief Not equal to
-     */
-    bool operator!=(const KMeansTrainer& b) const;
-
-    /**
-     * @brief The name for this trainer
-     */
-    virtual std::string name() const { return "KMeansTrainer"; }
-
-    /**
-     * @brief Initialise the means randomly.
-     * Data is split into as many chunks as there are means,
-     * then each mean is set to a random example within each chunk.
-     */
-    void initialize(bob::learn::misc::KMeansMachine& kMeansMachine,
-      const blitz::Array<double,2>& sampler);
-
-    /**
-     * @brief Accumulate across the dataset:
-     * - zeroeth and first order statistics
-     * - average (Square Euclidean) distance from the closest mean
-     * Implements EMTrainer::eStep(double &)
-     */
-    void eStep(bob::learn::misc::KMeansMachine& kmeans,
-      const blitz::Array<double,2>& data);
-
-    /**
-     * @brief Updates the mean based on the statistics from the E-step.
-     */
-    void mStep(bob::learn::misc::KMeansMachine& kmeans);
-
-    /**
-     * @brief This functions returns the average min (Square Euclidean)
-     * distance (average distance to the closest mean)
-     */
-    double computeLikelihood(bob::learn::misc::KMeansMachine& kmeans);
-
-
-    /**
-     * @brief Reset the statistics accumulators
-     * to the correct size and a value of zero.
-     */
-    bool resetAccumulators(bob::learn::misc::KMeansMachine& kMeansMachine);
-
-    /**
-     * @brief Sets the Random Number Generator
-     */
-    void setRng(const boost::shared_ptr<boost::mt19937> rng)
-    { m_rng = rng; }
-
-    /**
-     * @brief Gets the Random Number Generator
-     */
-    const boost::shared_ptr<boost::mt19937> getRng() const
-    { return m_rng; }
-
-    /**
-     * @brief Sets the initialization method used to generate the initial means
-     */
-    void setInitializationMethod(InitializationMethod v) { m_initialization_method = v; }
-
-    /**
-     * @brief Gets the initialization method used to generate the initial means
-     */
-    InitializationMethod getInitializationMethod() const { return m_initialization_method; }
-
-    /**
-     * @brief Returns the internal statistics. Useful to parallelize the E-step
-     */
-    const blitz::Array<double,1>& getZeroethOrderStats() const { return m_zeroethOrderStats; }
-    const blitz::Array<double,2>& getFirstOrderStats() const { return m_firstOrderStats; }
-    double getAverageMinDistance() const { return m_average_min_distance; }
-    /**
-     * @brief Sets the internal statistics. Useful to parallelize the E-step
-     */
-    void setZeroethOrderStats(const blitz::Array<double,1>& zeroethOrderStats);
-    void setFirstOrderStats(const blitz::Array<double,2>& firstOrderStats);
-    void setAverageMinDistance(const double value) { m_average_min_distance = value; }
-
-
-  private:
-  
-    /**
-     * @brief The initialization method
-     * Check that there is no duplicated means during the random initialization
-     */
-    InitializationMethod m_initialization_method;
-
-    /**
-     * @brief The random number generator for the inialization
-     */
-    boost::shared_ptr<boost::mt19937> m_rng;
-
-    /**
-     * @brief Average min (Square Euclidean) distance
-     */
-    double m_average_min_distance;
-
-    /**
-     * @brief Zeroeth order statistics accumulator.
-     * The k'th value in m_zeroethOrderStats is the denominator of
-     * equation 9.4, Bishop, "Pattern recognition and machine learning", 2006
-     */
-    blitz::Array<double,1> m_zeroethOrderStats;
-
-    /**
-     * @brief First order statistics accumulator.
-     * The k'th row of m_firstOrderStats is the numerator of
-     * equation 9.4, Bishop, "Pattern recognition and machine learning", 2006
-     */
-    blitz::Array<double,2> m_firstOrderStats;
-};
-
-} } } // namespaces
-
-#endif // BOB_LEARN_MISC_KMEANSTRAINER_H
diff --git a/bob/learn/misc/include/bob.learn.misc/LinearScoring.h b/bob/learn/misc/include/bob.learn.misc/LinearScoring.h
deleted file mode 100644
index aeac7189e092c7178bcaaa3d036a623d8bbc1892..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/LinearScoring.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/**
- * @date Wed Jul 13 16:00:04 2011 +0200
- * @author Francois Moulin <Francois.Moulin@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-#ifndef BOB_LEARN_MISC_LINEARSCORING_H
-#define BOB_LEARN_MISC_LINEARSCORING_H
-
-#include <blitz/array.h>
-#include <boost/shared_ptr.hpp>
-#include <vector>
-#include <bob.learn.misc/GMMMachine.h>
-
-namespace bob { namespace learn { namespace misc {
-
-/**
- * Compute a matrix of scores using linear scoring.
- *
- * @warning Each GMM must have the same size.
- *
- * @param models        list of mean supervector for the client models
- * @param ubm_mean      mean supervector of the world model
- * @param ubm_variance  variance supervector of the world model
- * @param test_stats    list of accumulate statistics for each test trial
- * @param test_channelOffset  list of channel offset if any (for JFA/ISA for instance)
- * @param frame_length_normalisation   perform a normalisation by the number of feature vectors
- * @param[out] scores 2D matrix of scores, <tt>scores[m, s]</tt> is the score for model @c m against statistics @c s
- * @warning the output scores matrix should have the correct size (number of models x number of test_stats)
- */
-void linearScoring(const std::vector<blitz::Array<double,1> >& models,
-                   const blitz::Array<double,1>& ubm_mean, const blitz::Array<double,1>& ubm_variance,
-                   const std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> >& test_stats,
-                   const std::vector<blitz::Array<double, 1> >& test_channelOffset,
-                   const bool frame_length_normalisation,
-                   blitz::Array<double,2>& scores);
-void linearScoring(const std::vector<blitz::Array<double,1> >& models,
-                   const blitz::Array<double,1>& ubm_mean, const blitz::Array<double,1>& ubm_variance,
-                   const std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> >& test_stats,
-                   const bool frame_length_normalisation,
-                   blitz::Array<double,2>& scores);
-
-/**
- * Compute a matrix of scores using linear scoring.
- *
- * @warning Each GMM must have the same size.
- *
- * @param models      list of client models as GMMMachines
- * @param ubm         world model as a GMMMachine
- * @param test_stats  list of accumulate statistics for each test trial
- * @param frame_length_normalisation   perform a normalisation by the number of feature vectors
- * @param[out] scores 2D matrix of scores, <tt>scores[m, s]</tt> is the score for model @c m against statistics @c s
- * @warning the output scores matrix should have the correct size (number of models x number of test_stats)
- */
-void linearScoring(const std::vector<boost::shared_ptr<const bob::learn::misc::GMMMachine> >& models,
-                   const bob::learn::misc::GMMMachine& ubm,
-                   const std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> >& test_stats,
-                   const bool frame_length_normalisation,
-                   blitz::Array<double,2>& scores);
-/**
- * Compute a matrix of scores using linear scoring.
- *
- * @warning Each GMM must have the same size.
- *
- * @param models      list of client models as GMMMachines
- * @param ubm         world model as a GMMMachine
- * @param test_stats  list of accumulate statistics for each test trial
- * @param test_channelOffset  list of channel offset if any (for JFA/ISA for instance)
- * @param frame_length_normalisation   perform a normalisation by the number of feature vectors
- * @param[out] scores 2D matrix of scores, <tt>scores[m, s]</tt> is the score for model @c m against statistics @c s
- * @warning the output scores matrix should have the correct size (number of models x number of test_stats)
- */
-void linearScoring(const std::vector<boost::shared_ptr<const bob::learn::misc::GMMMachine> >& models,
-                   const bob::learn::misc::GMMMachine& ubm,
-                   const std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> >& test_stats,
-                   const std::vector<blitz::Array<double, 1> >& test_channelOffset,
-                   const bool frame_length_normalisation,
-                   blitz::Array<double,2>& scores);
-
-/**
- * Compute a score using linear scoring.
- *
- * @param model         mean supervector for the client model
- * @param ubm_mean      mean supervector of the world model
- * @param ubm_variance  variance supervector of the world model
- * @param test_stats    accumulate statistics of the test trial
- * @param test_channelOffset  channel offset
- * @param frame_length_normalisation   perform a normalisation by the number of feature vectors
- */
-double linearScoring(const blitz::Array<double,1>& model,
-                   const blitz::Array<double,1>& ubm_mean, const blitz::Array<double,1>& ubm_variance,
-                   const bob::learn::misc::GMMStats& test_stats,
-                   const blitz::Array<double,1>& test_channelOffset,
-                   const bool frame_length_normalisation);
-
-} } } // namespaces
-
-#endif // BOB_LEARN_MISC_LINEARSCORING_H
diff --git a/bob/learn/misc/include/bob.learn.misc/MAP_GMMTrainer.h b/bob/learn/misc/include/bob.learn.misc/MAP_GMMTrainer.h
deleted file mode 100644
index c6c7cf73244137dcba3d38a3a21425a1042b7499..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/MAP_GMMTrainer.h
+++ /dev/null
@@ -1,170 +0,0 @@
-/**
- * @date Tue May 10 11:35:58 2011 +0200
- * @author Francois Moulin <Francois.Moulin@idiap.ch>
- *
- * @brief This class implements the maximum a posteriori M-step of the expectation-maximisation algorithm for a GMM Machine. The prior parameters are encoded in the form of a GMM (e.g. a universal background model). The EM algorithm thus performs GMM adaptation.
- * @details See Section 3.4 of Reynolds et al., "Speaker Verification Using Adapted Gaussian Mixture Models", Digital Signal Processing, 2000. We use a "single adaptation coefficient", alpha_i, and thus a single relevance factor, r.
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#ifndef BOB_LEARN_MISC_MAP_GMMTRAINER_H
-#define BOB_LEARN_MISC_MAP_GMMTRAINER_H
-
-#include <bob.learn.misc/GMMBaseTrainer.h>
-#include <limits>
-
-namespace bob { namespace learn { namespace misc {
-
-/**
- * @brief This class implements the maximum a posteriori M-step of the expectation-maximisation algorithm for a GMM Machine. The prior parameters are encoded in the form of a GMM (e.g. a universal background model). The EM algorithm thus performs GMM adaptation.
- * @details See Section 3.4 of Reynolds et al., "Speaker Verification Using Adapted Gaussian Mixture Models", Digital Signal Processing, 2000. We use a "single adaptation coefficient", alpha_i, and thus a single relevance factor, r.
- */
-class MAP_GMMTrainer
-{
-  public:
-    /**
-     * @brief Default constructor
-     */
-    MAP_GMMTrainer(
-      const bool update_means=true,
-      const bool update_variances=false, 
-      const bool update_weights=false,
-      const double mean_var_update_responsibilities_threshold = std::numeric_limits<double>::epsilon(),
-      const bool reynolds_adaptation=false, 
-      const double relevance_factor=4, 
-      const double alpha=0.5,
-      boost::shared_ptr<bob::learn::misc::GMMMachine> prior_gmm = 0);
-
-    /**
-     * @brief Copy constructor
-     */
-    MAP_GMMTrainer(const MAP_GMMTrainer& other);
-
-    /**
-     * @brief Destructor
-     */
-    virtual ~MAP_GMMTrainer();
-
-    /**
-     * @brief Initialization
-     */
-    void initialize(bob::learn::misc::GMMMachine& gmm);
-
-    /**
-     * @brief Assigns from a different MAP_GMMTrainer
-     */
-    MAP_GMMTrainer& operator=(const MAP_GMMTrainer &other);
-
-    /**
-     * @brief Equal to
-     */
-    bool operator==(const MAP_GMMTrainer& b) const;
-
-    /**
-     * @brief Not equal to
-     */
-    bool operator!=(const MAP_GMMTrainer& b) const;
-
-    /**
-     * @brief Similar to
-     */
-    bool is_similar_to(const MAP_GMMTrainer& b, const double r_epsilon=1e-5,
-      const double a_epsilon=1e-8) const;
-
-    /**
-     * @brief Set the GMM to use as a prior for MAP adaptation.
-     * Generally, this is a "universal background model" (UBM),
-     * also referred to as a "world model".
-     */
-    bool setPriorGMM(boost::shared_ptr<bob::learn::misc::GMMMachine> prior_gmm);
-
-    /**
-     * @brief Calculates and saves statistics across the dataset,
-     * and saves these as m_ss. Calculates the average
-     * log likelihood of the observations given the GMM,
-     * and returns this in average_log_likelihood.
-     *
-     * The statistics, m_ss, will be used in the mStep() that follows.
-     * Implements EMTrainer::eStep(double &)
-     */
-     void eStep(bob::learn::misc::GMMMachine& gmm,
-      const blitz::Array<double,2>& data){
-      m_gmm_base_trainer.eStep(gmm,data);
-     }
-
-
-    /**
-     * @brief Performs a maximum a posteriori (MAP) update of the GMM
-     * parameters using the accumulated statistics in m_ss and the
-     * parameters of the prior model
-     * Implements EMTrainer::mStep()
-     */
-    void mStep(bob::learn::misc::GMMMachine& gmm);
-
-    /**
-     * @brief Computes the likelihood using current estimates of the latent
-     * variables
-     */
-    double computeLikelihood(bob::learn::misc::GMMMachine& gmm){
-      return m_gmm_base_trainer.computeLikelihood(gmm);
-    }    
-    
-    bool getReynoldsAdaptation()
-    {return m_reynolds_adaptation;}
-
-    void setReynoldsAdaptation(const bool reynolds_adaptation)
-    {m_reynolds_adaptation = reynolds_adaptation;}
-    
-
-    double getRelevanceFactor()
-    {return m_relevance_factor;}
-
-    void setRelevanceFactor(const double relevance_factor)
-    {m_relevance_factor = relevance_factor;}
-
-
-    double getAlpha()
-    {return m_alpha;}
-
-    void setAlpha(const double alpha)
-    {m_alpha = alpha;}
-
-
-  protected:
-
-    /**
-     * The relevance factor for MAP adaptation, r (see Reynolds et al., \"Speaker Verification Using Adapted Gaussian Mixture Models\", Digital Signal Processing, 2000).
-     */
-    double m_relevance_factor;
-
-    /**
-    Base Trainer for the MAP algorithm. Basically implements the e-step
-    */ 
-    bob::learn::misc::GMMBaseTrainer m_gmm_base_trainer;
-
-    /**
-     * The GMM to use as a prior for MAP adaptation.
-     * Generally, this is a "universal background model" (UBM),
-     * also referred to as a "world model"
-     */
-    boost::shared_ptr<bob::learn::misc::GMMMachine> m_prior_gmm;
-
-    /**
-     * The alpha for the Torch3-like adaptation
-     */
-    double m_alpha;
-    /**
-     * Whether Torch3-like adaptation should be used or not
-     */
-    bool m_reynolds_adaptation;
-
-  private:
-    /// cache to avoid re-allocation
-    mutable blitz::Array<double,1> m_cache_alpha;
-    mutable blitz::Array<double,1> m_cache_ml_weights;
-};
-
-} } } // namespaces
-
-#endif // BOB_LEARN_MISC_MAP_GMMTRAINER_H
diff --git a/bob/learn/misc/include/bob.learn.misc/ML_GMMTrainer.h b/bob/learn/misc/include/bob.learn.misc/ML_GMMTrainer.h
deleted file mode 100644
index 13cda7423f46d517d3793c6b02071926c8739acd..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/ML_GMMTrainer.h
+++ /dev/null
@@ -1,119 +0,0 @@
-/**
- * @date Tue May 10 11:35:58 2011 +0200
- * @author Francois Moulin <Francois.Moulin@idiap.ch>
- *
- * @brief This class implements the maximum likelihood M-step of the expectation-maximisation algorithm for a GMM Machine.
- * @details See Section 9.2.2 of Bishop, "Pattern recognition and machine learning", 2006
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#ifndef BOB_LEARN_MISC_ML_GMMTRAINER_H
-#define BOB_LEARN_MISC_ML_GMMTRAINER_H
-
-#include <bob.learn.misc/GMMBaseTrainer.h>
-#include <limits>
-
-namespace bob { namespace learn { namespace misc {
-
-/**
- * @brief This class implements the maximum likelihood M-step of the
- *   expectation-maximisation algorithm for a GMM Machine.
- * @details See Section 9.2.2 of Bishop,
- *  "Pattern recognition and machine learning", 2006
- */
-class ML_GMMTrainer{
-  public:
-    /**
-     * @brief Default constructor
-     */
-    ML_GMMTrainer(const bool update_means=true,
-                  const bool update_variances=false, 
-                  const bool update_weights=false,
-                  const double mean_var_update_responsibilities_threshold = std::numeric_limits<double>::epsilon());
-
-    /**
-     * @brief Copy constructor
-     */
-    ML_GMMTrainer(const ML_GMMTrainer& other);
-
-    /**
-     * @brief Destructor
-     */
-    virtual ~ML_GMMTrainer();
-
-    /**
-     * @brief Initialisation before the EM steps
-     */
-    void initialize(bob::learn::misc::GMMMachine& gmm);
-
-    /**
-     * @brief Calculates and saves statistics across the dataset,
-     * and saves these as m_ss. Calculates the average
-     * log likelihood of the observations given the GMM,
-     * and returns this in average_log_likelihood.
-     *
-     * The statistics, m_ss, will be used in the mStep() that follows.
-     * Implements EMTrainer::eStep(double &)
-     */
-     void eStep(bob::learn::misc::GMMMachine& gmm,
-      const blitz::Array<double,2>& data){
-      m_gmm_base_trainer.eStep(gmm,data);
-     }
-
-    /**
-     * @brief Performs a maximum likelihood (ML) update of the GMM parameters
-     * using the accumulated statistics in m_ss
-     * Implements EMTrainer::mStep()
-     */
-    void mStep(bob::learn::misc::GMMMachine& gmm);
-
-    /**
-     * @brief Computes the likelihood using current estimates of the latent
-     * variables
-     */
-    double computeLikelihood(bob::learn::misc::GMMMachine& gmm){
-      return m_gmm_base_trainer.computeLikelihood(gmm);
-    }
-
-
-    /**
-     * @brief Assigns from a different ML_GMMTrainer
-     */
-    ML_GMMTrainer& operator=(const ML_GMMTrainer &other);
-
-    /**
-     * @brief Equal to
-     */
-    bool operator==(const ML_GMMTrainer& b) const;
-
-    /**
-     * @brief Not equal to
-     */
-    bool operator!=(const ML_GMMTrainer& b) const;
-
-    /**
-     * @brief Similar to
-     */
-    bool is_similar_to(const ML_GMMTrainer& b, const double r_epsilon=1e-5,
-      const double a_epsilon=1e-8) const;
-      
-    
-  protected:
-
-    /**
-    Base Trainer for the MAP algorithm. Basically implements the e-step
-    */ 
-    bob::learn::misc::GMMBaseTrainer m_gmm_base_trainer;
-
-
-  private:
-    /**
-     * @brief Add cache to avoid re-allocation at each iteration
-     */
-    mutable blitz::Array<double,1> m_cache_ss_n_thresholded;
-};
-
-} } } // namespaces
-
-#endif // BOB_LEARN_MISC_ML_GMMTRAINER_H
diff --git a/bob/learn/misc/include/bob.learn.misc/PLDAMachine.h b/bob/learn/misc/include/bob.learn.misc/PLDAMachine.h
deleted file mode 100644
index c75085781480758c75ff3652470cf595afef6da5..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/PLDAMachine.h
+++ /dev/null
@@ -1,702 +0,0 @@
-/**
- * @date Fri Oct 14 18:07:56 2011 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * @brief Machines that implements the Probabilistic Linear Discriminant
- *   Analysis Model of Prince and Helder,
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#ifndef BOB_LEARN_MISC_PLDAMACHINE_H
-#define BOB_LEARN_MISC_PLDAMACHINE_H
-
-#include <blitz/array.h>
-#include <bob.io.base/HDF5File.h>
-#include <map>
-#include <iostream>
-#include <stdexcept>
-
-namespace bob { namespace learn { namespace misc {
-
-/**
- * @brief This class is a container for the \f$F\f$, \f$G\f$ and \f$\Sigma\f$
- * matrices and the mean vector \f$\mu\f$ of a PLDA model. This also
- * precomputes useful matrices to make the model scalable.\n
- * References:\n
- * 1. 'A Scalable Formulation of Probabilistic Linear Discriminant Analysis:
- *     Applied to Face Recognition', Laurent El Shafey, Chris McCool,
- *     Roy Wallace, Sebastien Marcel, TPAMI'2013
- * 2. 'Probabilistic Linear Discriminant Analysis for Inference About
- *     Identity', Prince and Elder, ICCV'2007\n
- * 3. 'Probabilistic Models for Inference about Identity', Li, Fu, Mohammed,
- *     Elder and Prince, TPAMI'2012
- */
-class PLDABase
-{
-  public:
-    /**
-     * @brief Default constructor.\n Builds an otherwise invalid 0x0x0
-     * PLDABase.
-     */
-    PLDABase();
-    /**
-     * @brief Constructor, builds a new PLDABase.\n \f$F\f$, \f$G\f$
-     * and \f$\Sigma\f$ are initialized to the 'eye' matrix (matrix with 1's
-     * on the diagonal and 0 outside), and \f$\mu\f$ is initialized to 0.
-     *
-     * @param dim_d Dimensionality of the feature vector
-     * @param dim_f size of \f$F\f$ (dim_d x dim_f)
-     * @param dim_g size of \f$G\f$ (dim_d x dim_g)
-     * @param variance_threshold The smallest possible value of the variance
-     *                           (Ignored if set to 0.)
-     */
-    PLDABase(const size_t dim_d, const size_t dim_f,
-      const size_t dim_g, const double variance_threshold=0.);
-    /**
-     * @brief Copies another PLDABase
-     */
-    PLDABase(const PLDABase& other);
-    /**
-     * @brief Starts a new PLDABase from an existing configuration
-     * object.
-     * @param config HDF5 configuration file
-     */
-    PLDABase(bob::io::base::HDF5File& config);
-
-    /**
-     * @brief Just to virtualize the destructor
-     */
-    virtual ~PLDABase();
-
-    /**
-     * @brief Assigns from a different PLDABase
-     */
-    PLDABase& operator=(const PLDABase &other);
-
-    /**
-     * @brief Equal to.\n Even precomputed members such as \f$\alpha\f$,
-     * \f$\beta\f$ and \f$\gamma_a\f$'s are compared!
-     */
-    bool operator==(const PLDABase& b) const;
-    /**
-     * @brief Not equal to.\n Defined as the negation of operator==
-     */
-    bool operator!=(const PLDABase& b) const;
-    /**
-     * @brief Similar to.\n Even precomputed members such as \f$\alpha\f$,
-     * \f$\beta\f$ and \f$\gamma_a\f$'s are compared!
-     */
-    bool is_similar_to(const PLDABase& b, const double r_epsilon=1e-5,
-      const double a_epsilon=1e-8) const;
-
-    /**
-     * @brief Loads data from an existing configuration object. Resets the
-     * current state.
-     * @param config HDF5 configuration file
-     */
-    void load(bob::io::base::HDF5File& config);
-    /**
-     * @brief Saves an existing machine to a configuration object.
-     * @param config HDF5 configuration file
-     */
-    void save(bob::io::base::HDF5File& config) const;
-
-    /**
-     * @brief Resizes the PLDABase.
-     * @warning \f$F\f$, \f$G\f$, \f$\Sigma\f$, \f$\mu\f$ and the variance
-     * flooring thresholds will be reinitialized!
-     * @param dim_d Dimensionality of the feature vector
-     * @param dim_f Rank of \f$F\f$ (dim_d x dim_f)
-     * @param dim_g Rank of \f$G\f$ (dim_d x dim_g)
-     */
-    void resize(const size_t dim_d, const size_t dim_f, const size_t dim_g);
-
-    /**
-     * @brief Gets the \f$F\f$ subspace/matrix of the PLDA model
-     */
-    const blitz::Array<double,2>& getF() const
-    { return m_F; }
-    /**
-     * @brief Sets the \f$F\f$ subspace/matrix of the PLDA model
-     */
-    void setF(const blitz::Array<double,2>& F);
-    /**
-     * @brief Returns the current \f$F\f$ matrix/subspace of the PLDA model
-     * in order to be updated.
-     * @warning Use with care. Only trainers should use this function for
-     * efficiency reasons.
-     */
-    blitz::Array<double,2>& updateF()
-    { return m_F; }
-
-    /**
-     * @brief Gets the \f$G\f$ subspace/matrix of the PLDA model
-     */
-    const blitz::Array<double,2>& getG() const
-    { return m_G; }
-    /**
-     * @brief Sets the \f$G\f$ subspace/matrix of the PLDA model
-     */
-    void setG(const blitz::Array<double,2>& G);
-    /**
-     * @brief Returns the current \f$G\f$ subspace/matrix of the PLDA model
-     * in order to be updated.
-     * @warning Use with care. Only trainers should use this function for
-     * efficiency reasons.
-     */
-    blitz::Array<double,2>& updateG()
-    { return m_G; }
-
-    /**
-     * @brief Gets the \f$\Sigma\f$ (diagonal) covariance matrix of the PLDA
-     * model
-     */
-    const blitz::Array<double,1>& getSigma() const
-    { return m_sigma; }
-    /**
-     * @brief Sets the \f$\Sigma\f$ (diagonal) covariance matrix of the PLDA
-     * model
-     */
-    void setSigma(const blitz::Array<double,1>& s);
-    /**
-     * @brief Returns the current \f$\Sigma\f$ (diagonal) covariance matrix of
-     * the PLDA model in order to be updated.
-     * @warning Use with care. Only trainers should use this function for
-     * efficiency reasons. Variance threshold should be applied after
-     * updating \f$\Sigma\f$!
-     */
-    blitz::Array<double,1>& updateSigma()
-    { return m_sigma; }
-
-    /**
-     * @brief Gets the \f$\mu\f$ mean vector of the PLDA model
-     */
-    const blitz::Array<double,1>& getMu() const
-    { return m_mu; }
-    /**
-     * @brief Sets the \f$\mu\f$ mean vector of the PLDA model
-     */
-    void setMu(const blitz::Array<double,1>& mu);
-    /**
-     * @brief Returns the current \f$\mu\f$ mean vector of the PLDA model
-     * in order to be updated.
-     * @warning Use with care. Only trainers should use this function for
-     * efficiency reasons.
-     */
-    blitz::Array<double,1>& updateMu()
-    { return m_mu; }
-
-    /**
-     * @brief Gets the variance flooring threshold
-     */
-    double getVarianceThreshold() const
-    { return m_variance_threshold; }
-    /**
-     * @brief Sets the variance flooring threshold
-     */
-    void setVarianceThreshold(const double value);
-    /**
-     * @brief Apply the variance flooring thresholds.
-     * This method is automatically called when using setVarianceThresholds().
-     * @warning It is only useful when using updateVarianceThreshods(),
-     * and should mostly be done by trainers
-     */
-    void applyVarianceThreshold();
-
-    /**
-     * @brief Gets the feature dimensionality
-     */
-    size_t getDimD() const
-    { return m_dim_d; }
-    /**
-     * @brief Gets the size/rank the \f$F\f$ subspace/matrix of the PLDA model
-     */
-    size_t getDimF() const
-    { return m_dim_f; }
-    /**
-     * @brief Gets the size/rank the \f$G\f$ subspace/matrix of the PLDA model
-     */
-    size_t getDimG() const
-    { return m_dim_g; }
-
-    /**
-     * @brief Precomputes useful values such as \f$\Sigma^{-1}\f$,
-     * \f$G^{T}\Sigma^{-1}\f$, \f$\alpha\f$, \f$\beta\f$, and
-     * \f$F^{T}\beta\f$.
-     * @warning Previous \f$\gamma_a\f$ values and log likelihood constant
-     * terms are cleared.
-     */
-    void precompute();
-    /**
-     * @brief Precomputes useful values for the log likelihood
-     * \f$\log(\det(\alpha))\f$ and \f$\log(\det(\Sigma))\f$.
-     */
-    void precomputeLogLike();
-    /**
-     * @brief Gets the inverse vector/diagonal matrix of \f$\Sigma^{-1}\f$
-     */
-    const blitz::Array<double,1>& getISigma() const
-    { return m_cache_isigma; }
-    /**
-     * @brief Gets the \f$\alpha\f$ matrix.
-     * \f$\alpha = (Id + G^T \Sigma^{-1} G)^{-1} = \mathcal{G}\f$
-     */
-    const blitz::Array<double,2>& getAlpha() const
-    { return m_cache_alpha; }
-    /**
-     * @brief Gets the \f$\beta\f$ matrix
-     * \f$\beta = (\Sigma + G G^T)^{-1} = \mathcal{S} =
-     *    \Sigma^{-1} - \Sigma^{-1} G \mathcal{G} G^{T} \Sigma^{-1}\f$
-     */
-    const blitz::Array<double,2>& getBeta() const
-    { return m_cache_beta; }
-    /**
-     * @brief Gets the \f$\gamma_a\f$ matrix for a given \f$a\f$ (number of
-     * samples).
-     * \f$\gamma_{a} = (Id + a F^T \beta F)^{-1} = \mathcal{F}_{a}\f$
-     * @warning an exception is thrown if \f$\gamma_a\f$ does not exists
-     */
-    const blitz::Array<double,2>& getGamma(const size_t a) const;
-    /**
-     * @brief Gets the \f$\gamma_a\f$ matrix for a given \f$a\f$ (number of
-     * samples).
-     * \f$\gamma_a = (Id + a F^T \beta F)^{-1} = \mathcal{F}_{a}\f$
-     * @warning The matrix is computed if it does not already exists
-     */
-    const blitz::Array<double,2>& getAddGamma(const size_t a);
-    /**
-     * @brief Gets the \f$F^T \beta\f$ matrix
-     */
-    const blitz::Array<double,2>& getFtBeta() const
-    { return m_cache_Ft_beta; }
-    /**
-     * @brief Gets the \f$G^T \Sigma^{-1}\f$ matrix
-     */
-    const blitz::Array<double,2>& getGtISigma() const
-    { return m_cache_Gt_isigma; }
-    /**
-     * @brief Gets \f$\log(\det(\alpha))\f$
-     */
-    double getLogDetAlpha() const
-    { return m_cache_logdet_alpha; }
-    /**
-     * @brief Gets \f$\log(\det(\Sigma))\f$
-     */
-    double getLogDetSigma() const
-    { return m_cache_logdet_sigma; }
-    /**
-     * @brief Computes the log likelihood constant term for a given \f$a\f$
-     * (number of samples), given the provided \f$\gamma_a\f$ matrix
-     * \f$l_{a} = \frac{a}{2} ( -D log(2\pi) -log|\Sigma| +log|\alpha| +log|\gamma_a|)\f$
-     */
-    double computeLogLikeConstTerm(const size_t a,
-      const blitz::Array<double,2>& gamma_a) const;
-    /**
-     * @brief Computes the log likelihood constant term for a given \f$a\f$
-     * (number of samples)
-     * \f$l_{a} = \frac{a}{2} ( -D log(2\pi) -log|\Sigma| +log|\alpha| +log|\gamma_a|)\f$
-     * @warning: gamma_a will be computed and added if it does
-     *  not already exists
-     */
-    double computeLogLikeConstTerm(const size_t a);
-    /**
-     * @brief Tells if the log likelihood constant term for a given \f$a\f$
-     * (number of samples) exists
-     * \f$l_{a} = \frac{a}{2} ( -D log(2\pi) -log|\Sigma| +log|\alpha| +log|\gamma_a|)\f$
-     */
-    bool hasLogLikeConstTerm(const size_t a) const
-    { return (m_cache_loglike_constterm.find(a) != m_cache_loglike_constterm.end()); }
-    /**
-     * @brief Gets the log likelihood constant term for a given \f$a\f$
-     * (number of samples)
-     * \f$l_{a} = \frac{a}{2} ( -D log(2\pi) -log|\Sigma| +log|\alpha| +log|\gamma_a|)\f$
-     * @warning an exception is thrown if the value does not exists
-     */
-    double getLogLikeConstTerm(const size_t a) const;
-    /**
-     * @brief Gets the log likelihood constant term for a given \f$a\f$
-     * (number of samples)
-     * \f$l_{a} = \frac{a}{2} ( -D log(2\pi) -log|\Sigma| +log|\alpha| +log|\gamma_a|)\f$
-     * @warning The value is computed if it does not already exists
-     */
-    double getAddLogLikeConstTerm(const size_t a);
-
-    /**
-     * @brief Computes the \f$\gamma_a\f$ matrix for a given \f$a\f$ (number
-     * of samples) and put the result in the provided array.
-     * \f$\gamma_a = (Id + a F^T \beta F)^{-1}\f$
-     */
-    void computeGamma(const size_t a, blitz::Array<double,2> res) const;
-    /**
-     * @brief Tells if the \f$\gamma_a\f$ matrix for a given a (number of
-     * samples) exists.
-     * \f$\gamma_a = (Id + a F^T \beta F)^{-1}\f$
-     */
-    bool hasGamma(const size_t a) const
-    { return (m_cache_gamma.find(a) != m_cache_gamma.end()); }
-
-    /**
-     * @brief Clears the maps (\f$\gamma_a\f$ and loglike_constterm_a).
-     */
-    void clearMaps();
-
-    /**
-     * @brief Gets the log-likelihood of an observation, given the current model
-     * and the latent variables (point estimate).\n
-     * This will basically compute \f$p(x_{ij} | h_{i}, w_{ij}, \Theta)\f$\n
-     * , given by \n
-     * \f$\mathcal{N}(x_{ij}|[\mu + F h_{i} + G w_{ij} + \epsilon_{ij}, \Sigma])\f$\n
-     * , which is in logarithm, \n
-     * \f$-\frac{D}{2} log(2\pi) -\frac{1}{2} log(det(\Sigma)) -\frac{1}{2} {(x_{ij}-(\mu+F h_{i}+G w_{ij}))^{T}\Sigma^{-1}(x_{ij}-(\mu+F h_{i}+G w_{ij}))}\f$.
-     */
-    double computeLogLikelihoodPointEstimate(const blitz::Array<double,1>& xij,
-      const blitz::Array<double,1>& hi, const blitz::Array<double,1>& wij) const;
-
-    // Friend method declaration
-    friend std::ostream& operator<<(std::ostream& os, const PLDABase& m);
-
-
-  private:
-    // Attributes
-    size_t m_dim_d; ///< Dimensionality of the input feature vector
-    size_t m_dim_f; ///< Size/rank of the \f$F\f$ subspace
-    size_t m_dim_g; ///< Size/rank of the \f$G\f$ subspace
-    blitz::Array<double,2> m_F; ///< \f$F\f$ subspace of the PLDA model
-    blitz::Array<double,2> m_G; ///< \f$G\f$ subspace of the PLDA model
-    /**
-     * @brief \f$\Sigma\f$ diagonal (by assumption) covariance matrix of the
-     * PLDA model
-     */
-    blitz::Array<double,1> m_sigma;
-    blitz::Array<double,1> m_mu; ///< \f$\mu\f$ mean vector of the PLDA model
-    /**
-     * @brief The variance flooring thresholds, i.e. the minimum allowed
-     * value of variance m_sigma in each dimension.
-     * The variance will be set to this value if an attempt is made
-     * to set it to a smaller value.
-     */
-    double m_variance_threshold;
-
-    // Internal values very useful used to optimize the code
-    blitz::Array<double,1> m_cache_isigma; ///< \f$\Sigma^{-1}\f$
-    blitz::Array<double,2> m_cache_alpha; ///< \f$\alpha = (Id + G^T \Sigma^{-1} G)^{-1}\f$
-    /**
-     * @brief \f$\beta = (\Sigma+G G^T)^{-1} = (\Sigma^{-1} - \Sigma^{-1} G \alpha G^T \Sigma^{-1})^{-1}\f$
-     */
-    blitz::Array<double,2> m_cache_beta;
-    std::map<size_t, blitz::Array<double,2> > m_cache_gamma; ///< \f$\gamma_{a} = (Id + a F^T \beta F)^{-1}\f$
-    blitz::Array<double,2> m_cache_Ft_beta; ///< \f$F^{T} \beta \f$
-    blitz::Array<double,2> m_cache_Gt_isigma; ///< \f$G^{T} \Sigma^{-1} \f$
-    double m_cache_logdet_alpha; ///< \f$\log(\det(\alpha))\f$
-    double m_cache_logdet_sigma; ///< \f$\log(\det(\Sigma))\f$
-    /**
-     * @brief \f$l_{a} = \frac{a}{2} ( -D log(2*\pi) -log|\Sigma| +log|\alpha| +log|\gamma_a|)\f$
-     */
-    std::map<size_t, double> m_cache_loglike_constterm;
-
-    // working arrays
-    mutable blitz::Array<double,1> m_tmp_d_1; ///< Cache vector of size dim_d
-    mutable blitz::Array<double,1> m_tmp_d_2; ///< Cache vector of size dim_d
-    mutable blitz::Array<double,2> m_tmp_d_ng_1; ///< Cache matrix of size dim_d x dim_g
-    mutable blitz::Array<double,2> m_tmp_nf_nf_1; ///< Cache matrix of size dim_f x dim_f
-    mutable blitz::Array<double,2> m_tmp_ng_ng_1; ///< Cache matrix of size dim_g x dim_g
-
-    // private methods
-    void resizeNoInit(const size_t dim_d, const size_t dim_f, const size_t dim_g);
-    void resizeTmp();
-    void initMuFGSigma();
-    void precomputeISigma();
-    void precomputeAlpha();
-    void precomputeBeta();
-    void precomputeGamma(const size_t a);
-    void precomputeFtBeta();
-    void precomputeGtISigma();
-    void precomputeLogDetAlpha();
-    void precomputeLogDetSigma();
-    void precomputeLogLikeConstTerm(const size_t a);
-};
-
-
-/**
- * @brief This class is a container for an enrolled identity/class. It
- * contains information extracted from the enrollment samples. It should
- * be used in combination with a PLDABase instance.\n
- * References:\n
- * 1. 'A Scalable Formulation of Probabilistic Linear Discriminant Analysis:
- *     Applied to Face Recognition', Laurent El Shafey, Chris McCool,
- *     Roy Wallace, Sebastien Marcel, TPAMI'2013
- * 2. 'Probabilistic Linear Discriminant Analysis for Inference About
- *     Identity', Prince and Elder, ICCV'2007\n
- * 3. 'Probabilistic Models for Inference about Identity', Li, Fu, Mohammed,
- *     Elder and Prince, TPAMI'2012
- */
-class PLDAMachine
-{
-  public:
-    /**
-     * @brief Default constructor.\n
-     * Builds an otherwise invalid (No attached PLDABase) PLDAMachine.
-     */
-    PLDAMachine();
-    /**
-     * @brief Constructor, builds a new PLDAMachine, setting a
-     * PLDABase.
-     */
-    PLDAMachine(const boost::shared_ptr<bob::learn::misc::PLDABase> pldabase);
-    /**
-     * @brief Copies another PLDAMachine.\n Both PLDAMachine's will point
-     * to the same PLDABase.
-     */
-    PLDAMachine(const PLDAMachine& other);
-    /**
-     * @brief Starts a new PLDAMachine from an existing configuration object,
-     * and a PLDABase.
-     */
-    PLDAMachine(bob::io::base::HDF5File& config,
-      const boost::shared_ptr<bob::learn::misc::PLDABase> pldabase);
-
-    /**
-     * @brief Just to virtualise the destructor
-     */
-    virtual ~PLDAMachine();
-
-    /**
-     * @brief Assigns from a different machine
-     */
-    PLDAMachine& operator=(const PLDAMachine &other);
-
-    /**
-     * @brief Equal to.\n The two PLDAMachine's should have the same
-     * PLDABase. Precomputed members such as \f$\gamma_a\f$'s
-     * are compared!
-     */
-    bool operator==(const PLDAMachine& b) const;
-    /**
-     * @brief Not equal to.\n Defined as the negation of operator==
-     */
-    bool operator!=(const PLDAMachine& b) const;
-    /**
-     * @brief Equal to.\n The two PLDAMachine's should have the same
-     * PLDABase. Precomputed members such as \f$\gamma_a\f$'s
-     * are compared!
-     */
-    bool is_similar_to(const PLDAMachine& b, const double r_epsilon=1e-5,
-      const double a_epsilon=1e-8) const;
-
-    /**
-     * @brief Loads data from an existing configuration object. Resets the
-     * current state.
-     */
-    void load(bob::io::base::HDF5File& config);
-    /**
-     * @brief Saves an existing machine to a configuration object.
-     */
-    void save(bob::io::base::HDF5File& config) const;
-
-    /**
-     * @brief Gets the attached PLDABase
-     */
-    const boost::shared_ptr<PLDABase> getPLDABase() const
-    { return m_plda_base; }
-    /**
-     * @brief Sets the attached PLDABase
-     */
-    void setPLDABase(const boost::shared_ptr<bob::learn::misc::PLDABase> plda_base);
-
-    /**
-     * @brief Gets the feature dimensionality
-     */
-    size_t getDimD() const
-    { if (!m_plda_base) throw std::runtime_error("No PLDABase set to this machine");
-      return m_plda_base->getDimD(); }
-    /**
-     * @brief Gets the size/rank the \f$F\f$ subspace/matrix of the PLDA model
-     */
-    size_t getDimF() const
-    { if (!m_plda_base) throw std::runtime_error("No PLDABase set to this machine");
-      return m_plda_base->getDimF(); }
-    /**
-     * @brief Gets the size/rank the \f$G\f$ subspace/matrix of the PLDA model
-     */
-    size_t getDimG() const
-    { if (!m_plda_base) throw std::runtime_error("No PLDABase set to this machine");
-      return m_plda_base->getDimG(); }
-
-    /**
-     * @brief Gets the number of enrolled samples
-     */
-    uint64_t getNSamples() const
-    { return m_n_samples; }
-    /**
-     * @brief Sets the number of enrolled samples
-     */
-    void setNSamples(const uint64_t n_samples)
-    { m_n_samples = n_samples; }
-    /**
-     * @brief Gets the \f$A = -0.5 \sum_{i} x_{i}^T \beta x_{i}\f$ value
-     */
-    double getWSumXitBetaXi() const
-    { return m_nh_sum_xit_beta_xi; }
-    /**
-     * @brief Sets the \f$A = -0.5 \sum_{i} x_{i}^T \beta x_{i}\f$ value
-     */
-    void setWSumXitBetaXi(const double val)
-    { m_nh_sum_xit_beta_xi = val; }
-    /**
-     * @brief Gets the current \f$\sum_{i} F^T \beta x_{i}\f$ value
-     */
-    const blitz::Array<double,1>& getWeightedSum() const
-    { return m_weighted_sum; }
-    /**
-     * @brief Sets the \f$\sum_{i} F^T \beta x_{i}\f$ value
-     */
-    void setWeightedSum(const blitz::Array<double,1>& weighted_sum);
-    /**
-     * @brief Returns the current \f$\sum_{i} F^T \beta x_{i}\f$ value
-     * in order to be updated.
-     * @warning Use with care. Only trainers should use this function for
-     * efficiency reasons.
-     */
-    blitz::Array<double,1>& updateWeightedSum()
-    { return m_weighted_sum; }
-    /**
-     * @brief Gets the log likelihood of the enrollment samples
-     */
-    double getLogLikelihood() const
-    { return m_loglikelihood; }
-    /**
-     * @brief Sets the log likelihood of the enrollment samples
-     */
-    void setLogLikelihood(const double val)
-    { m_loglikelihood = val; }
-
-    /**
-     * @brief Tells if the \f$\gamma_a\f$ matrix for a given \f$a\f$ (number
-     * of samples) exists in this machine (does not check the base machine)
-     * \f$\gamma_a = (Id + a F^T \beta F)^{-1} = \mathcal{F}_{a}\f$
-     */
-    bool hasGamma(const size_t a) const
-    { return (m_cache_gamma.find(a) != m_cache_gamma.end()); }
-    /**
-     * @brief Gets the \f$\gamma_a\f$ matrix for a given \f$a\f$ (number of
-     * samples) \f$\gamma_a = (Id + a F^T \beta F)^{-1} = \mathcal{F}_{a}\f$
-     * Tries to find it from the base machine and then from this machine
-     * @warning an exception is thrown if gamma does not exists
-     */
-    const blitz::Array<double,2>& getGamma(const size_t a) const;
-    /**
-     * @brief Gets the \f$\gamma_a\f$ matrix for a given \f$a\f$ (number of
-     * samples) \f$\gamma_a = (Id + a F^T \beta F)^{-1} = \mathcal{F}_{a}\f$
-     * Tries to find it from the base machine and then from this machine
-     * @warning The matrix is computed if it does not already exists,
-     *   and stored in this machine
-     */
-    const blitz::Array<double,2>& getAddGamma(const size_t a);
-
-    /**
-     * @brief Tells if the log likelihood constant term for a given \f$a\f$
-     * (number of samples) exists in this machine
-     * (does not check the base machine)
-     * \f$l_{a} = \frac{a}{2} ( -D log(2\pi) -log|\Sigma| +log|\alpha| +log|\gamma_a|)\f$
-     */
-    bool hasLogLikeConstTerm(const size_t a) const
-    { return (m_cache_loglike_constterm.find(a) != m_cache_loglike_constterm.end()); }
-    /**
-     * @brief Gets the log likelihood constant term for a given \f$a\f$
-     * (number of samples)
-     * Tries to find it from the base machine and then from this machine
-     * \f$l_{a} = \frac{a}{2} ( -D log(2\pi) -log|\Sigma| +log|\alpha| +log|\gamma_a|)\f$
-     * @warning an exception is thrown if the value does not exists
-     */
-    double getLogLikeConstTerm(const size_t a) const;
-    /**
-     * @brief Gets the log likelihood constant term for a given \f$a\f$
-     * (number of samples)
-     * Tries to find it from the base machine and then from this machine
-     * \f$l_{a} = \frac{a}{2} ( -D log(2\pi) -log|\Sigma| +log|\alpha| +log|\gamma_a|)\f$
-     * @warning The value is computed if it does not already exists
-     */
-    double getAddLogLikeConstTerm(const size_t a);
-
-    /**
-     * @brief Clears the maps (\f$\gamma_a\f$ and loglike_constterm[a]).
-     */
-    void clearMaps();
-
-
-    /**
-     * @brief Compute the log-likelihood of the given sample and (optionally)
-     * the enrolled samples
-     */
-    double computeLogLikelihood(const blitz::Array<double,1>& sample,
-      bool with_enrolled_samples=true) const;
-    /**
-     * @brief Compute the log-likelihood of the given samples and (optionally)
-     * the enrolled samples
-     */
-    double computeLogLikelihood(const blitz::Array<double,2>& samples,
-      bool with_enrolled_samples=true) const;
-
-    /**
-     * @brief Computes a log likelihood ratio from a 1D or 2D blitz::Array
-     */
-    double forward(const blitz::Array<double,1>& sample);
-    double forward_(const blitz::Array<double,1>& sample);
-    double forward(const blitz::Array<double,2>& samples);
-
-
-  private:
-    /**
-     * @brief Associated PLDABase containing the model (\f$\mu\f$,
-     * \f$F\f$, \f$G\f$ and \f$\Sigma\f$)
-     */
-    boost::shared_ptr<PLDABase> m_plda_base;
-    uint64_t m_n_samples; ///< Number of enrollment samples
-    /**
-     * @brief Contains the value:\n
-     * \f$A = -0.5 (\sum_{i} x_{i}^{T} \Sigma^{-1} x_{i} - x_{i}^T \Sigma^{-1} G \alpha G^{T} \Sigma^{-1} x_{i})\f$\n
-     * \f$A = -0.5 \sum_{i} x_{i}^T \beta x_{i}\f$\n
-     * used in the likelihood computation (first \f$x_{i}\f$ dependent term)
-     */
-    double m_nh_sum_xit_beta_xi;
-    /**
-     * @brief Contains the value \f$\sum_{i} F^T \beta x_{i}\f$ used in the
-     * likelihood computation (for the second \f$x_{i}\f$ dependent term)
-     */
-    blitz::Array<double,1> m_weighted_sum;
-    double m_loglikelihood; ///< Log likelihood of the enrollment samples
-    /**
-     * @brief \f$\gamma_a\f$ balues which are not already in the
-     * PLDABase \f$\gamma_a = (Id + a F^T \beta F)^{-1}\f$
-     * (depend on the number of samples \f$a\f$)
-     */
-    std::map<size_t, blitz::Array<double,2> > m_cache_gamma;
-    /**
-     * @brief Log likelihood constant terms which depend on the number of
-     * samples \f$a\f$
-     * \f$l_{a} = \frac{a}{2} ( -D log(2\pi) -log|\Sigma| +log|\alpha| +log|\gamma_a|)\f$
-     */
-    std::map<size_t, double> m_cache_loglike_constterm;
-
-
-    // working arrays
-    mutable blitz::Array<double,1> m_tmp_d_1; ///< Cache vector of size dim_d
-    mutable blitz::Array<double,1> m_tmp_d_2; ///< Cache vector of size dim_d
-    mutable blitz::Array<double,1> m_tmp_nf_1; ///< Cache vector of size dim_f
-    mutable blitz::Array<double,1> m_tmp_nf_2; ///< Cache vector of size dim_f
-    mutable blitz::Array<double,2> m_tmp_nf_nf_1; ///< Cache vector of size dim_f dim_f
-
-    /**
-     * @brief Resizes the PLDAMachine
-     */
-    void resize(const size_t dim_d, const size_t dim_f, const size_t dim_g);
-    /**
-     * @brief Resize working arrays
-     */
-    void resizeTmp();
-};
-
-} } } // namespaces
-
-#endif // BOB_LEARN_MISC_PLDAMACHINE_H
diff --git a/bob/learn/misc/include/bob.learn.misc/PLDATrainer.h b/bob/learn/misc/include/bob.learn.misc/PLDATrainer.h
deleted file mode 100644
index 3323083ffa310b6750757bce45cb73d73d397379..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/PLDATrainer.h
+++ /dev/null
@@ -1,310 +0,0 @@
-/**
- * @date Fri Oct 14 18:07:56 2011 +0200
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * @brief Probabilistic PLDA Discriminant Analysis implemented using
- * Expectation Maximization.
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#ifndef BOB_LEARN_MISC_PLDA_TRAINER_H
-#define BOB_LEARN_MISC_PLDA_TRAINER_H
-
-#include <bob.learn.misc/PLDAMachine.h>
-#include <boost/shared_ptr.hpp>
-#include <vector>
-#include <map>
-#include <bob.core/array_copy.h>
-#include <boost/random.hpp>
-#include <boost/random/mersenne_twister.hpp>
-
-namespace bob { namespace learn { namespace misc {
-
-/**
- * @brief This class can be used to train the \f$F\f$, \f$G\f$ and
- * \f$\Sigma\f$ matrices and the mean vector \f$\mu\f$ of a PLDA model.\n
- * References:\n
- * 1. 'A Scalable Formulation of Probabilistic Linear Discriminant Analysis:
- *     Applied to Face Recognition', Laurent El Shafey, Chris McCool,
- *     Roy Wallace, Sebastien Marcel, TPAMI'2013
- * 2. 'Probabilistic Linear Discriminant Analysis for Inference About
- *     Identity', Prince and Elder, ICCV'2007\n
- * 3. 'Probabilistic Models for Inference about Identity', Li, Fu, Mohammed,
- *     Elder and Prince, TPAMI'2012
- */
-class PLDATrainer
-{
-  public: //api
-    /**
-     * @brief Default constructor.\n Initializes a new PLDA trainer. The
-     * training stage will place the resulting components in the
-     * PLDABase.
-     */
-    PLDATrainer(const bool use_sum_second_order);
-
-    /**
-     * @brief Copy constructor
-     */
-    PLDATrainer(const PLDATrainer& other);
-
-    /**
-     * @brief (virtual) Destructor
-     */
-    virtual ~PLDATrainer();
-
-    /**
-     * @brief Assignment operator
-     */
-    PLDATrainer& operator=(const PLDATrainer& other);
-
-    /**
-     * @brief Equal to
-     */
-    bool operator==(const PLDATrainer& other) const;
-
-    /**
-     * @brief Not equal to
-     */
-    bool operator!=(const PLDATrainer& other) const;
-
-    /**
-     * @brief Similarity operator
-     */
-    bool is_similar_to(const PLDATrainer& b,
-      const double r_epsilon=1e-5, const double a_epsilon=1e-8) const;
-
-    /**
-     * @brief Performs some initialization before the E- and M-steps.
-     */
-    void initialize(bob::learn::misc::PLDABase& machine,
-      const std::vector<blitz::Array<double,2> >& v_ar);
-    /**
-     * @brief Performs some actions after the end of the E- and M-steps.
-      */
-    void finalize(bob::learn::misc::PLDABase& machine,
-      const std::vector<blitz::Array<double,2> >& v_ar);
-
-    /**
-     * @brief Calculates and saves statistics across the dataset, and saves
-     * these as m_z_{first,second}_order.
-     * The statistics will be used in the mStep() that follows.
-     */
-    void eStep(bob::learn::misc::PLDABase& machine,
-      const std::vector<blitz::Array<double,2> >& v_ar);
-
-    /**
-     * @brief Performs a maximization step to update the parameters of the
-     * PLDABase
-     */
-    void mStep(bob::learn::misc::PLDABase& machine,
-       const std::vector<blitz::Array<double,2> >& v_ar);
-
-
-    /**
-     * @brief Sets whether the second order statistics are stored during the
-     * training procedure, or only their sum.
-     */
-    void setUseSumSecondOrder(bool v) { m_use_sum_second_order = v; }
-    /**
-     * @brief Tells whether the second order statistics are stored during the
-     * training procedure, or only their sum.
-     */
-    bool getUseSumSecondOrder() const
-    { return m_use_sum_second_order; }
-
-    /**
-     * @brief This enum defines different methods for initializing the \f$F\f$
-     * subspace
-     */
-    typedef enum {
-      RANDOM_F=0,
-      BETWEEN_SCATTER=1
-    }
-    InitFMethod;
-    /**
-     * @brief This enum defines different methods for initializing the \f$G\f$
-     * subspace
-     */
-    typedef enum {
-      RANDOM_G=0,
-      WITHIN_SCATTER=1
-    }
-    InitGMethod;
-    /**
-     * @brief This enum defines different methods for initializing the
-     * \f$\Sigma\f$ covariance matrix
-     */
-    typedef enum {
-      RANDOM_SIGMA=0,
-      VARIANCE_G=1,
-      CONSTANT=2,
-      VARIANCE_DATA=3
-    }
-    InitSigmaMethod;
-    /**
-     * @brief Sets the method used to initialize \f$F\f$
-     */
-    void setInitFMethod(const InitFMethod m) { m_initF_method = m; }
-    /**
-     * @brief Gets the method used to initialize \f$F\f$
-     */
-    InitFMethod getInitFMethod() const { return m_initF_method; }
-    /**
-     * @brief Sets the ratio value used to initialize \f$F\f$
-     */
-    void setInitFRatio(double d) { m_initF_ratio = d; }
-    /**
-     * @brief Gets the ratio value used to initialize \f$F\f$
-     */
-    double getInitFRatio() const { return m_initF_ratio; }
-    /**
-     * @brief Sets the method used to initialize \f$G\f$
-     */
-    void setInitGMethod(const InitGMethod m) { m_initG_method = m; }
-    /**
-     * @brief Gets the method used to initialize \f$G\f$
-     */
-    InitGMethod getInitGMethod() const { return m_initG_method; }
-    /**
-     * @brief Sets the ratio value used to initialize \f$G\f$
-     */
-    void setInitGRatio(double d) { m_initG_ratio = d; }
-    /**
-     * @brief Gets the ratio value used to initialize \f$G\f$
-     */
-    double getInitGRatio() const { return m_initG_ratio; }
-    /**
-     * @brief Sets the method used to initialize \f$\Sigma\f$
-     */
-    void setInitSigmaMethod(const InitSigmaMethod m)
-    { m_initSigma_method = m; }
-    /**
-     * @brief Gets the method used to initialize \f$\Sigma\f$
-     */
-    InitSigmaMethod getInitSigmaMethod() const
-    { return m_initSigma_method; }
-    /**
-     * @brief Sets the ratio value used to initialize \f$\Sigma\f$
-     */
-    void setInitSigmaRatio(double d) { m_initSigma_ratio = d; }
-    /**
-     * @brief Gets the ratio value used to initialize \f$\Sigma\f$
-     */
-    double getInitSigmaRatio() const { return m_initSigma_ratio; }
-
-    /**
-     * @brief Gets the z first order statistics (mostly for test purposes)
-     */
-    const std::vector<blitz::Array<double,2> >& getZFirstOrder() const
-    { return m_cache_z_first_order;}
-    /**
-     * @brief Gets the z second order statistics (mostly for test purposes)
-     */
-    const blitz::Array<double,2>& getZSecondOrderSum() const
-    { return m_cache_sum_z_second_order;}
-    /**
-     * @brief Gets the z second order statistics (mostly for test purposes)
-     */
-    const std::vector<blitz::Array<double,3> >& getZSecondOrder() const
-    { if(m_use_sum_second_order)
-        throw std::runtime_error("You should disable the use_sum_second_order flag to use this feature");
-      return m_cache_z_second_order;
-    }
-
-    /**
-     * @brief Main procedure for enrolling a PLDAMachine
-     */
-    void enrol(bob::learn::misc::PLDAMachine& plda_machine,
-      const blitz::Array<double,2>& ar) const;
-      
-      
-    /**
-     * @brief Sets the Random Number Generator
-     */
-    void setRng(const boost::shared_ptr<boost::mt19937> rng)
-    { m_rng = rng; }
-
-    /**
-     * @brief Gets the Random Number Generator
-     */
-    const boost::shared_ptr<boost::mt19937> getRng() const
-    { return m_rng; }      
-
-  private:
-  
-    boost::shared_ptr<boost::mt19937> m_rng;
-  
-    //representation
-    size_t m_dim_d; ///< Dimensionality of the input features
-    size_t m_dim_f; ///< Size/rank of the \f$F\f$ subspace
-    size_t m_dim_g; ///< Size/rank of the \f$G\f$ subspace
-    bool m_use_sum_second_order; ///< If set, only the sum of the second order statistics is stored/allocated
-    InitFMethod m_initF_method; ///< Initialization method for \f$F\f$
-    double m_initF_ratio; ///< Ratio/factor used for the initialization of \f$F\f$
-    InitGMethod m_initG_method; ///< Initialization method for \f$G\f$
-    double m_initG_ratio; ///< Ratio/factor used for the initialization of \f$G\f$
-    InitSigmaMethod m_initSigma_method; ///< Initialization method for \f$\Sigma\f$
-    double m_initSigma_ratio; ///< Ratio/factor used for the initialization of \f$\Sigma\f$
-
-    // Statistics and covariance computed during the training process
-    blitz::Array<double,2> m_cache_S; ///< Covariance of the training data
-    std::vector<blitz::Array<double,2> > m_cache_z_first_order; ///< Current mean of the z_{n} latent variable (1 for each sample)
-    blitz::Array<double,2> m_cache_sum_z_second_order; ///< Current sum of the covariance of the z_{n} latent variable
-    std::vector<blitz::Array<double,3> > m_cache_z_second_order; ///< Current covariance of the z_{n} latent variable
-    // Precomputed
-    /**
-     * @brief Number of training samples for each individual in the training set
-     */
-    std::vector<size_t> m_cache_n_samples_per_id;
-    /**
-     * @brief Tells if there is an identity with a 'key'/particular number of
-     * training samples, and if corresponding matrices are up to date.
-     */
-    std::map<size_t,bool> m_cache_n_samples_in_training;
-    blitz::Array<double,2> m_cache_B; ///< \f$B = [F, G]\f$ (size nfeatures x (m_dim_f+m_dim_g) )
-    blitz::Array<double,2> m_cache_Ft_isigma_G; ///< \f$F^T \Sigma^-1 G\f$
-    blitz::Array<double,2> m_cache_eta; ///< \f$F^T \Sigma^-1 G \alpha\f$
-    // Blocks (with \f$\gamma_{a}\f$) of \f$(Id + A^T \Sigma'^-1 A)^-1\f$ (efficient inversion)
-    std::map<size_t,blitz::Array<double,2> > m_cache_zeta; ///< \f$\zeta_{a} = \alpha + \eta^T \gamma_{a} \eta\f$
-    std::map<size_t,blitz::Array<double,2> > m_cache_iota; ///< \f$\iota_{a} = -\gamma_{a} \eta\f$
-
-    // Working arrays
-    mutable blitz::Array<double,1> m_tmp_nf_1; ///< vector of dimension dim_f
-    mutable blitz::Array<double,1> m_tmp_nf_2; ///< vector of dimension dim_f
-    mutable blitz::Array<double,1> m_tmp_ng_1; ///< vector of dimension dim_f
-    mutable blitz::Array<double,1> m_tmp_D_1; ///< vector of dimension dim_d
-    mutable blitz::Array<double,1> m_tmp_D_2; ///< vector of dimension dim_d
-    mutable blitz::Array<double,2> m_tmp_nfng_nfng; ///< matrix of dimension (dim_f+dim_g)x(dim_f+dim_g)
-    mutable blitz::Array<double,2> m_tmp_D_nfng_1; ///< matrix of dimension (dim_d)x(dim_f+dim_g)
-    mutable blitz::Array<double,2> m_tmp_D_nfng_2; ///< matrix of dimension (dim_d)x(dim_f+dim_g)
-
-    // internal methods
-    void computeMeanVariance(bob::learn::misc::PLDABase& machine,
-      const std::vector<blitz::Array<double,2> >& v_ar);
-    void initMembers(const std::vector<blitz::Array<double,2> >& v_ar);
-    void initFGSigma(bob::learn::misc::PLDABase& machine,
-      const std::vector<blitz::Array<double,2> >& v_ar);
-    void initF(bob::learn::misc::PLDABase& machine,
-      const std::vector<blitz::Array<double,2> >& v_ar);
-    void initG(bob::learn::misc::PLDABase& machine,
-      const std::vector<blitz::Array<double,2> >& v_ar);
-    void initSigma(bob::learn::misc::PLDABase& machine,
-      const std::vector<blitz::Array<double,2> >& v_ar);
-
-    void checkTrainingData(const std::vector<blitz::Array<double,2> >& v_ar);
-    void precomputeFromFGSigma(bob::learn::misc::PLDABase& machine);
-    void precomputeLogLike(bob::learn::misc::PLDABase& machine,
-      const std::vector<blitz::Array<double,2> >& v_ar);
-
-    void updateFG(bob::learn::misc::PLDABase& machine,
-      const std::vector<blitz::Array<double,2> >& v_ar);
-    void updateSigma(bob::learn::misc::PLDABase& machine,
-      const std::vector<blitz::Array<double,2> >& v_ar);
-
-    void resizeTmp();
-};
-
-} } } // namespaces
-
-#endif /* BOB_LEARN_MISC_PLDA_TRAINER_H */
diff --git a/bob/learn/misc/include/bob.learn.misc/ZTNorm.h b/bob/learn/misc/include/bob.learn.misc/ZTNorm.h
deleted file mode 100644
index 47f0af9c59916ca353cd48e2daab578276a5bdce..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/ZTNorm.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/**
- * @date Tue Jul 19 15:33:20 2011 +0200
- * @author Francois Moulin <Francois.Moulin@idiap.ch>
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- *
- * Copyright (C) Idiap Research Institute, Martigny, Switzerland
- */
-
-#ifndef BOB_LEARN_MISC_ZTNORM_H
-#define BOB_LEARN_MISC_ZTNORM_H
-
-#include <blitz/array.h>
-
-namespace bob { namespace learn { namespace misc {
-
-/**
- * Normalise raw scores with ZT-Norm
- *
- * @exception std::runtime_error matrix sizes are not consistent
- *
- * @param rawscores_probes_vs_models
- * @param rawscores_zprobes_vs_models
- * @param rawscores_probes_vs_tmodels
- * @param rawscores_zprobes_vs_tmodels
- * @param mask_zprobes_vs_tmodels_istruetrial
- * @param[out] normalizedscores normalized scores
- * @warning The destination score array should have the correct size
- *          (Same size as rawscores_probes_vs_models)
- */
-void ztNorm(const blitz::Array<double, 2>& rawscores_probes_vs_models,
-            const blitz::Array<double, 2>& rawscores_zprobes_vs_models,
-            const blitz::Array<double, 2>& rawscores_probes_vs_tmodels,
-            const blitz::Array<double, 2>& rawscores_zprobes_vs_tmodels,
-            const blitz::Array<bool,   2>& mask_zprobes_vs_tmodels_istruetrial,
-            blitz::Array<double, 2>& normalizedscores);
-
-/**
- * Normalise raw scores with ZT-Norm.
- * Assume that znorm and tnorm have no common subject id.
- *
- * @exception std::runtime_error matrix sizes are not consistent
- *
- * @param rawscores_probes_vs_models
- * @param rawscores_zprobes_vs_models
- * @param rawscores_probes_vs_tmodels
- * @param rawscores_zprobes_vs_tmodels
- * @param[out] normalizedscores normalized scores
- * @warning The destination score array should have the correct size
- *          (Same size as rawscores_probes_vs_models)
- */
-void ztNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
-            const blitz::Array<double,2>& rawscores_zprobes_vs_models,
-            const blitz::Array<double,2>& rawscores_probes_vs_tmodels,
-            const blitz::Array<double,2>& rawscores_zprobes_vs_tmodels,
-            blitz::Array<double,2>& normalizedscores);
-
-/**
- * Normalise raw scores with T-Norm.
- *
- * @exception std::runtime_error matrix sizes are not consistent
- *
- * @param rawscores_probes_vs_models
- * @param rawscores_probes_vs_tmodels
- * @param[out] normalizedscores normalized scores
- * @warning The destination score array should have the correct size
- *          (Same size as rawscores_probes_vs_models)
- */
-void tNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
-           const blitz::Array<double,2>& rawscores_probes_vs_tmodels,
-           blitz::Array<double,2>& normalizedscores);
-
-/**
- * Normalise raw scores with Z-Norm.
- *
- * @exception std::runtime_error matrix sizes are not consistent
- *
- * @param rawscores_probes_vs_models
- * @param rawscores_zprobes_vs_models
- * @param[out] normalizedscores normalized scores
- * @warning The destination score array should have the correct size
- *          (Same size as rawscores_probes_vs_models)
- */
-void zNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
-           const blitz::Array<double,2>& rawscores_zprobes_vs_models,
-           blitz::Array<double,2>& normalizedscores);
-
-} } } // namespaces
-
-#endif /* BOB_LEARN_MISC_ZTNORM_H */
diff --git a/bob/learn/misc/include/bob.learn.misc/api.h b/bob/learn/misc/include/bob.learn.misc/api.h
deleted file mode 100644
index 44208e5cc7b7a10c8d7605396e3227b824fa289d..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/api.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/**
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- * @date Fri 21 Nov 10:38:48 2013
- *
- * @brief Python API for bob::learn::em
- */
-
-#ifndef BOB_LEARN_EM_API_H
-#define BOB_LEARN_EM_API_H
-
-/* Define Module Name and Prefix for other Modules
-   Note: We cannot use BOB_EXT_* macros here, unfortunately */
-#define BOB_LEARN_EM_PREFIX    "bob.learn.misc"
-#define BOB_LEARN_EM_FULL_NAME "bob.learn.misc._library"
-
-#include <Python.h>
-
-#include <bob.learn.misc/config.h>
-#include <boost/shared_ptr.hpp>
-
-/*******************
- * C API functions *
- *******************/
-
-/* Enum defining entries in the function table */
-enum _PyBobLearnMisc_ENUM{
-  PyBobLearnMisc_APIVersion_NUM = 0,
-  // bindings
-  ////PyBobIpBaseLBP_Type_NUM,
-  ////PyBobIpBaseLBP_Check_NUM,
-  ////PyBobIpBaseLBP_Converter_NUM,
-  // Total number of C API pointers
-  PyBobLearnMisc_API_pointers
-};
-
-
-#ifdef BOB_LEARN_EM_MODULE
-
-  /* This section is used when compiling `bob.io.base' itself */
-
-  /**************
-   * Versioning *
-   **************/
-
-  extern int PyBobLearnMisc_APIVersion;
-
-#else // BOB_LEARN_EM_MODULE
-
-  /* This section is used in modules that use `bob.io.base's' C-API */
-
-#if defined(NO_IMPORT_ARRAY)
-  extern void **PyBobLearnMisc_API;
-#elif defined(PY_ARRAY_UNIQUE_SYMBOL)
-  void **PyBobLearnMisc_API;
-#else
-  static void **PyBobLearnMisc_API=NULL;
-#endif
-
-  /**************
-   * Versioning *
-   **************/
-
-#define PyBobLearnMisc_APIVersion (*(int *)PyBobLearnMisc_API[PyBobLearnMisc_APIVersion_NUM])
-
-#if !defined(NO_IMPORT_ARRAY)
-
-  /**
-   * Returns -1 on error, 0 on success.
-   */
-  static int import_bob_learn_misc(void) {
-
-    PyObject *c_api_object;
-    PyObject *module;
-
-    module = PyImport_ImportModule(BOB_LEARN_EM_FULL_NAME);
-
-    if (module == NULL) return -1;
-
-    c_api_object = PyObject_GetAttrString(module, "_C_API");
-
-    if (c_api_object == NULL) {
-      Py_DECREF(module);
-      return -1;
-    }
-
-#if PY_VERSION_HEX >= 0x02070000
-    if (PyCapsule_CheckExact(c_api_object)) {
-      PyBobLearnMisc_API = (void **)PyCapsule_GetPointer(c_api_object, PyCapsule_GetName(c_api_object));
-    }
-#else
-    if (PyCObject_Check(c_api_object)) {
-      PyBobLearnMisc_API = (void **)PyCObject_AsVoidPtr(c_api_object);
-    }
-#endif
-
-    Py_DECREF(c_api_object);
-    Py_DECREF(module);
-
-    if (!PyBobLearnMisc_API) {
-      PyErr_SetString(PyExc_ImportError, "cannot find C/C++ API "
-#if PY_VERSION_HEX >= 0x02070000
-          "capsule"
-#else
-          "cobject"
-#endif
-          " at `" BOB_LEARN_EM_FULL_NAME "._C_API'");
-      return -1;
-    }
-
-    /* Checks that the imported version matches the compiled version */
-    int imported_version = *(int*)PyBobLearnMisc_API[PyBobLearnMisc_APIVersion_NUM];
-
-    if (BOB_LEARN_MISC_API_VERSION != imported_version) {
-      PyErr_Format(PyExc_ImportError, BOB_LEARN_EM_FULL_NAME " import error: you compiled against API version 0x%04x, but are now importing an API with version 0x%04x which is not compatible - check your Python runtime environment for errors", BOB_LEARN_MISC_API_VERSION, imported_version);
-      return -1;
-    }
-
-    /* If you get to this point, all is good */
-    return 0;
-
-  }
-
-#endif //!defined(NO_IMPORT_ARRAY)
-
-#endif /* BOB_LEARN_EM_MODULE */
-
-#endif /* BOB_LEARN_EM_API_H */
diff --git a/bob/learn/misc/include/bob.learn.misc/config.h b/bob/learn/misc/include/bob.learn.misc/config.h
deleted file mode 100644
index c35fa5c31e9d60008edad46a7e56324d3d77919f..0000000000000000000000000000000000000000
--- a/bob/learn/misc/include/bob.learn.misc/config.h
+++ /dev/null
@@ -1,14 +0,0 @@
-/**
- * @author Manuel Guenther <manuel.guenther@idiap.ch>
- * @date Thu Aug 21 20:49:42 CEST 2014
- *
- * @brief General directives for all modules in bob.learn.misc
- */
-
-#ifndef BOB_LEARN_MISC_CONFIG_H
-#define BOB_LEARN_MISC_CONFIG_H
-
-/* Macros that define versions and important names */
-#define BOB_LEARN_MISC_API_VERSION 0x0200
-
-#endif /* BOB_LEARN_MISC_CONFIG_H */
diff --git a/bob/learn/misc/isv_base.cpp b/bob/learn/misc/isv_base.cpp
deleted file mode 100644
index 023a6d36b1375de442d32a11bfc4361dc21b137a..0000000000000000000000000000000000000000
--- a/bob/learn/misc/isv_base.cpp
+++ /dev/null
@@ -1,528 +0,0 @@
-/**
- * @date Wed Jan 28 11:13:15 2015 +0200
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- *
- * @brief Python API for bob::learn::em
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "main.h"
-
-/******************************************************************/
-/************ Constructor Section *********************************/
-/******************************************************************/
-
-static auto ISVBase_doc = bob::extension::ClassDoc(
-  BOB_EXT_MODULE_PREFIX ".ISVBase",
-
-  "A ISVBase instance can be seen as a container for U and D when performing Joint Factor Analysis (JFA)."
-  "References: [Vogt2008,McCool2013]",
-  ""
-).add_constructor(
-  bob::extension::FunctionDoc(
-    "__init__",
-    "Creates a ISVBase",
-    "",
-    true
-  )
-  .add_prototype("gmm,ru","")
-  .add_prototype("other","")
-  .add_prototype("hdf5","")
-  .add_prototype("","")
-
-  .add_parameter("gmm", ":py:class:`bob.learn.misc.GMMMachine`", "The Universal Background Model.")
-  .add_parameter("ru", "int", "Size of U (Within client variation matrix). In the end the U matrix will have (number_of_gaussians * feature_dimension x ru)")
-  .add_parameter("other", ":py:class:`bob.learn.misc.ISVBase`", "A ISVBase object to be copied.")
-  .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
-
-);
-
-
-static int PyBobLearnMiscISVBase_init_copy(PyBobLearnMiscISVBaseObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = ISVBase_doc.kwlist(1);
-  PyBobLearnMiscISVBaseObject* o;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscISVBase_Type, &o)){
-    ISVBase_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::ISVBase(*o->cxx));
-  return 0;
-}
-
-
-static int PyBobLearnMiscISVBase_init_hdf5(PyBobLearnMiscISVBaseObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = ISVBase_doc.kwlist(2);
-
-  PyBobIoHDF5FileObject* config = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBobIoHDF5File_Converter, &config)){
-    ISVBase_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::ISVBase(*(config->f)));
-
-  return 0;
-}
-
-
-static int PyBobLearnMiscISVBase_init_ubm(PyBobLearnMiscISVBaseObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = ISVBase_doc.kwlist(0);
-  
-  PyBobLearnMiscGMMMachineObject* ubm;
-  int ru = 1;
-
-  //Here we have to select which keyword argument to read  
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!i", kwlist, &PyBobLearnMiscGMMMachine_Type, &ubm, &ru)){
-    ISVBase_doc.print_usage();
-    return -1;
-  }
-  
-  if(ru < 0){
-    PyErr_Format(PyExc_TypeError, "ru argument must be greater than or equal to one");
-    return -1;
-  }
-  
-  self->cxx.reset(new bob::learn::misc::ISVBase(ubm->cxx, ru));
-  return 0;
-}
-
-
-static int PyBobLearnMiscISVBase_init(PyBobLearnMiscISVBaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // get the number of command line arguments
-  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-    
-  switch (nargs) {
-
-    case 1:{
-      //Reading the input argument
-      PyObject* arg = 0;
-      if (PyTuple_Size(args))
-        arg = PyTuple_GET_ITEM(args, 0);
-      else {
-        PyObject* tmp = PyDict_Values(kwargs);
-        auto tmp_ = make_safe(tmp);
-        arg = PyList_GET_ITEM(tmp, 0);
-      }
-
-      // If the constructor input is Gaussian object
-     if (PyBobLearnMiscISVBase_Check(arg))
-       return PyBobLearnMiscISVBase_init_copy(self, args, kwargs);
-      // If the constructor input is a HDF5
-     else if (PyBobIoHDF5File_Check(arg))
-       return PyBobLearnMiscISVBase_init_hdf5(self, args, kwargs);
-    }
-    case 2:
-      return PyBobLearnMiscISVBase_init_ubm(self, args, kwargs);
-    default:
-      PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires 1 or 2 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
-      ISVBase_doc.print_usage();
-      return -1;
-  }
-  BOB_CATCH_MEMBER("cannot create ISVBase", 0)
-  return 0;
-}
-
-
-static void PyBobLearnMiscISVBase_delete(PyBobLearnMiscISVBaseObject* self) {
-  self->cxx.reset();
-  Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-static PyObject* PyBobLearnMiscISVBase_RichCompare(PyBobLearnMiscISVBaseObject* self, PyObject* other, int op) {
-  BOB_TRY
-
-  if (!PyBobLearnMiscISVBase_Check(other)) {
-    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
-    return 0;
-  }
-  auto other_ = reinterpret_cast<PyBobLearnMiscISVBaseObject*>(other);
-  switch (op) {
-    case Py_EQ:
-      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
-    case Py_NE:
-      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
-    default:
-      Py_INCREF(Py_NotImplemented);
-      return Py_NotImplemented;
-  }
-  BOB_CATCH_MEMBER("cannot compare ISVBase objects", 0)
-}
-
-int PyBobLearnMiscISVBase_Check(PyObject* o) {
-  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnMiscISVBase_Type));
-}
-
-
-/******************************************************************/
-/************ Variables Section ***********************************/
-/******************************************************************/
-
-/***** shape *****/
-static auto shape = bob::extension::VariableDoc(
-  "shape",
-  "(int,int, int)",
-  "A tuple that represents the number of gaussians, dimensionality of each Gaussian, dimensionality of the rU (within client variability matrix) `(#Gaussians, #Inputs, #rU)`.",
-  ""
-);
-PyObject* PyBobLearnMiscISVBase_getShape(PyBobLearnMiscISVBaseObject* self, void*) {
-  BOB_TRY
-  return Py_BuildValue("(i,i,i)", self->cxx->getNGaussians(), self->cxx->getNInputs(), self->cxx->getDimRu());
-  BOB_CATCH_MEMBER("shape could not be read", 0)
-}
-
-/***** supervector_length *****/
-static auto supervector_length = bob::extension::VariableDoc(
-  "supervector_length",
-  "int",
-
-  "Returns the supervector length."
-  "NGaussians x NInputs: Number of Gaussian components by the feature dimensionality",
-  
-  "@warning An exception is thrown if no Universal Background Model has been set yet."
-);
-PyObject* PyBobLearnMiscISVBase_getSupervectorLength(PyBobLearnMiscISVBaseObject* self, void*) {
-  BOB_TRY
-  return Py_BuildValue("i", self->cxx->getSupervectorLength());
-  BOB_CATCH_MEMBER("supervector_length could not be read", 0)
-}
-
-
-/***** u *****/
-static auto U = bob::extension::VariableDoc(
-  "u",
-  "array_like <float, 2D>",
-  "Returns the U matrix (within client variability matrix)",
-  ""
-);
-PyObject* PyBobLearnMiscISVBase_getU(PyBobLearnMiscISVBaseObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getU());
-  BOB_CATCH_MEMBER("``u`` could not be read", 0)
-}
-int PyBobLearnMiscISVBase_setU(PyBobLearnMiscISVBaseObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, U.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "u");
-  if (!b) return -1;
-  self->cxx->setU(*b);
-  return 0;
-  BOB_CATCH_MEMBER("``u`` matrix could not be set", -1)
-}
-
-
-/***** d *****/
-static auto D = bob::extension::VariableDoc(
-  "d",
-  "array_like <float, 1D>",
-  "Returns the diagonal matrix diag(d) (as a 1D vector)",
-  ""
-);
-PyObject* PyBobLearnMiscISVBase_getD(PyBobLearnMiscISVBaseObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getD());
-  BOB_CATCH_MEMBER("``d`` could not be read", 0)
-}
-int PyBobLearnMiscISVBase_setD(PyBobLearnMiscISVBaseObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, D.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "d");
-  if (!b) return -1;
-  self->cxx->setD(*b);
-  return 0;
-  BOB_CATCH_MEMBER("``d`` matrix could not be set", -1)
-}
-
-
-/***** ubm *****/
-static auto ubm = bob::extension::VariableDoc(
-  "ubm",
-  ":py:class:`bob.learn.misc.GMMMachine`",
-  "Returns the UBM (Universal Background Model",
-  ""
-);
-PyObject* PyBobLearnMiscISVBase_getUBM(PyBobLearnMiscISVBaseObject* self, void*){
-  BOB_TRY
-
-  boost::shared_ptr<bob::learn::misc::GMMMachine> ubm_gmmMachine = self->cxx->getUbm();
-
-  //Allocating the correspondent python object
-  PyBobLearnMiscGMMMachineObject* retval =
-    (PyBobLearnMiscGMMMachineObject*)PyBobLearnMiscGMMMachine_Type.tp_alloc(&PyBobLearnMiscGMMMachine_Type, 0);
-  retval->cxx = ubm_gmmMachine;
-
-  return Py_BuildValue("O",retval);
-  BOB_CATCH_MEMBER("ubm could not be read", 0)
-}
-int PyBobLearnMiscISVBase_setUBM(PyBobLearnMiscISVBaseObject* self, PyObject* value, void*){
-  BOB_TRY
-
-  if (!PyBobLearnMiscGMMMachine_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a :py:class:`bob.learn.misc.GMMMachine`", Py_TYPE(self)->tp_name, ubm.name());
-    return -1;
-  }
-
-  PyBobLearnMiscGMMMachineObject* ubm_gmmMachine = 0;
-  PyArg_Parse(value, "O!", &PyBobLearnMiscGMMMachine_Type,&ubm_gmmMachine);
-
-  self->cxx->setUbm(ubm_gmmMachine->cxx);
-
-  return 0;
-  BOB_CATCH_MEMBER("ubm could not be set", -1)  
-}
-
-
-
-static PyGetSetDef PyBobLearnMiscISVBase_getseters[] = { 
-  {
-   shape.name(),
-   (getter)PyBobLearnMiscISVBase_getShape,
-   0,
-   shape.doc(),
-   0
-  },
-  
-  {
-   supervector_length.name(),
-   (getter)PyBobLearnMiscISVBase_getSupervectorLength,
-   0,
-   supervector_length.doc(),
-   0
-  },
-  
-  {
-   U.name(),
-   (getter)PyBobLearnMiscISVBase_getU,
-   (setter)PyBobLearnMiscISVBase_setU,
-   U.doc(),
-   0
-  },
-  
-  {
-   D.name(),
-   (getter)PyBobLearnMiscISVBase_getD,
-   (setter)PyBobLearnMiscISVBase_setD,
-   D.doc(),
-   0
-  },
-
-  {
-   ubm.name(),
-   (getter)PyBobLearnMiscISVBase_getUBM,
-   (setter)PyBobLearnMiscISVBase_setUBM,
-   ubm.doc(),
-   0
-  },
-
-
-  {0}  // Sentinel
-};
-
-
-/******************************************************************/
-/************ Functions Section ***********************************/
-/******************************************************************/
-
-
-/*** save ***/
-static auto save = bob::extension::FunctionDoc(
-  "save",
-  "Save the configuration of the ISVBase to a given HDF5 file"
-)
-.add_prototype("hdf5")
-.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing");
-static PyObject* PyBobLearnMiscISVBase_Save(PyBobLearnMiscISVBaseObject* self,  PyObject* args, PyObject* kwargs) {
-
-  BOB_TRY
-  
-  // get list of arguments
-  char** kwlist = save.kwlist(0);  
-  PyBobIoHDF5FileObject* hdf5;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-
-  auto hdf5_ = make_safe(hdf5);
-  self->cxx->save(*hdf5->f);
-
-  BOB_CATCH_MEMBER("cannot save the data", 0)
-  Py_RETURN_NONE;
-}
-
-/*** load ***/
-static auto load = bob::extension::FunctionDoc(
-  "load",
-  "Load the configuration of the ISVBase to a given HDF5 file"
-)
-.add_prototype("hdf5")
-.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
-static PyObject* PyBobLearnMiscISVBase_Load(PyBobLearnMiscISVBaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = load.kwlist(0);  
-  PyBobIoHDF5FileObject* hdf5;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-  
-  auto hdf5_ = make_safe(hdf5);  
-  self->cxx->load(*hdf5->f);
-
-  BOB_CATCH_MEMBER("cannot load the data", 0)
-  Py_RETURN_NONE;
-}
-
-
-/*** is_similar_to ***/
-static auto is_similar_to = bob::extension::FunctionDoc(
-  "is_similar_to",
-  
-  "Compares this ISVBase with the ``other`` one to be approximately the same.",
-  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
-  "relative and absolute precision for the ``weights``, ``biases`` "
-  "and any other values internal to this machine."
-)
-.add_prototype("other, [r_epsilon], [a_epsilon]","output")
-.add_parameter("other", ":py:class:`bob.learn.misc.ISVBase`", "A ISVBase object to be compared.")
-.add_parameter("r_epsilon", "float", "Relative precision.")
-.add_parameter("a_epsilon", "float", "Absolute precision.")
-.add_return("output","bool","True if it is similar, otherwise false.");
-static PyObject* PyBobLearnMiscISVBase_IsSimilarTo(PyBobLearnMiscISVBaseObject* self, PyObject* args, PyObject* kwds) {
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = is_similar_to.kwlist(0);
-
-  //PyObject* other = 0;
-  PyBobLearnMiscISVBaseObject* other = 0;
-  double r_epsilon = 1.e-5;
-  double a_epsilon = 1.e-8;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
-        &PyBobLearnMiscISVBase_Type, &other,
-        &r_epsilon, &a_epsilon)){
-
-        is_similar_to.print_usage(); 
-        return 0;        
-  }
-
-  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
-    Py_RETURN_TRUE;
-  else
-    Py_RETURN_FALSE;
-}
-
-
-/*** resize ***/
-static auto resize = bob::extension::FunctionDoc(
-  "resize",
-  "Resets the dimensionality of the subspace U. "
-  "U is hence uninitialized.",
-  0,
-  true
-)
-.add_prototype("rU")
-.add_parameter("rU", "int", "Size of U (Within client variation matrix)");
-static PyObject* PyBobLearnMiscISVBase_resize(PyBobLearnMiscISVBaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = resize.kwlist(0);
-
-  int rU = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &rU)) Py_RETURN_NONE;
-
-  if (rU <= 0){
-    PyErr_Format(PyExc_TypeError, "rU must be greater than zero");
-    resize.print_usage();
-    return 0;
-  }
-
-  self->cxx->resize(rU);
-
-  BOB_CATCH_MEMBER("cannot perform the resize method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-
-
-static PyMethodDef PyBobLearnMiscISVBase_methods[] = {
-  {
-    save.name(),
-    (PyCFunction)PyBobLearnMiscISVBase_Save,
-    METH_VARARGS|METH_KEYWORDS,
-    save.doc()
-  },
-  {
-    load.name(),
-    (PyCFunction)PyBobLearnMiscISVBase_Load,
-    METH_VARARGS|METH_KEYWORDS,
-    load.doc()
-  },
-  {
-    is_similar_to.name(),
-    (PyCFunction)PyBobLearnMiscISVBase_IsSimilarTo,
-    METH_VARARGS|METH_KEYWORDS,
-    is_similar_to.doc()
-  },
-  {
-    resize.name(),
-    (PyCFunction)PyBobLearnMiscISVBase_resize,
-    METH_VARARGS|METH_KEYWORDS,
-    resize.doc()
-  },
-  
-  {0} /* Sentinel */
-};
-
-
-/******************************************************************/
-/************ Module Section **************************************/
-/******************************************************************/
-
-// Define the ISV type struct; will be initialized later
-PyTypeObject PyBobLearnMiscISVBase_Type = {
-  PyVarObject_HEAD_INIT(0,0)
-  0
-};
-
-bool init_BobLearnMiscISVBase(PyObject* module)
-{
-  // initialize the type struct
-  PyBobLearnMiscISVBase_Type.tp_name      = ISVBase_doc.name();
-  PyBobLearnMiscISVBase_Type.tp_basicsize = sizeof(PyBobLearnMiscISVBaseObject);
-  PyBobLearnMiscISVBase_Type.tp_flags     = Py_TPFLAGS_DEFAULT;
-  PyBobLearnMiscISVBase_Type.tp_doc       = ISVBase_doc.doc();
-
-  // set the functions
-  PyBobLearnMiscISVBase_Type.tp_new         = PyType_GenericNew;
-  PyBobLearnMiscISVBase_Type.tp_init        = reinterpret_cast<initproc>(PyBobLearnMiscISVBase_init);
-  PyBobLearnMiscISVBase_Type.tp_dealloc     = reinterpret_cast<destructor>(PyBobLearnMiscISVBase_delete);
-  PyBobLearnMiscISVBase_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnMiscISVBase_RichCompare);
-  PyBobLearnMiscISVBase_Type.tp_methods     = PyBobLearnMiscISVBase_methods;
-  PyBobLearnMiscISVBase_Type.tp_getset      = PyBobLearnMiscISVBase_getseters;
-  //PyBobLearnMiscISVBase_Type.tp_call = reinterpret_cast<ternaryfunc>(PyBobLearnMiscISVBase_forward);
-
-
-  // check that everything is fine
-  if (PyType_Ready(&PyBobLearnMiscISVBase_Type) < 0) return false;
-
-  // add the type to the module
-  Py_INCREF(&PyBobLearnMiscISVBase_Type);
-  return PyModule_AddObject(module, "ISVBase", (PyObject*)&PyBobLearnMiscISVBase_Type) >= 0;
-}
-
diff --git a/bob/learn/misc/isv_machine.cpp b/bob/learn/misc/isv_machine.cpp
deleted file mode 100644
index 0d7f0da6e450406b5d0b86edad6221c7342108e4..0000000000000000000000000000000000000000
--- a/bob/learn/misc/isv_machine.cpp
+++ /dev/null
@@ -1,604 +0,0 @@
-/**
- * @date Wed Jan 28 13:03:15 2015 +0200
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- *
- * @brief Python API for bob::learn::em
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "main.h"
-
-/******************************************************************/
-/************ Constructor Section *********************************/
-/******************************************************************/
-
-static auto ISVMachine_doc = bob::extension::ClassDoc(
-  BOB_EXT_MODULE_PREFIX ".ISVMachine",
-  "A ISVMachine. An attached :py:class:`bob.learn.misc.ISVBase` should be provided for Joint Factor Analysis. The :py:class:`bob.learn.misc.ISVMachine` carries information about the speaker factors y and z, whereas a :py:class:`bob.learn.misc.JFABase` carries information about the matrices U, V and D."
-  "References: [Vogt2008,McCool2013]",
-  ""
-).add_constructor(
-  bob::extension::FunctionDoc(
-    "__init__",
-    "Constructor. Builds a new ISVMachine",
-    "",
-    true
-  )
-  .add_prototype("isv_base","")
-  .add_prototype("other","")
-  .add_prototype("hdf5","")
-
-  .add_parameter("isv", ":py:class:`bob.learn.misc.ISVBase`", "The ISVBase associated with this machine")
-  .add_parameter("other", ":py:class:`bob.learn.misc.ISVMachine`", "A ISVMachine object to be copied.")
-  .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
-
-);
-
-
-static int PyBobLearnMiscISVMachine_init_copy(PyBobLearnMiscISVMachineObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = ISVMachine_doc.kwlist(1);
-  PyBobLearnMiscISVMachineObject* o;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscISVMachine_Type, &o)){
-    ISVMachine_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::ISVMachine(*o->cxx));
-  return 0;
-}
-
-
-static int PyBobLearnMiscISVMachine_init_hdf5(PyBobLearnMiscISVMachineObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = ISVMachine_doc.kwlist(2);
-
-  PyBobIoHDF5FileObject* config = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBobIoHDF5File_Converter, &config)){
-    ISVMachine_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::ISVMachine(*(config->f)));
-
-  return 0;
-}
-
-
-static int PyBobLearnMiscISVMachine_init_isvbase(PyBobLearnMiscISVMachineObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = ISVMachine_doc.kwlist(0);
-  
-  PyBobLearnMiscISVBaseObject* isv_base;
-
-  //Here we have to select which keyword argument to read  
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscISVBase_Type, &isv_base)){
-    ISVMachine_doc.print_usage();
-    return -1;
-  }
-  
-  self->cxx.reset(new bob::learn::misc::ISVMachine(isv_base->cxx));
-  return 0;
-}
-
-
-static int PyBobLearnMiscISVMachine_init(PyBobLearnMiscISVMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // get the number of command line arguments
-  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-
-  if(nargs == 1){
-    //Reading the input argument
-    PyObject* arg = 0;
-    if (PyTuple_Size(args))
-      arg = PyTuple_GET_ITEM(args, 0);
-    else {
-      PyObject* tmp = PyDict_Values(kwargs);
-      auto tmp_ = make_safe(tmp);
-      arg = PyList_GET_ITEM(tmp, 0);
-    }
-
-    // If the constructor input is Gaussian object
-    if (PyBobLearnMiscISVMachine_Check(arg))
-      return PyBobLearnMiscISVMachine_init_copy(self, args, kwargs);
-    // If the constructor input is a HDF5
-    else if (PyBobIoHDF5File_Check(arg))
-      return PyBobLearnMiscISVMachine_init_hdf5(self, args, kwargs);
-    // If the constructor input is a JFABase Object
-    else
-      return PyBobLearnMiscISVMachine_init_isvbase(self, args, kwargs);
-  }
-  else{
-    PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires only 1 argument, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
-    ISVMachine_doc.print_usage();
-    return -1;
-  }
-  
-  BOB_CATCH_MEMBER("cannot create ISVMachine", 0)
-  return 0;
-}
-
-static void PyBobLearnMiscISVMachine_delete(PyBobLearnMiscISVMachineObject* self) {
-  self->cxx.reset();
-  Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-static PyObject* PyBobLearnMiscISVMachine_RichCompare(PyBobLearnMiscISVMachineObject* self, PyObject* other, int op) {
-  BOB_TRY
-
-  if (!PyBobLearnMiscISVMachine_Check(other)) {
-    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
-    return 0;
-  }
-  auto other_ = reinterpret_cast<PyBobLearnMiscISVMachineObject*>(other);
-  switch (op) {
-    case Py_EQ:
-      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
-    case Py_NE:
-      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
-    default:
-      Py_INCREF(Py_NotImplemented);
-      return Py_NotImplemented;
-  }
-  BOB_CATCH_MEMBER("cannot compare ISVMachine objects", 0)
-}
-
-int PyBobLearnMiscISVMachine_Check(PyObject* o) {
-  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnMiscISVMachine_Type));
-}
-
-
-/******************************************************************/
-/************ Variables Section ***********************************/
-/******************************************************************/
-
-/***** shape *****/
-static auto shape = bob::extension::VariableDoc(
-  "shape",
-  "(int,int, int, int)",
-  "A tuple that represents the number of gaussians, dimensionality of each Gaussian and dimensionality of the rU (within client variability matrix)) ``(#Gaussians, #Inputs, #rU)``.",
-  ""
-);
-PyObject* PyBobLearnMiscISVMachine_getShape(PyBobLearnMiscISVMachineObject* self, void*) {
-  BOB_TRY
-  return Py_BuildValue("(i,i,i)", self->cxx->getNGaussians(), self->cxx->getNInputs(), self->cxx->getDimRu());
-  BOB_CATCH_MEMBER("shape could not be read", 0)
-}
-
-/***** supervector_length *****/
-static auto supervector_length = bob::extension::VariableDoc(
-  "supervector_length",
-  "int",
-
-  "Returns the supervector length."
-  "NGaussians x NInputs: Number of Gaussian components by the feature dimensionality",
-  
-  "@warning An exception is thrown if no Universal Background Model has been set yet."
-);
-PyObject* PyBobLearnMiscISVMachine_getSupervectorLength(PyBobLearnMiscISVMachineObject* self, void*) {
-  BOB_TRY
-  return Py_BuildValue("i", self->cxx->getSupervectorLength());
-  BOB_CATCH_MEMBER("supervector_length could not be read", 0)
-}
-
-/***** z *****/
-static auto Z = bob::extension::VariableDoc(
-  "z",
-  "array_like <float, 1D>",
-  "Returns the z speaker factor. Eq (31) from [McCool2013]",
-  ""
-);
-PyObject* PyBobLearnMiscISVMachine_getZ(PyBobLearnMiscISVMachineObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getZ());
-  BOB_CATCH_MEMBER("`z` could not be read", 0)
-}
-int PyBobLearnMiscISVMachine_setZ(PyBobLearnMiscISVMachineObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, Z.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "z");
-  if (!b) return -1;
-  self->cxx->setZ(*b);
-  return 0;
-  BOB_CATCH_MEMBER("`z` vector could not be set", -1)
-}
-
-
-/***** x *****/
-static auto X = bob::extension::VariableDoc(
-  "x",
-  "array_like <float, 1D>",
-  "Returns the X session factor. Eq (29) from [McCool2013]",
-  "The latent variable x (last one computed). This is a feature provided for convenience, but this attribute is not 'part' of the machine. The session latent variable x is indeed not class-specific, but depends on the sample considered. Furthermore, it is not saved into the machine or used when comparing machines."
-);
-PyObject* PyBobLearnMiscISVMachine_getX(PyBobLearnMiscISVMachineObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getX());
-  BOB_CATCH_MEMBER("`x` could not be read", 0)
-}
-
-
-/***** isv_base *****/
-static auto isv_base = bob::extension::VariableDoc(
-  "isv_base",
-  ":py:class:`bob.learn.misc.ISVBase`",
-  "The ISVBase attached to this machine",
-  ""
-);
-PyObject* PyBobLearnMiscISVMachine_getISVBase(PyBobLearnMiscISVMachineObject* self, void*){
-  BOB_TRY
-
-  boost::shared_ptr<bob::learn::misc::ISVBase> isv_base_o = self->cxx->getISVBase();
-
-  //Allocating the correspondent python object
-  PyBobLearnMiscISVBaseObject* retval =
-    (PyBobLearnMiscISVBaseObject*)PyBobLearnMiscISVBase_Type.tp_alloc(&PyBobLearnMiscISVBase_Type, 0);
-  retval->cxx = isv_base_o;
-
-  return Py_BuildValue("O",retval);
-  BOB_CATCH_MEMBER("isv_base could not be read", 0)
-}
-int PyBobLearnMiscISVMachine_setISVBase(PyBobLearnMiscISVMachineObject* self, PyObject* value, void*){
-  BOB_TRY
-
-  if (!PyBobLearnMiscISVBase_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a :py:class:`bob.learn.misc.ISVBase`", Py_TYPE(self)->tp_name, isv_base.name());
-    return -1;
-  }
-
-  PyBobLearnMiscISVBaseObject* isv_base_o = 0;
-  PyArg_Parse(value, "O!", &PyBobLearnMiscISVBase_Type,&isv_base_o);
-
-  self->cxx->setISVBase(isv_base_o->cxx);
-
-  return 0;
-  BOB_CATCH_MEMBER("isv_base could not be set", -1)  
-}
-
-
-
-
-static PyGetSetDef PyBobLearnMiscISVMachine_getseters[] = { 
-  {
-   shape.name(),
-   (getter)PyBobLearnMiscISVMachine_getShape,
-   0,
-   shape.doc(),
-   0
-  },
-  
-  {
-   supervector_length.name(),
-   (getter)PyBobLearnMiscISVMachine_getSupervectorLength,
-   0,
-   supervector_length.doc(),
-   0
-  },
-  
-  {
-   isv_base.name(),
-   (getter)PyBobLearnMiscISVMachine_getISVBase,
-   (setter)PyBobLearnMiscISVMachine_setISVBase,
-   isv_base.doc(),
-   0
-  },
-
-  {
-   Z.name(),
-   (getter)PyBobLearnMiscISVMachine_getZ,
-   (setter)PyBobLearnMiscISVMachine_setZ,
-   Z.doc(),
-   0
-  },
-
-  {
-   X.name(),
-   (getter)PyBobLearnMiscISVMachine_getX,
-   0,
-   X.doc(),
-   0
-  },
-
-
-  {0}  // Sentinel
-};
-
-
-/******************************************************************/
-/************ Functions Section ***********************************/
-/******************************************************************/
-
-
-/*** save ***/
-static auto save = bob::extension::FunctionDoc(
-  "save",
-  "Save the configuration of the ISVMachine to a given HDF5 file"
-)
-.add_prototype("hdf5")
-.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing");
-static PyObject* PyBobLearnMiscISVMachine_Save(PyBobLearnMiscISVMachineObject* self,  PyObject* args, PyObject* kwargs) {
-
-  BOB_TRY
-  
-  // get list of arguments
-  char** kwlist = save.kwlist(0);  
-  PyBobIoHDF5FileObject* hdf5;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-
-  auto hdf5_ = make_safe(hdf5);
-  self->cxx->save(*hdf5->f);
-
-  BOB_CATCH_MEMBER("cannot save the data", 0)
-  Py_RETURN_NONE;
-}
-
-/*** load ***/
-static auto load = bob::extension::FunctionDoc(
-  "load",
-  "Load the configuration of the ISVMachine to a given HDF5 file"
-)
-.add_prototype("hdf5")
-.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
-static PyObject* PyBobLearnMiscISVMachine_Load(PyBobLearnMiscISVMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = load.kwlist(0);  
-  PyBobIoHDF5FileObject* hdf5;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-  
-  auto hdf5_ = make_safe(hdf5);  
-  self->cxx->load(*hdf5->f);
-
-  BOB_CATCH_MEMBER("cannot load the data", 0)
-  Py_RETURN_NONE;
-}
-
-
-/*** is_similar_to ***/
-static auto is_similar_to = bob::extension::FunctionDoc(
-  "is_similar_to",
-  
-  "Compares this ISVMachine with the ``other`` one to be approximately the same.",
-  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
-  "relative and absolute precision for the ``weights``, ``biases`` "
-  "and any other values internal to this machine."
-)
-.add_prototype("other, [r_epsilon], [a_epsilon]","output")
-.add_parameter("other", ":py:class:`bob.learn.misc.ISVMachine`", "A ISVMachine object to be compared.")
-.add_parameter("r_epsilon", "float", "Relative precision.")
-.add_parameter("a_epsilon", "float", "Absolute precision.")
-.add_return("output","bool","True if it is similar, otherwise false.");
-static PyObject* PyBobLearnMiscISVMachine_IsSimilarTo(PyBobLearnMiscISVMachineObject* self, PyObject* args, PyObject* kwds) {
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = is_similar_to.kwlist(0);
-
-  //PyObject* other = 0;
-  PyBobLearnMiscISVMachineObject* other = 0;
-  double r_epsilon = 1.e-5;
-  double a_epsilon = 1.e-8;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
-        &PyBobLearnMiscISVMachine_Type, &other,
-        &r_epsilon, &a_epsilon)){
-
-        is_similar_to.print_usage(); 
-        return 0;        
-  }
-
-  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
-    Py_RETURN_TRUE;
-  else
-    Py_RETURN_FALSE;
-}
-
-
-/*** estimate_x ***/
-static auto estimate_x = bob::extension::FunctionDoc(
-  "estimate_x",
-  "Estimates the session offset x (LPT assumption) given GMM statistics.",
-  "Estimates x from the GMM statistics considering the LPT assumption, that is the latent session variable x is approximated using the UBM", 
-  true
-)
-.add_prototype("stats,input")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "Statistics of the GMM")
-.add_parameter("input", "array_like <float, 1D>", "Input vector");
-static PyObject* PyBobLearnMiscISVMachine_estimateX(PyBobLearnMiscISVMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  char** kwlist = estimate_x.kwlist(0);
-
-  PyBobLearnMiscGMMStatsObject* stats = 0;
-  PyBlitzArrayObject* input           = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnMiscGMMStats_Type, &stats, 
-                                                                 &PyBlitzArray_Converter,&input))
-    Py_RETURN_NONE;
-
-  //protects acquired resources through this scope
-  auto input_ = make_safe(input);
-  self->cxx->estimateX(*stats->cxx, *PyBlitzArrayCxx_AsBlitz<double,1>(input));
-
-  BOB_CATCH_MEMBER("cannot estimate X", 0)
-  Py_RETURN_NONE;
-}
-
-
-/*** estimate_ux ***/
-static auto estimate_ux = bob::extension::FunctionDoc(
-  "estimate_ux",
-  "Estimates Ux (LPT assumption) given GMM statistics.",
-  "Estimates Ux from the GMM statistics considering the LPT assumption, that is the latent session variable x is approximated using the UBM.", 
-  true
-)
-.add_prototype("stats,input")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "Statistics of the GMM")
-.add_parameter("input", "array_like <float, 1D>", "Input vector");
-static PyObject* PyBobLearnMiscISVMachine_estimateUx(PyBobLearnMiscISVMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  char** kwlist = estimate_ux.kwlist(0);
-
-  PyBobLearnMiscGMMStatsObject* stats = 0;
-  PyBlitzArrayObject* input           = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnMiscGMMStats_Type, &stats, 
-                                                                 &PyBlitzArray_Converter,&input))
-    Py_RETURN_NONE;
-
-  //protects acquired resources through this scope
-  auto input_ = make_safe(input);
-  self->cxx->estimateUx(*stats->cxx, *PyBlitzArrayCxx_AsBlitz<double,1>(input));
-
-  BOB_CATCH_MEMBER("cannot estimate Ux", 0)
-  Py_RETURN_NONE;
-}
-
-
-/*** forward_ux ***/
-static auto forward_ux = bob::extension::FunctionDoc(
-  "forward_ux",
-  "Computes a score for the given UBM statistics and given the Ux vector",
-  "", 
-  true
-)
-.add_prototype("stats,ux")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "Statistics as input")
-.add_parameter("ux", "array_like <float, 1D>", "Input vector");
-static PyObject* PyBobLearnMiscISVMachine_ForwardUx(PyBobLearnMiscISVMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  char** kwlist = forward_ux.kwlist(0);
-
-  PyBobLearnMiscGMMStatsObject* stats = 0;
-  PyBlitzArrayObject* ux_input        = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnMiscGMMStats_Type, &stats, 
-                                                                 &PyBlitzArray_Converter,&ux_input))
-    Py_RETURN_NONE;
-
-  //protects acquired resources through this scope
-  auto ux_input_ = make_safe(ux_input);
-  double score = self->cxx->forward(*stats->cxx, *PyBlitzArrayCxx_AsBlitz<double,1>(ux_input));
-  
-  return Py_BuildValue("d", score);
-  BOB_CATCH_MEMBER("cannot forward_ux", 0)
-}
-
-
-/*** forward ***/
-static auto forward = bob::extension::FunctionDoc(
-  "forward",
-  "Execute the machine",
-  "", 
-  true
-)
-.add_prototype("stats")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "Statistics as input");
-static PyObject* PyBobLearnMiscISVMachine_Forward(PyBobLearnMiscISVMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  char** kwlist = forward.kwlist(0);
-
-  PyBobLearnMiscGMMStatsObject* stats = 0;
-  
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscGMMStats_Type, &stats))
-    Py_RETURN_NONE;
-
-  //protects acquired resources through this scope
-  double score = self->cxx->forward(*stats->cxx);
-
-  return Py_BuildValue("d", score);
-  BOB_CATCH_MEMBER("cannot forward", 0)
-
-}
-
-
-static PyMethodDef PyBobLearnMiscISVMachine_methods[] = {
-  {
-    save.name(),
-    (PyCFunction)PyBobLearnMiscISVMachine_Save,
-    METH_VARARGS|METH_KEYWORDS,
-    save.doc()
-  },
-  {
-    load.name(),
-    (PyCFunction)PyBobLearnMiscISVMachine_Load,
-    METH_VARARGS|METH_KEYWORDS,
-    load.doc()
-  },
-  {
-    is_similar_to.name(),
-    (PyCFunction)PyBobLearnMiscISVMachine_IsSimilarTo,
-    METH_VARARGS|METH_KEYWORDS,
-    is_similar_to.doc()
-  },
-  
-  {
-    estimate_x.name(),
-    (PyCFunction)PyBobLearnMiscISVMachine_estimateX,
-    METH_VARARGS|METH_KEYWORDS,
-    estimate_x.doc()
-  },
-  
-  {
-    estimate_ux.name(),
-    (PyCFunction)PyBobLearnMiscISVMachine_estimateUx,
-    METH_VARARGS|METH_KEYWORDS,
-    estimate_ux.doc()
-  },
-
-  {
-    forward_ux.name(),
-    (PyCFunction)PyBobLearnMiscISVMachine_ForwardUx,
-    METH_VARARGS|METH_KEYWORDS,
-    forward_ux.doc()
-  },
-
-  {0} /* Sentinel */
-};
-
-
-/******************************************************************/
-/************ Module Section **************************************/
-/******************************************************************/
-
-// Define the JFA type struct; will be initialized later
-PyTypeObject PyBobLearnMiscISVMachine_Type = {
-  PyVarObject_HEAD_INIT(0,0)
-  0
-};
-
-bool init_BobLearnMiscISVMachine(PyObject* module)
-{
-  // initialize the type struct
-  PyBobLearnMiscISVMachine_Type.tp_name      = ISVMachine_doc.name();
-  PyBobLearnMiscISVMachine_Type.tp_basicsize = sizeof(PyBobLearnMiscISVMachineObject);
-  PyBobLearnMiscISVMachine_Type.tp_flags     = Py_TPFLAGS_DEFAULT;
-  PyBobLearnMiscISVMachine_Type.tp_doc       = ISVMachine_doc.doc();
-
-  // set the functions
-  PyBobLearnMiscISVMachine_Type.tp_new         = PyType_GenericNew;
-  PyBobLearnMiscISVMachine_Type.tp_init        = reinterpret_cast<initproc>(PyBobLearnMiscISVMachine_init);
-  PyBobLearnMiscISVMachine_Type.tp_dealloc     = reinterpret_cast<destructor>(PyBobLearnMiscISVMachine_delete);
-  PyBobLearnMiscISVMachine_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnMiscISVMachine_RichCompare);
-  PyBobLearnMiscISVMachine_Type.tp_methods     = PyBobLearnMiscISVMachine_methods;
-  PyBobLearnMiscISVMachine_Type.tp_getset      = PyBobLearnMiscISVMachine_getseters;
-  PyBobLearnMiscISVMachine_Type.tp_call = reinterpret_cast<ternaryfunc>(PyBobLearnMiscISVMachine_Forward);
-
-
-  // check that everything is fine
-  if (PyType_Ready(&PyBobLearnMiscISVMachine_Type) < 0) return false;
-
-  // add the type to the module
-  Py_INCREF(&PyBobLearnMiscISVMachine_Type);
-  return PyModule_AddObject(module, "ISVMachine", (PyObject*)&PyBobLearnMiscISVMachine_Type) >= 0;
-}
-
diff --git a/bob/learn/misc/isv_trainer.cpp b/bob/learn/misc/isv_trainer.cpp
deleted file mode 100644
index 6ac3b83cae707b206f59a5b111338b2f3a5fd888..0000000000000000000000000000000000000000
--- a/bob/learn/misc/isv_trainer.cpp
+++ /dev/null
@@ -1,566 +0,0 @@
-/**
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- * @date Mon 02 Fev 20:20:00 2015
- *
- * @brief Python API for bob::learn::em
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "main.h"
-#include <boost/make_shared.hpp>
-
-/******************************************************************/
-/************ Constructor Section *********************************/
-/******************************************************************/
-
-static int extract_GMMStats_1d(PyObject *list,
-                             std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& training_data)
-{
-  for (int i=0; i<PyList_GET_SIZE(list); i++){
-  
-    PyBobLearnMiscGMMStatsObject* stats;
-    if (!PyArg_Parse(PyList_GetItem(list, i), "O!", &PyBobLearnMiscGMMStats_Type, &stats)){
-      PyErr_Format(PyExc_RuntimeError, "Expected GMMStats objects");
-      return -1;
-    }
-    training_data.push_back(stats->cxx);
-  }
-  return 0;
-}
-
-static int extract_GMMStats_2d(PyObject *list,
-                             std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& training_data)
-{
-  for (int i=0; i<PyList_GET_SIZE(list); i++)
-  {
-    PyObject* another_list;
-    PyArg_Parse(PyList_GetItem(list, i), "O!", &PyList_Type, &another_list);
-
-    std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > another_training_data;
-    for (int j=0; j<PyList_GET_SIZE(another_list); j++){
-
-      PyBobLearnMiscGMMStatsObject* stats;
-      if (!PyArg_Parse(PyList_GetItem(another_list, j), "O!", &PyBobLearnMiscGMMStats_Type, &stats)){
-        PyErr_Format(PyExc_RuntimeError, "Expected GMMStats objects");
-        return -1;
-      }
-      another_training_data.push_back(stats->cxx);
-    }
-    training_data.push_back(another_training_data);
-  }
-  return 0;
-}
-
-template <int N>
-static PyObject* vector_as_list(const std::vector<blitz::Array<double,N> >& vec)
-{
-  PyObject* list = PyList_New(vec.size());
-  for(size_t i=0; i<vec.size(); i++){
-    blitz::Array<double,N> numpy_array = vec[i];
-    PyObject* numpy_py_object = PyBlitzArrayCxx_AsNumpy(numpy_array);
-    PyList_SET_ITEM(list, i, numpy_py_object);
-  }
-  return list;
-}
-
-template <int N>
-int list_as_vector(PyObject* list, std::vector<blitz::Array<double,N> >& vec)
-{
-  for (int i=0; i<PyList_GET_SIZE(list); i++)
-  {
-    PyBlitzArrayObject* blitz_object; 
-    if (!PyArg_Parse(PyList_GetItem(list, i), "O&", &PyBlitzArray_Converter, &blitz_object)){
-      PyErr_Format(PyExc_RuntimeError, "Expected numpy array object");
-      return -1;
-    }
-    auto blitz_object_ = make_safe(blitz_object);
-    vec.push_back(*PyBlitzArrayCxx_AsBlitz<double,N>(blitz_object));
-  }
-  return 0;
-}
-
-
-
-static auto ISVTrainer_doc = bob::extension::ClassDoc(
-  BOB_EXT_MODULE_PREFIX ".ISVTrainer",
-  "ISVTrainer"
-  "References: [Vogt2008,McCool2013]",
-  ""
-).add_constructor(
-  bob::extension::FunctionDoc(
-    "__init__",
-    "Constructor. Builds a new ISVTrainer",
-    "",
-    true
-  )
-  .add_prototype("relevance_factor","")
-  .add_prototype("other","")
-  .add_prototype("","")
-  .add_parameter("other", ":py:class:`bob.learn.misc.ISVTrainer`", "A ISVTrainer object to be copied.")
-  .add_parameter("relevance_factor", "double", "")
-  .add_parameter("convergence_threshold", "double", "")
-);
-
-
-static int PyBobLearnMiscISVTrainer_init_copy(PyBobLearnMiscISVTrainerObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = ISVTrainer_doc.kwlist(1);
-  PyBobLearnMiscISVTrainerObject* o;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscISVTrainer_Type, &o)){
-    ISVTrainer_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::ISVTrainer(*o->cxx));
-  return 0;
-}
-
-
-static int PyBobLearnMiscISVTrainer_init_number(PyBobLearnMiscISVTrainerObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = ISVTrainer_doc.kwlist(0);
-  double relevance_factor      = 4.;
-
-  //Parsing the input argments
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "d", kwlist, &relevance_factor))
-    return -1;
-
-  if(relevance_factor < 0){
-    PyErr_Format(PyExc_TypeError, "gaussians argument must be greater than zero");
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::ISVTrainer(relevance_factor));
-  return 0;
-}
-
-
-static int PyBobLearnMiscISVTrainer_init(PyBobLearnMiscISVTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // get the number of command line arguments
-  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-
-  switch(nargs){
-    case 0:{
-      self->cxx.reset(new bob::learn::misc::ISVTrainer());
-      return 0;
-    }
-    case 1:{
-      //Reading the input argument
-      PyObject* arg = 0;
-      if (PyTuple_Size(args))
-        arg = PyTuple_GET_ITEM(args, 0);
-      else {
-        PyObject* tmp = PyDict_Values(kwargs);
-        auto tmp_ = make_safe(tmp);
-        arg = PyList_GET_ITEM(tmp, 0);
-      }
-      
-      if(PyBobLearnMiscISVTrainer_Check(arg))
-        // If the constructor input is ISVTrainer object
-        return PyBobLearnMiscISVTrainer_init_copy(self, args, kwargs);
-      else
-        return PyBobLearnMiscISVTrainer_init_number(self, args, kwargs);
-
-    }
-    default:{
-      PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires only 0 or 1 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
-      ISVTrainer_doc.print_usage();
-      return -1;
-    }
-  }
-  BOB_CATCH_MEMBER("cannot create ISVTrainer", 0)
-  return 0;
-}
-
-
-static void PyBobLearnMiscISVTrainer_delete(PyBobLearnMiscISVTrainerObject* self) {
-  self->cxx.reset();
-  Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-
-int PyBobLearnMiscISVTrainer_Check(PyObject* o) {
-  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnMiscISVTrainer_Type));
-}
-
-
-static PyObject* PyBobLearnMiscISVTrainer_RichCompare(PyBobLearnMiscISVTrainerObject* self, PyObject* other, int op) {
-  BOB_TRY
-
-  if (!PyBobLearnMiscISVTrainer_Check(other)) {
-    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
-    return 0;
-  }
-  auto other_ = reinterpret_cast<PyBobLearnMiscISVTrainerObject*>(other);
-  switch (op) {
-    case Py_EQ:
-      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
-    case Py_NE:
-      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
-    default:
-      Py_INCREF(Py_NotImplemented);
-      return Py_NotImplemented;
-  }
-  BOB_CATCH_MEMBER("cannot compare ISVTrainer objects", 0)
-}
-
-
-/******************************************************************/
-/************ Variables Section ***********************************/
-/******************************************************************/
-
-static auto acc_u_a1 = bob::extension::VariableDoc(
-  "acc_u_a1",
-  "array_like <float, 3D>",
-  "Accumulator updated during the E-step",
-  ""
-);
-PyObject* PyBobLearnMiscISVTrainer_get_acc_u_a1(PyBobLearnMiscISVTrainerObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccUA1());
-  BOB_CATCH_MEMBER("acc_u_a1 could not be read", 0)
-}
-int PyBobLearnMiscISVTrainer_set_acc_u_a1(PyBobLearnMiscISVTrainerObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 3D array of floats", Py_TYPE(self)->tp_name, acc_u_a1.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,3>(o, "acc_u_a1");
-  if (!b) return -1;
-  self->cxx->setAccUA1(*b);
-  return 0;
-  BOB_CATCH_MEMBER("acc_u_a1 could not be set", -1)
-}
-
-
-static auto acc_u_a2 = bob::extension::VariableDoc(
-  "acc_u_a2",
-  "array_like <float, 2D>",
-  "Accumulator updated during the E-step",
-  ""
-);
-PyObject* PyBobLearnMiscISVTrainer_get_acc_u_a2(PyBobLearnMiscISVTrainerObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccUA2());
-  BOB_CATCH_MEMBER("acc_u_a2 could not be read", 0)
-}
-int PyBobLearnMiscISVTrainer_set_acc_u_a2(PyBobLearnMiscISVTrainerObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, acc_u_a2.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "acc_u_a2");
-  if (!b) return -1;
-  self->cxx->setAccUA2(*b);
-  return 0;
-  BOB_CATCH_MEMBER("acc_u_a2 could not be set", -1)
-}
-
-
-
-
-
-static auto __X__ = bob::extension::VariableDoc(
-  "__X__",
-  "list",
-  "",
-  ""
-);
-PyObject* PyBobLearnMiscISVTrainer_get_X(PyBobLearnMiscISVTrainerObject* self, void*){
-  BOB_TRY
-  return vector_as_list(self->cxx->getX());
-  BOB_CATCH_MEMBER("__X__ could not be read", 0)
-}
-int PyBobLearnMiscISVTrainer_set_X(PyBobLearnMiscISVTrainerObject* self, PyObject* value, void*){
-  BOB_TRY
-
-  // Parses input arguments in a single shot
-  if (!PyList_Check(value)){
-    PyErr_Format(PyExc_TypeError, "Expected a list in `%s'", __X__.name());
-    return -1;
-  }
-    
-  std::vector<blitz::Array<double,2> > data;
-  if(list_as_vector(value ,data)==0){
-    self->cxx->setX(data);
-  }
-    
-  return 0;
-  BOB_CATCH_MEMBER("__X__ could not be written", 0)
-}
-
-
-static auto __Z__ = bob::extension::VariableDoc(
-  "__Z__",
-  "list",
-  "",
-  ""
-);
-PyObject* PyBobLearnMiscISVTrainer_get_Z(PyBobLearnMiscISVTrainerObject* self, void*){
-  BOB_TRY
-  return vector_as_list(self->cxx->getZ());
-  BOB_CATCH_MEMBER("__Z__ could not be read", 0)
-}
-int PyBobLearnMiscISVTrainer_set_Z(PyBobLearnMiscISVTrainerObject* self, PyObject* value, void*){
-  BOB_TRY
-
-  // Parses input arguments in a single shot
-  if (!PyList_Check(value)){
-    PyErr_Format(PyExc_TypeError, "Expected a list in `%s'", __Z__.name());
-    return -1;
-  }
-    
-  std::vector<blitz::Array<double,1> > data;
-  if(list_as_vector(value ,data)==0){
-    self->cxx->setZ(data);
-  }
-    
-  return 0;
-  BOB_CATCH_MEMBER("__Z__ could not be written", 0)
-}
-
-
-
-
-static PyGetSetDef PyBobLearnMiscISVTrainer_getseters[] = { 
-  {
-   acc_u_a1.name(),
-   (getter)PyBobLearnMiscISVTrainer_get_acc_u_a1,
-   (setter)PyBobLearnMiscISVTrainer_set_acc_u_a1,
-   acc_u_a1.doc(),
-   0
-  },
-  {
-   acc_u_a2.name(),
-   (getter)PyBobLearnMiscISVTrainer_get_acc_u_a2,
-   (setter)PyBobLearnMiscISVTrainer_set_acc_u_a2,
-   acc_u_a2.doc(),
-   0
-  },
-  {
-   __X__.name(),
-   (getter)PyBobLearnMiscISVTrainer_get_X,
-   (setter)PyBobLearnMiscISVTrainer_set_X,
-   __X__.doc(),
-   0
-  },
-  {
-   __Z__.name(),
-   (getter)PyBobLearnMiscISVTrainer_get_Z,
-   (setter)PyBobLearnMiscISVTrainer_set_Z,
-   __Z__.doc(),
-   0
-  },
-  
-  
-
-  {0}  // Sentinel
-};
-
-
-/******************************************************************/
-/************ Functions Section ***********************************/
-/******************************************************************/
-
-/*** initialize ***/
-static auto initialize = bob::extension::FunctionDoc(
-  "initialize",
-  "Initialization before the EM steps",
-  "",
-  true
-)
-.add_prototype("isv_base,stats")
-.add_parameter("isv_base", ":py:class:`bob.learn.misc.ISVBase`", "ISVBase Object")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "GMMStats Object");
-static PyObject* PyBobLearnMiscISVTrainer_initialize(PyBobLearnMiscISVTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = initialize.kwlist(0);
-
-  PyBobLearnMiscISVBaseObject* isv_base = 0;
-  PyObject* stats = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscISVBase_Type, &isv_base,
-                                                                 &PyList_Type, &stats)) Py_RETURN_NONE;
-
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  if(extract_GMMStats_2d(stats ,training_data)==0)
-    self->cxx->initialize(*isv_base->cxx, training_data);
-
-  BOB_CATCH_MEMBER("cannot perform the initialize method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** e_step ***/
-static auto e_step = bob::extension::FunctionDoc(
-  "e_step",
-  "Call the e-step procedure (for the U subspace).",
-  "",
-  true
-)
-.add_prototype("isv_base,stats")
-.add_parameter("isv_base", ":py:class:`bob.learn.misc.ISVBase`", "ISVBase Object")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "GMMStats Object");
-static PyObject* PyBobLearnMiscISVTrainer_e_step(PyBobLearnMiscISVTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // Parses input arguments in a single shot
-  char** kwlist = e_step.kwlist(0);
-
-  PyBobLearnMiscISVBaseObject* isv_base = 0;
-  PyObject* stats = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscISVBase_Type, &isv_base,
-                                                                 &PyList_Type, &stats)) Py_RETURN_NONE;
-
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  if(extract_GMMStats_2d(stats ,training_data)==0)
-    self->cxx->eStep(*isv_base->cxx, training_data);
-
-  BOB_CATCH_MEMBER("cannot perform the e_step method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** m_step ***/
-static auto m_step = bob::extension::FunctionDoc(
-  "m_step",
-  "Call the m-step procedure (for the U subspace).",
-  "",
-  true
-)
-.add_prototype("isv_base")
-.add_parameter("isv_base", ":py:class:`bob.learn.misc.ISVBase`", "ISVBase Object");
-static PyObject* PyBobLearnMiscISVTrainer_m_step(PyBobLearnMiscISVTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // Parses input arguments in a single shot 
-  char** kwlist = m_step.kwlist(0);
-
-  PyBobLearnMiscISVBaseObject* isv_base = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscISVBase_Type, &isv_base)) return 0;
-
-  self->cxx->mStep(*isv_base->cxx);
-
-  BOB_CATCH_MEMBER("cannot perform the m_step method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-
-/*** enrol ***/
-static auto enrol = bob::extension::FunctionDoc(
-  "enrol",
-  "",
-  "",
-  true
-)
-.add_prototype("isv_machine,features,n_iter","")
-.add_parameter("isv_machine", ":py:class:`bob.learn.misc.ISVMachine`", "ISVMachine Object")
-.add_parameter("features", "list(:py:class:`bob.learn.misc.GMMStats`)`", "")
-.add_parameter("n_iter", "int", "Number of iterations");
-static PyObject* PyBobLearnMiscISVTrainer_enrol(PyBobLearnMiscISVTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // Parses input arguments in a single shot
-  char** kwlist = enrol.kwlist(0);
-
-  PyBobLearnMiscISVMachineObject* isv_machine = 0;
-  PyObject* stats = 0;
-  int n_iter = 1;
-
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!i", kwlist, &PyBobLearnMiscISVMachine_Type, &isv_machine,
-                                                                  &PyList_Type, &stats, &n_iter)) Py_RETURN_NONE;
-
-  std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > training_data;
-  if(extract_GMMStats_1d(stats ,training_data)==0)
-    self->cxx->enrol(*isv_machine->cxx, training_data, n_iter);
-
-  BOB_CATCH_MEMBER("cannot perform the enrol method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-
-static PyMethodDef PyBobLearnMiscISVTrainer_methods[] = {
-  {
-    initialize.name(),
-    (PyCFunction)PyBobLearnMiscISVTrainer_initialize,
-    METH_VARARGS|METH_KEYWORDS,
-    initialize.doc()
-  },
-  {
-    e_step.name(),
-    (PyCFunction)PyBobLearnMiscISVTrainer_e_step,
-    METH_VARARGS|METH_KEYWORDS,
-    e_step.doc()
-  },
-  {
-    m_step.name(),
-    (PyCFunction)PyBobLearnMiscISVTrainer_m_step,
-    METH_VARARGS|METH_KEYWORDS,
-    m_step.doc()
-  },
-  {
-    enrol.name(),
-    (PyCFunction)PyBobLearnMiscISVTrainer_enrol,
-    METH_VARARGS|METH_KEYWORDS,
-    enrol.doc()
-  },
-  {0} /* Sentinel */
-};
-
-
-/******************************************************************/
-/************ Module Section **************************************/
-/******************************************************************/
-
-// Define the Gaussian type struct; will be initialized later
-PyTypeObject PyBobLearnMiscISVTrainer_Type = {
-  PyVarObject_HEAD_INIT(0,0)
-  0
-};
-
-bool init_BobLearnMiscISVTrainer(PyObject* module)
-{
-  // initialize the type struct
-  PyBobLearnMiscISVTrainer_Type.tp_name      = ISVTrainer_doc.name();
-  PyBobLearnMiscISVTrainer_Type.tp_basicsize = sizeof(PyBobLearnMiscISVTrainerObject);
-  PyBobLearnMiscISVTrainer_Type.tp_flags     = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;//Enable the class inheritance;
-  PyBobLearnMiscISVTrainer_Type.tp_doc       = ISVTrainer_doc.doc();
-
-  // set the functions
-  PyBobLearnMiscISVTrainer_Type.tp_new          = PyType_GenericNew;
-  PyBobLearnMiscISVTrainer_Type.tp_init         = reinterpret_cast<initproc>(PyBobLearnMiscISVTrainer_init);
-  PyBobLearnMiscISVTrainer_Type.tp_dealloc      = reinterpret_cast<destructor>(PyBobLearnMiscISVTrainer_delete);
-  PyBobLearnMiscISVTrainer_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnMiscISVTrainer_RichCompare);
-  PyBobLearnMiscISVTrainer_Type.tp_methods      = PyBobLearnMiscISVTrainer_methods;
-  PyBobLearnMiscISVTrainer_Type.tp_getset       = PyBobLearnMiscISVTrainer_getseters;
-  //PyBobLearnMiscISVTrainer_Type.tp_call         = reinterpret_cast<ternaryfunc>(PyBobLearnMiscISVTrainer_compute_likelihood);
-
-
-  // check that everything is fine
-  if (PyType_Ready(&PyBobLearnMiscISVTrainer_Type) < 0) return false;
-
-  // add the type to the module
-  Py_INCREF(&PyBobLearnMiscISVTrainer_Type);
-  return PyModule_AddObject(module, "_ISVTrainer", (PyObject*)&PyBobLearnMiscISVTrainer_Type) >= 0;
-}
-
diff --git a/bob/learn/misc/ivector_machine.cpp b/bob/learn/misc/ivector_machine.cpp
deleted file mode 100644
index 5251905af23293d566b9ed9efb24d9838f90120a..0000000000000000000000000000000000000000
--- a/bob/learn/misc/ivector_machine.cpp
+++ /dev/null
@@ -1,676 +0,0 @@
-/**
- * @date Wed Jan 28 17:46:15 2015 +0200
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- *
- * @brief Python API for bob::learn::em
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "main.h"
-
-/******************************************************************/
-/************ Constructor Section *********************************/
-/******************************************************************/
-
-static auto IVectorMachine_doc = bob::extension::ClassDoc(
-  BOB_EXT_MODULE_PREFIX ".IVectorMachine",
-  "An IVectorMachine consists of a Total Variability subspace \f$T\f$ and allows the extraction of IVector"
-  "References: [Dehak2010]",
-  ""
-).add_constructor(
-  bob::extension::FunctionDoc(
-    "__init__",
-    "Constructor. Builds a new IVectorMachine",
-    "",
-    true
-  )
-  .add_prototype("ubm, rt, variance_threshold","")
-  .add_prototype("other","")
-  .add_prototype("hdf5","")
-
-  .add_parameter("ubm", ":py:class:`bob.learn.misc.GMMMachine`", "The Universal Background Model.")
-  .add_parameter("rt", "int", "Size of the Total Variability matrix (CD x rt).")
-  .add_parameter("variance_threshold", "double", "Variance flooring threshold for the :math:`\\Sigma` (diagonal) matrix")
-
-  .add_parameter("other", ":py:class:`bob.learn.misc.IVectorMachine`", "A IVectorMachine object to be copied.")
-  .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
-
-);
-
-
-static int PyBobLearnMiscIVectorMachine_init_copy(PyBobLearnMiscIVectorMachineObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = IVectorMachine_doc.kwlist(1);
-  PyBobLearnMiscIVectorMachineObject* o;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscIVectorMachine_Type, &o)){
-    IVectorMachine_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::IVectorMachine(*o->cxx));
-  return 0;
-}
-
-
-static int PyBobLearnMiscIVectorMachine_init_hdf5(PyBobLearnMiscIVectorMachineObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = IVectorMachine_doc.kwlist(2);
-
-  PyBobIoHDF5FileObject* config = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBobIoHDF5File_Converter, &config)){
-    IVectorMachine_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::IVectorMachine(*(config->f)));
-
-  return 0;
-}
-
-
-static int PyBobLearnMiscIVectorMachine_init_ubm(PyBobLearnMiscIVectorMachineObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = IVectorMachine_doc.kwlist(0);
-  
-  PyBobLearnMiscGMMMachineObject* gmm_machine;
-  int rt = 1;
-  double variance_threshold = 1e-10;
-
-  //Here we have to select which keyword argument to read  
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!i|d", kwlist, &PyBobLearnMiscGMMMachine_Type, &gmm_machine, &rt, &variance_threshold)){
-    IVectorMachine_doc.print_usage();
-    return -1;
-  }
-    
-  if(rt < 1){
-    PyErr_Format(PyExc_TypeError, "rt argument must be greater than or equal to one");
-    return -1;
-  }
-  
-  if(variance_threshold <= 0){
-    PyErr_Format(PyExc_TypeError, "variance_threshold argument must be greater than zero");
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::IVectorMachine(gmm_machine->cxx, rt, variance_threshold));
-  return 0;
-}
-
-
-static int PyBobLearnMiscIVectorMachine_init(PyBobLearnMiscIVectorMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // get the number of command line arguments
-  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-
-  if(nargs == 1){
-    //Reading the input argument
-    PyObject* arg = 0;
-    if (PyTuple_Size(args))
-      arg = PyTuple_GET_ITEM(args, 0);
-    else {
-      PyObject* tmp = PyDict_Values(kwargs);
-      auto tmp_ = make_safe(tmp);
-      arg = PyList_GET_ITEM(tmp, 0);
-    }
-
-    // If the constructor input is Gaussian object
-    if (PyBobLearnMiscIVectorMachine_Check(arg))
-      return PyBobLearnMiscIVectorMachine_init_copy(self, args, kwargs);
-    // If the constructor input is a HDF5
-    else
-      return PyBobLearnMiscIVectorMachine_init_hdf5(self, args, kwargs);
-  }
-  else if ((nargs == 2) || (nargs == 3))
-    PyBobLearnMiscIVectorMachine_init_ubm(self, args, kwargs);
-  else{
-    PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires 1,2 or 3 argument, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
-    IVectorMachine_doc.print_usage();
-    return -1;
-  }
-  
-  BOB_CATCH_MEMBER("cannot create IVectorMachine", 0)
-  return 0;
-}
-
-static void PyBobLearnMiscIVectorMachine_delete(PyBobLearnMiscIVectorMachineObject* self) {
-  self->cxx.reset();
-  Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-static PyObject* PyBobLearnMiscIVectorMachine_RichCompare(PyBobLearnMiscIVectorMachineObject* self, PyObject* other, int op) {
-  BOB_TRY
-
-  if (!PyBobLearnMiscIVectorMachine_Check(other)) {
-    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
-    return 0;
-  }
-  auto other_ = reinterpret_cast<PyBobLearnMiscIVectorMachineObject*>(other);
-  switch (op) {
-    case Py_EQ:
-      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
-    case Py_NE:
-      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
-    default:
-      Py_INCREF(Py_NotImplemented);
-      return Py_NotImplemented;
-  }
-  BOB_CATCH_MEMBER("cannot compare IVectorMachine objects", 0)
-}
-
-int PyBobLearnMiscIVectorMachine_Check(PyObject* o) {
-  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnMiscIVectorMachine_Type));
-}
-
-
-/******************************************************************/
-/************ Variables Section ***********************************/
-/******************************************************************/
-
-/***** shape *****/
-static auto shape = bob::extension::VariableDoc(
-  "shape",
-  "(int,int, int)",
-  "A tuple that represents the number of gaussians, dimensionality of each Gaussian, dimensionality of the rT (total variability matrix) ``(#Gaussians, #Inputs, #rT)``.",
-  ""
-);
-PyObject* PyBobLearnMiscIVectorMachine_getShape(PyBobLearnMiscIVectorMachineObject* self, void*) {
-  BOB_TRY
-  return Py_BuildValue("(i,i,i)", self->cxx->getNGaussians(), self->cxx->getNInputs(), self->cxx->getDimRt());
-  BOB_CATCH_MEMBER("shape could not be read", 0)
-}
-
-/***** supervector_length *****/
-static auto supervector_length = bob::extension::VariableDoc(
-  "supervector_length",
-  "int",
-
-  "Returns the supervector length."
-  "NGaussians x NInputs: Number of Gaussian components by the feature dimensionality",
-  
-  "@warning An exception is thrown if no Universal Background Model has been set yet."
-);
-PyObject* PyBobLearnMiscIVectorMachine_getSupervectorLength(PyBobLearnMiscIVectorMachineObject* self, void*) {
-  BOB_TRY
-  return Py_BuildValue("i", self->cxx->getSupervectorLength());
-  BOB_CATCH_MEMBER("supervector_length could not be read", 0)
-}
-
-
-/***** T *****/
-static auto T = bob::extension::VariableDoc(
-  "t",
-  "array_like <float, 2D>",
-  "Returns the Total Variability matrix",
-  ""
-);
-PyObject* PyBobLearnMiscIVectorMachine_getT(PyBobLearnMiscIVectorMachineObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getT());
-  BOB_CATCH_MEMBER("`t` could not be read", 0)
-}
-int PyBobLearnMiscIVectorMachine_setT(PyBobLearnMiscIVectorMachineObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, T.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "t");
-  if (!b) return -1;
-  self->cxx->setT(*b);
-  return 0;
-  BOB_CATCH_MEMBER("`t` vector could not be set", -1)
-}
-
-
-/***** sigma *****/
-static auto sigma = bob::extension::VariableDoc(
-  "sigma",
-  "array_like <float, 1D>",
-  "The residual matrix of the model sigma",
-  ""
-);
-PyObject* PyBobLearnMiscIVectorMachine_getSigma(PyBobLearnMiscIVectorMachineObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getSigma());
-  BOB_CATCH_MEMBER("`sigma` could not be read", 0)
-}
-int PyBobLearnMiscIVectorMachine_setSigma(PyBobLearnMiscIVectorMachineObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, sigma.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "sigma");
-  if (!b) return -1;
-  self->cxx->setSigma(*b);
-  return 0;
-  BOB_CATCH_MEMBER("`sigma` vector could not be set", -1)
-}
-
-
-/***** variance_threshold *****/
-static auto variance_threshold = bob::extension::VariableDoc(
-  "variance_threshold",
-  "double",
-  "Threshold for the variance contained in sigma",
-  ""
-);
-PyObject* PyBobLearnMiscIVectorMachine_getVarianceThreshold(PyBobLearnMiscIVectorMachineObject* self, void*) {
-  BOB_TRY
-  return Py_BuildValue("d", self->cxx->getVarianceThreshold());
-  BOB_CATCH_MEMBER("variance_threshold could not be read", 0)
-}
-int PyBobLearnMiscIVectorMachine_setVarianceThreshold(PyBobLearnMiscIVectorMachineObject* self, PyObject* value, void*){
-  BOB_TRY
-
-  if (!PyNumber_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects an double", Py_TYPE(self)->tp_name, variance_threshold.name());
-    return -1;
-  }
-
-  if (PyFloat_AS_DOUBLE(value) < 0){
-    PyErr_Format(PyExc_TypeError, "variance_threshold must be greater than or equal to zero");
-    return -1;
-  }
-
-  self->cxx->setVarianceThreshold(PyFloat_AS_DOUBLE(value));
-  BOB_CATCH_MEMBER("variance_threshold could not be set", -1)
-  return 0;
-}
-
-
-/***** ubm *****/
-static auto ubm = bob::extension::VariableDoc(
-  "ubm",
-  ":py:class:`bob.learn.misc.GMMMachine`",
-  "Returns the UBM (Universal Background Model",
-  ""
-);
-PyObject* PyBobLearnMiscIVectorMachine_getUBM(PyBobLearnMiscIVectorMachineObject* self, void*){
-  BOB_TRY
-
-  boost::shared_ptr<bob::learn::misc::GMMMachine> ubm_gmmMachine = self->cxx->getUbm();
-
-  //Allocating the correspondent python object
-  PyBobLearnMiscGMMMachineObject* retval =
-    (PyBobLearnMiscGMMMachineObject*)PyBobLearnMiscGMMMachine_Type.tp_alloc(&PyBobLearnMiscGMMMachine_Type, 0);
-  retval->cxx = ubm_gmmMachine;
-
-  return Py_BuildValue("O",retval);
-  BOB_CATCH_MEMBER("ubm could not be read", 0)
-}
-int PyBobLearnMiscIVectorMachine_setUBM(PyBobLearnMiscIVectorMachineObject* self, PyObject* value, void*){
-  BOB_TRY
-
-  if (!PyBobLearnMiscGMMMachine_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a :py:class:`bob.learn.misc.GMMMachine`", Py_TYPE(self)->tp_name, ubm.name());
-    return -1;
-  }
-
-  PyBobLearnMiscGMMMachineObject* ubm_gmmMachine = 0;
-  PyArg_Parse(value, "O!", &PyBobLearnMiscGMMMachine_Type,&ubm_gmmMachine);
-
-  self->cxx->setUbm(ubm_gmmMachine->cxx);
-
-  return 0;
-  BOB_CATCH_MEMBER("ubm could not be set", -1)  
-}
-
-
-static PyGetSetDef PyBobLearnMiscIVectorMachine_getseters[] = { 
-  {
-   shape.name(),
-   (getter)PyBobLearnMiscIVectorMachine_getShape,
-   0,
-   shape.doc(),
-   0
-  },
-  
-  {
-   supervector_length.name(),
-   (getter)PyBobLearnMiscIVectorMachine_getSupervectorLength,
-   0,
-   supervector_length.doc(),
-   0
-  },
-  
-  {
-   T.name(),
-   (getter)PyBobLearnMiscIVectorMachine_getT,
-   (setter)PyBobLearnMiscIVectorMachine_setT,
-   T.doc(),
-   0
-  },
-
-  {
-   variance_threshold.name(),
-   (getter)PyBobLearnMiscIVectorMachine_getVarianceThreshold,
-   (setter)PyBobLearnMiscIVectorMachine_setVarianceThreshold,
-   variance_threshold.doc(),
-   0
-  },
-
-  {
-   sigma.name(),
-   (getter)PyBobLearnMiscIVectorMachine_getSigma,
-   (setter)PyBobLearnMiscIVectorMachine_setSigma,
-   sigma.doc(),
-   0
-  },
-
-  {
-   ubm.name(),
-   (getter)PyBobLearnMiscIVectorMachine_getUBM,
-   (setter)PyBobLearnMiscIVectorMachine_setUBM,
-   ubm.doc(),
-   0
-  },
-
-
-  {0}  // Sentinel
-};
-
-
-/******************************************************************/
-/************ Functions Section ***********************************/
-/******************************************************************/
-
-
-/*** save ***/
-static auto save = bob::extension::FunctionDoc(
-  "save",
-  "Save the configuration of the IVectorMachine to a given HDF5 file"
-)
-.add_prototype("hdf5")
-.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing");
-static PyObject* PyBobLearnMiscIVectorMachine_Save(PyBobLearnMiscIVectorMachineObject* self,  PyObject* args, PyObject* kwargs) {
-
-  BOB_TRY
-  
-  // get list of arguments
-  char** kwlist = save.kwlist(0);  
-  PyBobIoHDF5FileObject* hdf5;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-
-  auto hdf5_ = make_safe(hdf5);
-  self->cxx->save(*hdf5->f);
-
-  BOB_CATCH_MEMBER("cannot save the data", 0)
-  Py_RETURN_NONE;
-}
-
-/*** load ***/
-static auto load = bob::extension::FunctionDoc(
-  "load",
-  "Load the configuration of the IVectorMachine to a given HDF5 file"
-)
-.add_prototype("hdf5")
-.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
-static PyObject* PyBobLearnMiscIVectorMachine_Load(PyBobLearnMiscIVectorMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = load.kwlist(0);  
-  PyBobIoHDF5FileObject* hdf5;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-  
-  auto hdf5_ = make_safe(hdf5);  
-  self->cxx->load(*hdf5->f);
-
-  BOB_CATCH_MEMBER("cannot load the data", 0)
-  Py_RETURN_NONE;
-}
-
-
-/*** is_similar_to ***/
-static auto is_similar_to = bob::extension::FunctionDoc(
-  "is_similar_to",
-  
-  "Compares this IVectorMachine with the ``other`` one to be approximately the same.",
-  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
-  "relative and absolute precision for the ``weights``, ``biases`` "
-  "and any other values internal to this machine."
-)
-.add_prototype("other, [r_epsilon], [a_epsilon]","output")
-.add_parameter("other", ":py:class:`bob.learn.misc.IVectorMachine`", "A IVectorMachine object to be compared.")
-.add_parameter("r_epsilon", "float", "Relative precision.")
-.add_parameter("a_epsilon", "float", "Absolute precision.")
-.add_return("output","bool","True if it is similar, otherwise false.");
-static PyObject* PyBobLearnMiscIVectorMachine_IsSimilarTo(PyBobLearnMiscIVectorMachineObject* self, PyObject* args, PyObject* kwds) {
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = is_similar_to.kwlist(0);
-
-  //PyObject* other = 0;
-  PyBobLearnMiscIVectorMachineObject* other = 0;
-  double r_epsilon = 1.e-5;
-  double a_epsilon = 1.e-8;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
-        &PyBobLearnMiscIVectorMachine_Type, &other,
-        &r_epsilon, &a_epsilon)){
-
-        is_similar_to.print_usage(); 
-        return 0;        
-  }
-
-  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
-    Py_RETURN_TRUE;
-  else
-    Py_RETURN_FALSE;
-}
-
-
-
-/*** forward ***/
-static auto forward = bob::extension::FunctionDoc(
-  "forward",
-  "Execute the machine",
-  "", 
-  true
-)
-.add_prototype("stats")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "Statistics as input");
-static PyObject* PyBobLearnMiscIVectorMachine_Forward(PyBobLearnMiscIVectorMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  char** kwlist = forward.kwlist(0);
-
-  PyBobLearnMiscGMMStatsObject* stats = 0;
-  
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscGMMStats_Type, &stats))
-    Py_RETURN_NONE;
-
-   blitz::Array<double,1> ivector(self->cxx->getDimRt());
-   self->cxx->forward(*stats->cxx, ivector);
-
-  return PyBlitzArrayCxx_AsConstNumpy(ivector);
-  
-  BOB_CATCH_MEMBER("cannot forward", 0)
-
-}
-
-/*** resize ***/
-static auto resize = bob::extension::FunctionDoc(
-  "resize",
-  "Resets the dimensionality of the subspace T. ",
-  0,
-  true
-)
-.add_prototype("rT")
-.add_parameter("rT", "int", "Size of T (Total variability matrix)");
-static PyObject* PyBobLearnMiscIVectorMachine_resize(PyBobLearnMiscIVectorMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = resize.kwlist(0);
-
-  int rT = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &rT)) Py_RETURN_NONE;
-
-  if (rT < 1){
-    PyErr_Format(PyExc_TypeError, "rU must be greater than one");
-    resize.print_usage();
-    return 0;
-  }
-
-  self->cxx->resize(rT);
-
-  BOB_CATCH_MEMBER("cannot perform the resize method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** __compute_Id_TtSigmaInvT__ ***/
-static auto __compute_Id_TtSigmaInvT__ = bob::extension::FunctionDoc(
-  "__compute_Id_TtSigmaInvT__",
-  "",
-  "", 
-  true
-)
-.add_prototype("stats")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "Statistics as input");
-static PyObject* PyBobLearnMiscIVectorMachine_compute_Id_TtSigmaInvT__(PyBobLearnMiscIVectorMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  char** kwlist = __compute_Id_TtSigmaInvT__.kwlist(0);
-
-  PyBobLearnMiscGMMStatsObject* stats = 0;
-  
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscGMMStats_Type, &stats))
-    Py_RETURN_NONE;
-
-
-  blitz::Array<double,2> output(self->cxx->getDimRt(), self->cxx->getDimRt());
-  self->cxx->computeIdTtSigmaInvT(*stats->cxx, output);
-  return PyBlitzArrayCxx_AsConstNumpy(output);
-  
-  BOB_CATCH_MEMBER("cannot __compute_Id_TtSigmaInvT__", 0)
-}
-
-
-
-/*** __compute_TtSigmaInvFnorm__ ***/
-static auto __compute_TtSigmaInvFnorm__ = bob::extension::FunctionDoc(
-  "__compute_TtSigmaInvFnorm__",
-  "",
-  "", 
-  true
-)
-.add_prototype("stats")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "Statistics as input");
-static PyObject* PyBobLearnMiscIVectorMachine_compute_TtSigmaInvFnorm__(PyBobLearnMiscIVectorMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  char** kwlist = __compute_TtSigmaInvFnorm__.kwlist(0);
-
-  PyBobLearnMiscGMMStatsObject* stats = 0;
-  
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscGMMStats_Type, &stats))
-    Py_RETURN_NONE;
-
-
-  blitz::Array<double,1> output(self->cxx->getDimRt());
-  self->cxx->computeTtSigmaInvFnorm(*stats->cxx, output);
-  return PyBlitzArrayCxx_AsConstNumpy(output);
-  
-  BOB_CATCH_MEMBER("cannot __compute_TtSigmaInvFnorm__", 0)
-}
-
-
-
-
-static PyMethodDef PyBobLearnMiscIVectorMachine_methods[] = {
-  {
-    save.name(),
-    (PyCFunction)PyBobLearnMiscIVectorMachine_Save,
-    METH_VARARGS|METH_KEYWORDS,
-    save.doc()
-  },
-  {
-    load.name(),
-    (PyCFunction)PyBobLearnMiscIVectorMachine_Load,
-    METH_VARARGS|METH_KEYWORDS,
-    load.doc()
-  },
-  {
-    is_similar_to.name(),
-    (PyCFunction)PyBobLearnMiscIVectorMachine_IsSimilarTo,
-    METH_VARARGS|METH_KEYWORDS,
-    is_similar_to.doc()
-  },
-  {
-    resize.name(),
-    (PyCFunction)PyBobLearnMiscIVectorMachine_resize,
-    METH_VARARGS|METH_KEYWORDS,
-    resize.doc()
-  },
-  {
-    __compute_Id_TtSigmaInvT__.name(),
-    (PyCFunction)PyBobLearnMiscIVectorMachine_compute_Id_TtSigmaInvT__,
-    METH_VARARGS|METH_KEYWORDS,
-    __compute_Id_TtSigmaInvT__.doc()
-  },
-  {
-    __compute_TtSigmaInvFnorm__.name(),
-    (PyCFunction)PyBobLearnMiscIVectorMachine_compute_TtSigmaInvFnorm__,
-    METH_VARARGS|METH_KEYWORDS,
-    __compute_TtSigmaInvFnorm__.doc()
-  },
-
-/*
-  {
-    forward.name(),
-    (PyCFunction)PyBobLearnMiscIVectorMachine_Forward,
-    METH_VARARGS|METH_KEYWORDS,
-    forward.doc()
-  },*/
-
-
-  {0} /* Sentinel */
-};
-
-
-/******************************************************************/
-/************ Module Section **************************************/
-/******************************************************************/
-
-// Define the JFA type struct; will be initialized later
-PyTypeObject PyBobLearnMiscIVectorMachine_Type = {
-  PyVarObject_HEAD_INIT(0,0)
-  0
-};
-
-bool init_BobLearnMiscIVectorMachine(PyObject* module)
-{
-  // initialize the type struct
-  PyBobLearnMiscIVectorMachine_Type.tp_name      = IVectorMachine_doc.name();
-  PyBobLearnMiscIVectorMachine_Type.tp_basicsize = sizeof(PyBobLearnMiscIVectorMachineObject);
-  PyBobLearnMiscIVectorMachine_Type.tp_flags     = Py_TPFLAGS_DEFAULT;
-  PyBobLearnMiscIVectorMachine_Type.tp_doc       = IVectorMachine_doc.doc();
-
-  // set the functions
-  PyBobLearnMiscIVectorMachine_Type.tp_new         = PyType_GenericNew;
-  PyBobLearnMiscIVectorMachine_Type.tp_init        = reinterpret_cast<initproc>(PyBobLearnMiscIVectorMachine_init);
-  PyBobLearnMiscIVectorMachine_Type.tp_dealloc     = reinterpret_cast<destructor>(PyBobLearnMiscIVectorMachine_delete);
-  PyBobLearnMiscIVectorMachine_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnMiscIVectorMachine_RichCompare);
-  PyBobLearnMiscIVectorMachine_Type.tp_methods     = PyBobLearnMiscIVectorMachine_methods;
-  PyBobLearnMiscIVectorMachine_Type.tp_getset      = PyBobLearnMiscIVectorMachine_getseters;
-  PyBobLearnMiscIVectorMachine_Type.tp_call        = reinterpret_cast<ternaryfunc>(PyBobLearnMiscIVectorMachine_Forward);
-
-
-  // check that everything is fine
-  if (PyType_Ready(&PyBobLearnMiscIVectorMachine_Type) < 0) return false;
-
-  // add the type to the module
-  Py_INCREF(&PyBobLearnMiscIVectorMachine_Type);
-  return PyModule_AddObject(module, "IVectorMachine", (PyObject*)&PyBobLearnMiscIVectorMachine_Type) >= 0;
-}
-
diff --git a/bob/learn/misc/ivector_trainer.cpp b/bob/learn/misc/ivector_trainer.cpp
deleted file mode 100644
index 958d2a9f1792e790ef330beb4a0a47df0e536d3a..0000000000000000000000000000000000000000
--- a/bob/learn/misc/ivector_trainer.cpp
+++ /dev/null
@@ -1,453 +0,0 @@
-/**
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- * @date Tue 03 Fev 10:29:00 2015
- *
- * @brief Python API for bob::learn::em
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "main.h"
-#include <boost/make_shared.hpp>
-
-/******************************************************************/
-/************ Constructor Section *********************************/
-/******************************************************************/
-
-static inline bool f(PyObject* o){return o != 0 && PyObject_IsTrue(o) > 0;}  /* converts PyObject to bool and returns false if object is NULL */
-
-static int extract_GMMStats_1d(PyObject *list,
-                             std::vector<bob::learn::misc::GMMStats>& training_data)
-{
-  for (int i=0; i<PyList_GET_SIZE(list); i++){
-  
-    PyBobLearnMiscGMMStatsObject* stats;
-    if (!PyArg_Parse(PyList_GetItem(list, i), "O!", &PyBobLearnMiscGMMStats_Type, &stats)){
-      PyErr_Format(PyExc_RuntimeError, "Expected GMMStats objects");
-      return -1;
-    }
-    training_data.push_back(*stats->cxx);
-
-  }
-  return 0;
-}
-
-
-static auto IVectorTrainer_doc = bob::extension::ClassDoc(
-  BOB_EXT_MODULE_PREFIX ".IVectorTrainer",
-  "IVectorTrainer"
-  "An IVectorTrainer to learn a Total Variability subspace :math:`$T$`"
-  " (and eventually a covariance matrix :math:`$\\Sigma$`).",
-  " References: [Dehak2010]"
-).add_constructor(
-  bob::extension::FunctionDoc(
-    "__init__",
-    "Constructor. Builds a new IVectorTrainer",
-    "",
-    true
-  )
-  .add_prototype("update_sigma","")
-  .add_prototype("other","")
-  .add_prototype("","")
-  .add_parameter("other", ":py:class:`bob.learn.misc.IVectorTrainer`", "A IVectorTrainer object to be copied.")
-  .add_parameter("update_sigma", "bool", "")
-);
-
-
-static int PyBobLearnMiscIVectorTrainer_init_copy(PyBobLearnMiscIVectorTrainerObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = IVectorTrainer_doc.kwlist(1);
-  PyBobLearnMiscIVectorTrainerObject* o;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscIVectorTrainer_Type, &o)){
-    IVectorTrainer_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::IVectorTrainer(*o->cxx));
-  return 0;
-}
-
-
-static int PyBobLearnMiscIVectorTrainer_init_bool(PyBobLearnMiscIVectorTrainerObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = IVectorTrainer_doc.kwlist(0);
-  PyObject* update_sigma   = 0;
-
-  //Parsing the input argments
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBool_Type, &update_sigma))
-    return -1;
-  
-  self->cxx.reset(new bob::learn::misc::IVectorTrainer(f(update_sigma)));
-  return 0;
-}
-
-
-static int PyBobLearnMiscIVectorTrainer_init(PyBobLearnMiscIVectorTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // get the number of command line arguments
-  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-
-  switch(nargs){
-    case 0:{
-      self->cxx.reset(new bob::learn::misc::IVectorTrainer());
-      return 0;
-    }
-    case 1:{
-      //Reading the input argument
-      PyObject* arg = 0;
-      if (PyTuple_Size(args))
-        arg = PyTuple_GET_ITEM(args, 0);
-      else {
-        PyObject* tmp = PyDict_Values(kwargs);
-        auto tmp_ = make_safe(tmp);
-        arg = PyList_GET_ITEM(tmp, 0);
-      }
-
-      // If the constructor input is IVectorTrainer object
-      if(PyBobLearnMiscIVectorTrainer_Check(arg))            
-        return  PyBobLearnMiscIVectorTrainer_init_copy(self, args, kwargs);
-      else
-        return PyBobLearnMiscIVectorTrainer_init_bool(self, args, kwargs);      
-      
-    }
-    default:{
-      PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires only 0 or 1 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
-      IVectorTrainer_doc.print_usage();
-      return -1;
-    }
-  }
-  BOB_CATCH_MEMBER("cannot create IVectorTrainer", 0)
-  return 0;
-}
-
-
-static void PyBobLearnMiscIVectorTrainer_delete(PyBobLearnMiscIVectorTrainerObject* self) {
-  self->cxx.reset();
-  Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-
-int PyBobLearnMiscIVectorTrainer_Check(PyObject* o) {
-  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnMiscIVectorTrainer_Type));
-}
-
-
-static PyObject* PyBobLearnMiscIVectorTrainer_RichCompare(PyBobLearnMiscIVectorTrainerObject* self, PyObject* other, int op) {
-  BOB_TRY
-
-  if (!PyBobLearnMiscIVectorTrainer_Check(other)) {
-    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
-    return 0;
-  }
-  auto other_ = reinterpret_cast<PyBobLearnMiscIVectorTrainerObject*>(other);
-  switch (op) {
-    case Py_EQ:
-      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
-    case Py_NE:
-      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
-    default:
-      Py_INCREF(Py_NotImplemented);
-      return Py_NotImplemented;
-  }
-  BOB_CATCH_MEMBER("cannot compare IVectorTrainer objects", 0)
-}
-
-
-/******************************************************************/
-/************ Variables Section ***********************************/
-/******************************************************************/
-
-static auto acc_nij_wij2 = bob::extension::VariableDoc(
-  "acc_nij_wij2",
-  "array_like <float, 3D>",
-  "Accumulator updated during the E-step",
-  ""
-);
-PyObject* PyBobLearnMiscIVectorTrainer_get_acc_nij_wij2(PyBobLearnMiscIVectorTrainerObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccNijWij2());
-  BOB_CATCH_MEMBER("acc_nij_wij2 could not be read", 0)
-}
-int PyBobLearnMiscIVectorTrainer_set_acc_nij_wij2(PyBobLearnMiscIVectorTrainerObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 3D array of floats", Py_TYPE(self)->tp_name, acc_nij_wij2.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,3>(o, "acc_nij_wij2");
-  if (!b) return -1;
-  self->cxx->setAccNijWij2(*b);
-  return 0;
-  BOB_CATCH_MEMBER("acc_nij_wij2 could not be set", -1)
-}
-
-
-static auto acc_fnormij_wij = bob::extension::VariableDoc(
-  "acc_fnormij_wij",
-  "array_like <float, 3D>",
-  "Accumulator updated during the E-step",
-  ""
-);
-PyObject* PyBobLearnMiscIVectorTrainer_get_acc_fnormij_wij(PyBobLearnMiscIVectorTrainerObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccFnormijWij());
-  BOB_CATCH_MEMBER("acc_fnormij_wij could not be read", 0)
-}
-int PyBobLearnMiscIVectorTrainer_set_acc_fnormij_wij(PyBobLearnMiscIVectorTrainerObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 3D array of floats", Py_TYPE(self)->tp_name, acc_fnormij_wij.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,3>(o, "acc_fnormij_wij");
-  if (!b) return -1;
-  self->cxx->setAccFnormijWij(*b);
-  return 0;
-  BOB_CATCH_MEMBER("acc_fnormij_wij could not be set", -1)
-}
-
-
-static auto acc_nij = bob::extension::VariableDoc(
-  "acc_nij",
-  "array_like <float, 1D>",
-  "Accumulator updated during the E-step",
-  ""
-);
-PyObject* PyBobLearnMiscIVectorTrainer_get_acc_nij(PyBobLearnMiscIVectorTrainerObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccNij());
-  BOB_CATCH_MEMBER("acc_nij could not be read", 0)
-}
-int PyBobLearnMiscIVectorTrainer_set_acc_nij(PyBobLearnMiscIVectorTrainerObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, acc_nij.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "acc_nij");
-  if (!b) return -1;
-  self->cxx->setAccNij(*b);
-  return 0;
-  BOB_CATCH_MEMBER("acc_nij could not be set", -1)
-}
-
-
-static auto acc_snormij = bob::extension::VariableDoc(
-  "acc_snormij",
-  "array_like <float, 2D>",
-  "Accumulator updated during the E-step",
-  ""
-);
-PyObject* PyBobLearnMiscIVectorTrainer_get_acc_snormij(PyBobLearnMiscIVectorTrainerObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccSnormij());
-  BOB_CATCH_MEMBER("acc_snormij could not be read", 0)
-}
-int PyBobLearnMiscIVectorTrainer_set_acc_snormij(PyBobLearnMiscIVectorTrainerObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, acc_snormij.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "acc_snormij");
-  if (!b) return -1;
-  self->cxx->setAccSnormij(*b);
-  return 0;
-  BOB_CATCH_MEMBER("acc_snormij could not be set", -1)
-}
-
-
-
-
-static PyGetSetDef PyBobLearnMiscIVectorTrainer_getseters[] = { 
-  {
-   acc_nij_wij2.name(),
-   (getter)PyBobLearnMiscIVectorTrainer_get_acc_nij_wij2,
-   (setter)PyBobLearnMiscIVectorTrainer_set_acc_nij_wij2,
-   acc_nij_wij2.doc(),
-   0
-  },  
-  {
-   acc_fnormij_wij.name(),
-   (getter)PyBobLearnMiscIVectorTrainer_get_acc_fnormij_wij,
-   (setter)PyBobLearnMiscIVectorTrainer_set_acc_fnormij_wij,
-   acc_fnormij_wij.doc(),
-   0
-  },
-  {
-   acc_nij.name(),
-   (getter)PyBobLearnMiscIVectorTrainer_get_acc_nij,
-   (setter)PyBobLearnMiscIVectorTrainer_set_acc_nij,
-   acc_nij.doc(),
-   0
-  },
-  {
-   acc_snormij.name(),
-   (getter)PyBobLearnMiscIVectorTrainer_get_acc_snormij,
-   (setter)PyBobLearnMiscIVectorTrainer_set_acc_snormij,
-   acc_snormij.doc(),
-   0
-  },
-
-  {0}  // Sentinel
-};
-
-
-/******************************************************************/
-/************ Functions Section ***********************************/
-/******************************************************************/
-
-/*** initialize ***/
-static auto initialize = bob::extension::FunctionDoc(
-  "initialize",
-  "Initialization before the EM steps",
-  "",
-  true
-)
-.add_prototype("ivector_machine")
-.add_parameter("ivector_machine", ":py:class:`bob.learn.misc.ISVBase`", "IVectorMachine Object");
-static PyObject* PyBobLearnMiscIVectorTrainer_initialize(PyBobLearnMiscIVectorTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = initialize.kwlist(0);
-
-  PyBobLearnMiscIVectorMachineObject* ivector_machine = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscIVectorMachine_Type, &ivector_machine)) Py_RETURN_NONE;
-
-  self->cxx->initialize(*ivector_machine->cxx);
-
-  BOB_CATCH_MEMBER("cannot perform the initialize method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** e_step ***/
-static auto e_step = bob::extension::FunctionDoc(
-  "e_step",
-  "Call the e-step procedure (for the U subspace).",
-  "",
-  true
-)
-.add_prototype("ivector_machine,stats")
-.add_parameter("ivector_machine", ":py:class:`bob.learn.misc.ISVBase`", "IVectorMachine Object")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "GMMStats Object");
-static PyObject* PyBobLearnMiscIVectorTrainer_e_step(PyBobLearnMiscIVectorTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // Parses input arguments in a single shot
-  char** kwlist = e_step.kwlist(0);
-
-  PyBobLearnMiscIVectorMachineObject* ivector_machine = 0;
-  PyObject* stats = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscIVectorMachine_Type, &ivector_machine,
-                                                                 &PyList_Type, &stats)) Py_RETURN_NONE;
-
-  std::vector<bob::learn::misc::GMMStats> training_data;
-  if(extract_GMMStats_1d(stats ,training_data)==0)
-    self->cxx->eStep(*ivector_machine->cxx, training_data);
-
-  Py_RETURN_NONE;
-  BOB_CATCH_MEMBER("cannot perform the e_step method", 0)
-}
-
-
-/*** m_step ***/
-static auto m_step = bob::extension::FunctionDoc(
-  "m_step",
-  "Call the m-step procedure (for the U subspace).",
-  "",
-  true
-)
-.add_prototype("ivector_machine")
-.add_parameter("ivector_machine", ":py:class:`bob.learn.misc.ISVBase`", "IVectorMachine Object");
-static PyObject* PyBobLearnMiscIVectorTrainer_m_step(PyBobLearnMiscIVectorTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // Parses input arguments in a single shot 
-  char** kwlist = m_step.kwlist(0);
-
-  PyBobLearnMiscIVectorMachineObject* ivector_machine = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscIVectorMachine_Type, &ivector_machine)) Py_RETURN_NONE;
-
-  self->cxx->mStep(*ivector_machine->cxx);
-
-  BOB_CATCH_MEMBER("cannot perform the m_step method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-
-static PyMethodDef PyBobLearnMiscIVectorTrainer_methods[] = {
-  {
-    initialize.name(),
-    (PyCFunction)PyBobLearnMiscIVectorTrainer_initialize,
-    METH_VARARGS|METH_KEYWORDS,
-    initialize.doc()
-  },
-  {
-    e_step.name(),
-    (PyCFunction)PyBobLearnMiscIVectorTrainer_e_step,
-    METH_VARARGS|METH_KEYWORDS,
-    e_step.doc()
-  },
-  {
-    m_step.name(),
-    (PyCFunction)PyBobLearnMiscIVectorTrainer_m_step,
-    METH_VARARGS|METH_KEYWORDS,
-    m_step.doc()
-  },
-  {0} /* Sentinel */
-};
-
-
-/******************************************************************/
-/************ Module Section **************************************/
-/******************************************************************/
-
-// Define the Gaussian type struct; will be initialized later
-PyTypeObject PyBobLearnMiscIVectorTrainer_Type = {
-  PyVarObject_HEAD_INIT(0,0)
-  0
-};
-
-bool init_BobLearnMiscIVectorTrainer(PyObject* module)
-{
-  // initialize the type struct
-  PyBobLearnMiscIVectorTrainer_Type.tp_name      = IVectorTrainer_doc.name();
-  PyBobLearnMiscIVectorTrainer_Type.tp_basicsize = sizeof(PyBobLearnMiscIVectorTrainerObject);
-  PyBobLearnMiscIVectorTrainer_Type.tp_flags     = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;//Enable the class inheritance;
-  PyBobLearnMiscIVectorTrainer_Type.tp_doc       = IVectorTrainer_doc.doc();
-
-  // set the functions
-  PyBobLearnMiscIVectorTrainer_Type.tp_new          = PyType_GenericNew;
-  PyBobLearnMiscIVectorTrainer_Type.tp_init         = reinterpret_cast<initproc>(PyBobLearnMiscIVectorTrainer_init);
-  PyBobLearnMiscIVectorTrainer_Type.tp_dealloc      = reinterpret_cast<destructor>(PyBobLearnMiscIVectorTrainer_delete);
-  PyBobLearnMiscIVectorTrainer_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnMiscIVectorTrainer_RichCompare);
-  PyBobLearnMiscIVectorTrainer_Type.tp_methods      = PyBobLearnMiscIVectorTrainer_methods;
-  PyBobLearnMiscIVectorTrainer_Type.tp_getset       = PyBobLearnMiscIVectorTrainer_getseters;
-  //PyBobLearnMiscIVectorTrainer_Type.tp_call         = reinterpret_cast<ternaryfunc>(PyBobLearnMiscIVectorTrainer_compute_likelihood);
-
-
-  // check that everything is fine
-  if (PyType_Ready(&PyBobLearnMiscIVectorTrainer_Type) < 0) return false;
-
-  // add the type to the module
-  Py_INCREF(&PyBobLearnMiscIVectorTrainer_Type);
-  return PyModule_AddObject(module, "_IVectorTrainer", (PyObject*)&PyBobLearnMiscIVectorTrainer_Type) >= 0;
-}
-
diff --git a/bob/learn/misc/jfa_base.cpp b/bob/learn/misc/jfa_base.cpp
deleted file mode 100644
index 5793919e134042ed6e2840e90847e74e4bd1eb3e..0000000000000000000000000000000000000000
--- a/bob/learn/misc/jfa_base.cpp
+++ /dev/null
@@ -1,578 +0,0 @@
-/**
- * @date Wed Jan 27 17:03:15 2015 +0200
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- *
- * @brief Python API for bob::learn::em
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "main.h"
-
-/******************************************************************/
-/************ Constructor Section *********************************/
-/******************************************************************/
-
-static auto JFABase_doc = bob::extension::ClassDoc(
-  BOB_EXT_MODULE_PREFIX ".JFABase",
-  "A JFABase instance can be seen as a container for U, V and D when performing Joint Factor Analysis (JFA)."
-  "References: [Vogt2008,McCool2013]",
-  ""
-).add_constructor(
-  bob::extension::FunctionDoc(
-    "__init__",
-    "Constructor. Builds a new JFABase",
-    "",
-    true
-  )
-  .add_prototype("gmm,ru,rv","")
-  .add_prototype("other","")
-  .add_prototype("hdf5","")
-  .add_prototype("","")
-
-  .add_parameter("gmm", ":py:class:`bob.learn.misc.GMMMachine`", "The Universal Background Model.")
-  .add_parameter("ru", "int", "Size of U (Within client variation matrix). In the end the U matrix will have (number_of_gaussians * feature_dimension x ru)")
-  .add_parameter("rv", "int", "Size of V (Between client variation matrix). In the end the U matrix will have (number_of_gaussians * feature_dimension x rv)")
-  .add_parameter("other", ":py:class:`bob.learn.misc.JFABase`", "A JFABase object to be copied.")
-  .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
-
-);
-
-
-static int PyBobLearnMiscJFABase_init_copy(PyBobLearnMiscJFABaseObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = JFABase_doc.kwlist(1);
-  PyBobLearnMiscJFABaseObject* o;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscJFABase_Type, &o)){
-    JFABase_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::JFABase(*o->cxx));
-  return 0;
-}
-
-
-static int PyBobLearnMiscJFABase_init_hdf5(PyBobLearnMiscJFABaseObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = JFABase_doc.kwlist(2);
-
-  PyBobIoHDF5FileObject* config = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBobIoHDF5File_Converter, &config)){
-    JFABase_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::JFABase(*(config->f)));
-
-  return 0;
-}
-
-
-static int PyBobLearnMiscJFABase_init_ubm(PyBobLearnMiscJFABaseObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = JFABase_doc.kwlist(0);
-  
-  PyBobLearnMiscGMMMachineObject* ubm;
-  int ru = 1;
-  int rv = 1;
-
-  //Here we have to select which keyword argument to read  
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!ii", kwlist, &PyBobLearnMiscGMMMachine_Type, &ubm,
-                                                                &ru, &rv)){
-    JFABase_doc.print_usage();
-    return -1;
-  }
-  
-  if(ru < 0){
-    PyErr_Format(PyExc_TypeError, "ru argument must be greater than or equal to one");
-    return -1;
-  }
-  
-  if(rv < 0){
-    PyErr_Format(PyExc_TypeError, "rv argument must be greater than or equal to one");
-    return -1;
-  }
-  
-  self->cxx.reset(new bob::learn::misc::JFABase(ubm->cxx, ru, rv));
-  return 0;
-}
-
-
-static int PyBobLearnMiscJFABase_init(PyBobLearnMiscJFABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // get the number of command line arguments
-  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-    
-  switch (nargs) {
-
-    case 1:{
-      //Reading the input argument
-      PyObject* arg = 0;
-      if (PyTuple_Size(args))
-        arg = PyTuple_GET_ITEM(args, 0);
-      else {
-        PyObject* tmp = PyDict_Values(kwargs);
-        auto tmp_ = make_safe(tmp);
-        arg = PyList_GET_ITEM(tmp, 0);
-      }
-
-      // If the constructor input is Gaussian object
-     if (PyBobLearnMiscJFABase_Check(arg))
-       return PyBobLearnMiscJFABase_init_copy(self, args, kwargs);
-      // If the constructor input is a HDF5
-     else if (PyBobIoHDF5File_Check(arg))
-       return PyBobLearnMiscJFABase_init_hdf5(self, args, kwargs);
-    }
-    case 3:
-      return PyBobLearnMiscJFABase_init_ubm(self, args, kwargs);
-    default:
-      PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires 1 or 3 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
-      JFABase_doc.print_usage();
-      return -1;
-  }
-  BOB_CATCH_MEMBER("cannot create JFABase", 0)
-  return 0;
-}
-
-
-
-static void PyBobLearnMiscJFABase_delete(PyBobLearnMiscJFABaseObject* self) {
-  self->cxx.reset();
-  Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-static PyObject* PyBobLearnMiscJFABase_RichCompare(PyBobLearnMiscJFABaseObject* self, PyObject* other, int op) {
-  BOB_TRY
-
-  if (!PyBobLearnMiscJFABase_Check(other)) {
-    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
-    return 0;
-  }
-  auto other_ = reinterpret_cast<PyBobLearnMiscJFABaseObject*>(other);
-  switch (op) {
-    case Py_EQ:
-      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
-    case Py_NE:
-      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
-    default:
-      Py_INCREF(Py_NotImplemented);
-      return Py_NotImplemented;
-  }
-  BOB_CATCH_MEMBER("cannot compare JFABase objects", 0)
-}
-
-int PyBobLearnMiscJFABase_Check(PyObject* o) {
-  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnMiscJFABase_Type));
-}
-
-
-/******************************************************************/
-/************ Variables Section ***********************************/
-/******************************************************************/
-
-/***** shape *****/
-static auto shape = bob::extension::VariableDoc(
-  "shape",
-  "(int,int, int, int)",
-  "A tuple that represents the number of gaussians, dimensionality of each Gaussian, dimensionality of the rU (within client variability matrix) and dimensionality of the rV (between client variability matrix) ``(#Gaussians, #Inputs, #rU, #rV)``.",
-  ""
-);
-PyObject* PyBobLearnMiscJFABase_getShape(PyBobLearnMiscJFABaseObject* self, void*) {
-  BOB_TRY
-  return Py_BuildValue("(i,i,i,i)", self->cxx->getNGaussians(), self->cxx->getNInputs(), self->cxx->getDimRu(), self->cxx->getDimRv());
-  BOB_CATCH_MEMBER("shape could not be read", 0)
-}
-
-/***** supervector_length *****/
-static auto supervector_length = bob::extension::VariableDoc(
-  "supervector_length",
-  "int",
-
-  "Returns the supervector length."
-  "NGaussians x NInputs: Number of Gaussian components by the feature dimensionality",
-  
-  "@warning An exception is thrown if no Universal Background Model has been set yet."
-);
-PyObject* PyBobLearnMiscJFABase_getSupervectorLength(PyBobLearnMiscJFABaseObject* self, void*) {
-  BOB_TRY
-  return Py_BuildValue("i", self->cxx->getSupervectorLength());
-  BOB_CATCH_MEMBER("supervector_length could not be read", 0)
-}
-
-
-/***** u *****/
-static auto U = bob::extension::VariableDoc(
-  "u",
-  "array_like <float, 2D>",
-  "Returns the U matrix (within client variability matrix)",
-  ""
-);
-PyObject* PyBobLearnMiscJFABase_getU(PyBobLearnMiscJFABaseObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getU());
-  BOB_CATCH_MEMBER("``u`` could not be read", 0)
-}
-int PyBobLearnMiscJFABase_setU(PyBobLearnMiscJFABaseObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, U.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "u");
-  if (!b) return -1;
-  self->cxx->setU(*b);
-  return 0;
-  BOB_CATCH_MEMBER("``u`` matrix could not be set", -1)
-}
-
-/***** v *****/
-static auto V = bob::extension::VariableDoc(
-  "v",
-  "array_like <float, 2D>",
-  "Returns the V matrix (between client variability matrix)",
-  ""
-);
-PyObject* PyBobLearnMiscJFABase_getV(PyBobLearnMiscJFABaseObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getV());
-  BOB_CATCH_MEMBER("``v`` could not be read", 0)
-}
-int PyBobLearnMiscJFABase_setV(PyBobLearnMiscJFABaseObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, V.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "v");
-  if (!b) return -1;
-  self->cxx->setV(*b);
-  return 0;
-  BOB_CATCH_MEMBER("``v`` matrix could not be set", -1)
-}
-
-
-/***** d *****/
-static auto D = bob::extension::VariableDoc(
-  "d",
-  "array_like <float, 1D>",
-  "Returns the diagonal matrix diag(d) (as a 1D vector)",
-  ""
-);
-PyObject* PyBobLearnMiscJFABase_getD(PyBobLearnMiscJFABaseObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getD());
-  BOB_CATCH_MEMBER("``d`` could not be read", 0)
-}
-int PyBobLearnMiscJFABase_setD(PyBobLearnMiscJFABaseObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, D.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "d");
-  if (!b) return -1;
-  self->cxx->setD(*b);
-  return 0;
-  BOB_CATCH_MEMBER("``d`` matrix could not be set", -1)
-}
-
-
-/***** ubm *****/
-static auto ubm = bob::extension::VariableDoc(
-  "ubm",
-  ":py:class:`bob.learn.misc.GMMMachine`",
-  "Returns the UBM (Universal Background Model",
-  ""
-);
-PyObject* PyBobLearnMiscJFABase_getUBM(PyBobLearnMiscJFABaseObject* self, void*){
-  BOB_TRY
-
-  boost::shared_ptr<bob::learn::misc::GMMMachine> ubm_gmmMachine = self->cxx->getUbm();
-
-  //Allocating the correspondent python object
-  PyBobLearnMiscGMMMachineObject* retval =
-    (PyBobLearnMiscGMMMachineObject*)PyBobLearnMiscGMMMachine_Type.tp_alloc(&PyBobLearnMiscGMMMachine_Type, 0);
-  retval->cxx = ubm_gmmMachine;
-
-  return Py_BuildValue("O",retval);
-  BOB_CATCH_MEMBER("ubm could not be read", 0)
-}
-int PyBobLearnMiscJFABase_setUBM(PyBobLearnMiscJFABaseObject* self, PyObject* value, void*){
-  BOB_TRY
-
-  if (!PyBobLearnMiscGMMMachine_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a :py:class:`bob.learn.misc.GMMMachine`", Py_TYPE(self)->tp_name, ubm.name());
-    return -1;
-  }
-
-  PyBobLearnMiscGMMMachineObject* ubm_gmmMachine = 0;
-  PyArg_Parse(value, "O!", &PyBobLearnMiscGMMMachine_Type,&ubm_gmmMachine);
-
-  self->cxx->setUbm(ubm_gmmMachine->cxx);
-
-  return 0;
-  BOB_CATCH_MEMBER("ubm could not be set", -1)  
-}
-
-
-
-
-static PyGetSetDef PyBobLearnMiscJFABase_getseters[] = { 
-  {
-   shape.name(),
-   (getter)PyBobLearnMiscJFABase_getShape,
-   0,
-   shape.doc(),
-   0
-  },
-  
-  {
-   supervector_length.name(),
-   (getter)PyBobLearnMiscJFABase_getSupervectorLength,
-   0,
-   supervector_length.doc(),
-   0
-  },
-  
-  {
-   U.name(),
-   (getter)PyBobLearnMiscJFABase_getU,
-   (setter)PyBobLearnMiscJFABase_setU,
-   U.doc(),
-   0
-  },
-  
-  {
-   V.name(),
-   (getter)PyBobLearnMiscJFABase_getV,
-   (setter)PyBobLearnMiscJFABase_setV,
-   V.doc(),
-   0
-  },
-
-  {
-   D.name(),
-   (getter)PyBobLearnMiscJFABase_getD,
-   (setter)PyBobLearnMiscJFABase_setD,
-   D.doc(),
-   0
-  },
-
-  {
-   ubm.name(),
-   (getter)PyBobLearnMiscJFABase_getUBM,
-   (setter)PyBobLearnMiscJFABase_setUBM,
-   ubm.doc(),
-   0
-  },
-
-
-  {0}  // Sentinel
-};
-
-
-/******************************************************************/
-/************ Functions Section ***********************************/
-/******************************************************************/
-
-
-/*** save ***/
-static auto save = bob::extension::FunctionDoc(
-  "save",
-  "Save the configuration of the JFABase to a given HDF5 file"
-)
-.add_prototype("hdf5")
-.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing");
-static PyObject* PyBobLearnMiscJFABase_Save(PyBobLearnMiscJFABaseObject* self,  PyObject* args, PyObject* kwargs) {
-
-  BOB_TRY
-  
-  // get list of arguments
-  char** kwlist = save.kwlist(0);  
-  PyBobIoHDF5FileObject* hdf5;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-
-  auto hdf5_ = make_safe(hdf5);
-  self->cxx->save(*hdf5->f);
-
-  BOB_CATCH_MEMBER("cannot save the data", 0)
-  Py_RETURN_NONE;
-}
-
-/*** load ***/
-static auto load = bob::extension::FunctionDoc(
-  "load",
-  "Load the configuration of the JFABase to a given HDF5 file"
-)
-.add_prototype("hdf5")
-.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
-static PyObject* PyBobLearnMiscJFABase_Load(PyBobLearnMiscJFABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = load.kwlist(0);  
-  PyBobIoHDF5FileObject* hdf5;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-  
-  auto hdf5_ = make_safe(hdf5);  
-  self->cxx->load(*hdf5->f);
-
-  BOB_CATCH_MEMBER("cannot load the data", 0)
-  Py_RETURN_NONE;
-}
-
-
-/*** is_similar_to ***/
-static auto is_similar_to = bob::extension::FunctionDoc(
-  "is_similar_to",
-  
-  "Compares this JFABase with the ``other`` one to be approximately the same.",
-  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
-  "relative and absolute precision for the ``weights``, ``biases`` "
-  "and any other values internal to this machine."
-)
-.add_prototype("other, [r_epsilon], [a_epsilon]","output")
-.add_parameter("other", ":py:class:`bob.learn.misc.JFABase`", "A JFABase object to be compared.")
-.add_parameter("r_epsilon", "float", "Relative precision.")
-.add_parameter("a_epsilon", "float", "Absolute precision.")
-.add_return("output","bool","True if it is similar, otherwise false.");
-static PyObject* PyBobLearnMiscJFABase_IsSimilarTo(PyBobLearnMiscJFABaseObject* self, PyObject* args, PyObject* kwds) {
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = is_similar_to.kwlist(0);
-
-  //PyObject* other = 0;
-  PyBobLearnMiscJFABaseObject* other = 0;
-  double r_epsilon = 1.e-5;
-  double a_epsilon = 1.e-8;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
-        &PyBobLearnMiscJFABase_Type, &other,
-        &r_epsilon, &a_epsilon)){
-
-        is_similar_to.print_usage(); 
-        return 0;        
-  }
-
-  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
-    Py_RETURN_TRUE;
-  else
-    Py_RETURN_FALSE;
-}
-
-
-/*** resize ***/
-static auto resize = bob::extension::FunctionDoc(
-  "resize",
-  "Resets the dimensionality of the subspace U and V. "
-  "U and V are hence uninitialized",
-  0,
-  true
-)
-.add_prototype("rU,rV")
-.add_parameter("rU", "int", "Size of U (Within client variation matrix)")
-.add_parameter("rV", "int", "Size of V (Between client variation matrix)");
-static PyObject* PyBobLearnMiscJFABase_resize(PyBobLearnMiscJFABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = resize.kwlist(0);
-
-  int rU = 0;
-  int rV = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "ii", kwlist, &rU, &rV)) Py_RETURN_NONE;
-
-  if (rU <= 0){
-    PyErr_Format(PyExc_TypeError, "rU must be greater than zero");
-    resize.print_usage();
-    return 0;
-  }
-  if (rV <= 0){
-    PyErr_Format(PyExc_TypeError, "rV must be greater than zero");
-    resize.print_usage();
-    return 0;
-  }
-  self->cxx->resize(rU, rV);
-
-  BOB_CATCH_MEMBER("cannot perform the resize method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-
-
-static PyMethodDef PyBobLearnMiscJFABase_methods[] = {
-  {
-    save.name(),
-    (PyCFunction)PyBobLearnMiscJFABase_Save,
-    METH_VARARGS|METH_KEYWORDS,
-    save.doc()
-  },
-  {
-    load.name(),
-    (PyCFunction)PyBobLearnMiscJFABase_Load,
-    METH_VARARGS|METH_KEYWORDS,
-    load.doc()
-  },
-  {
-    is_similar_to.name(),
-    (PyCFunction)PyBobLearnMiscJFABase_IsSimilarTo,
-    METH_VARARGS|METH_KEYWORDS,
-    is_similar_to.doc()
-  },
-  {
-    resize.name(),
-    (PyCFunction)PyBobLearnMiscJFABase_resize,
-    METH_VARARGS|METH_KEYWORDS,
-    resize.doc()
-  },
-  
-  {0} /* Sentinel */
-};
-
-
-/******************************************************************/
-/************ Module Section **************************************/
-/******************************************************************/
-
-// Define the JFA type struct; will be initialized later
-PyTypeObject PyBobLearnMiscJFABase_Type = {
-  PyVarObject_HEAD_INIT(0,0)
-  0
-};
-
-bool init_BobLearnMiscJFABase(PyObject* module)
-{
-  // initialize the type struct
-  PyBobLearnMiscJFABase_Type.tp_name      = JFABase_doc.name();
-  PyBobLearnMiscJFABase_Type.tp_basicsize = sizeof(PyBobLearnMiscJFABaseObject);
-  PyBobLearnMiscJFABase_Type.tp_flags     = Py_TPFLAGS_DEFAULT;
-  PyBobLearnMiscJFABase_Type.tp_doc       = JFABase_doc.doc();
-
-  // set the functions
-  PyBobLearnMiscJFABase_Type.tp_new         = PyType_GenericNew;
-  PyBobLearnMiscJFABase_Type.tp_init        = reinterpret_cast<initproc>(PyBobLearnMiscJFABase_init);
-  PyBobLearnMiscJFABase_Type.tp_dealloc     = reinterpret_cast<destructor>(PyBobLearnMiscJFABase_delete);
-  PyBobLearnMiscJFABase_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnMiscJFABase_RichCompare);
-  PyBobLearnMiscJFABase_Type.tp_methods     = PyBobLearnMiscJFABase_methods;
-  PyBobLearnMiscJFABase_Type.tp_getset      = PyBobLearnMiscJFABase_getseters;
-  //PyBobLearnMiscJFABase_Type.tp_call = reinterpret_cast<ternaryfunc>(PyBobLearnMiscJFABase_forward);
-
-
-  // check that everything is fine
-  if (PyType_Ready(&PyBobLearnMiscJFABase_Type) < 0) return false;
-
-  // add the type to the module
-  Py_INCREF(&PyBobLearnMiscJFABase_Type);
-  return PyModule_AddObject(module, "JFABase", (PyObject*)&PyBobLearnMiscJFABase_Type) >= 0;
-}
-
diff --git a/bob/learn/misc/jfa_machine.cpp b/bob/learn/misc/jfa_machine.cpp
deleted file mode 100644
index af714125e44421599934534422f4eca07bab120e..0000000000000000000000000000000000000000
--- a/bob/learn/misc/jfa_machine.cpp
+++ /dev/null
@@ -1,650 +0,0 @@
-/**
- * @date Wed Jan 28 17:03:15 2015 +0200
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- *
- * @brief Python API for bob::learn::em
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "main.h"
-
-/******************************************************************/
-/************ Constructor Section *********************************/
-/******************************************************************/
-
-static auto JFAMachine_doc = bob::extension::ClassDoc(
-  BOB_EXT_MODULE_PREFIX ".JFAMachine",
-  "A JFAMachine. An attached :py:class:`bob.learn.misc.JFABase` should be provided for Joint Factor Analysis. The :py:class:`bob.learn.misc.JFAMachine` carries information about the speaker factors y and z, whereas a :py:class:`bob.learn.misc.JFABase` carries information about the matrices U, V and D."
-  "References: [Vogt2008,McCool2013]",
-  ""
-).add_constructor(
-  bob::extension::FunctionDoc(
-    "__init__",
-    "Constructor. Builds a new JFAMachine",
-    "",
-    true
-  )
-  .add_prototype("jfa_base","")
-  .add_prototype("other","")
-  .add_prototype("hdf5","")
-
-  .add_parameter("jfa", ":py:class:`bob.learn.misc.JFABase`", "The JFABase associated with this machine")
-  .add_parameter("other", ":py:class:`bob.learn.misc.JFAMachine`", "A JFAMachine object to be copied.")
-  .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
-
-);
-
-
-static int PyBobLearnMiscJFAMachine_init_copy(PyBobLearnMiscJFAMachineObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = JFAMachine_doc.kwlist(1);
-  PyBobLearnMiscJFAMachineObject* o;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscJFAMachine_Type, &o)){
-    JFAMachine_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::JFAMachine(*o->cxx));
-  return 0;
-}
-
-
-static int PyBobLearnMiscJFAMachine_init_hdf5(PyBobLearnMiscJFAMachineObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = JFAMachine_doc.kwlist(2);
-
-  PyBobIoHDF5FileObject* config = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBobIoHDF5File_Converter, &config)){
-    JFAMachine_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::JFAMachine(*(config->f)));
-
-  return 0;
-}
-
-
-static int PyBobLearnMiscJFAMachine_init_jfabase(PyBobLearnMiscJFAMachineObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = JFAMachine_doc.kwlist(0);
-  
-  PyBobLearnMiscJFABaseObject* jfa_base;
-
-  //Here we have to select which keyword argument to read  
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscJFABase_Type, &jfa_base)){
-    JFAMachine_doc.print_usage();
-    return -1;
-  }
-  
-  self->cxx.reset(new bob::learn::misc::JFAMachine(jfa_base->cxx));
-  return 0;
-}
-
-
-static int PyBobLearnMiscJFAMachine_init(PyBobLearnMiscJFAMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // get the number of command line arguments
-  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-
-  if(nargs == 1){
-    //Reading the input argument
-    PyObject* arg = 0;
-    if (PyTuple_Size(args))
-      arg = PyTuple_GET_ITEM(args, 0);
-    else {
-      PyObject* tmp = PyDict_Values(kwargs);
-      auto tmp_ = make_safe(tmp);
-      arg = PyList_GET_ITEM(tmp, 0);
-    }
-
-    // If the constructor input is Gaussian object
-    if (PyBobLearnMiscJFAMachine_Check(arg))
-      return PyBobLearnMiscJFAMachine_init_copy(self, args, kwargs);
-    // If the constructor input is a HDF5
-    else if (PyBobIoHDF5File_Check(arg))
-      return PyBobLearnMiscJFAMachine_init_hdf5(self, args, kwargs);
-    // If the constructor input is a JFABase Object
-    else
-      return PyBobLearnMiscJFAMachine_init_jfabase(self, args, kwargs);
-  }
-  else{
-    PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires only 1 argument, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
-    JFAMachine_doc.print_usage();
-    return -1;
-  }
-  
-  BOB_CATCH_MEMBER("cannot create JFAMachine", 0)
-  return 0;
-}
-
-static void PyBobLearnMiscJFAMachine_delete(PyBobLearnMiscJFAMachineObject* self) {
-  self->cxx.reset();
-  Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-static PyObject* PyBobLearnMiscJFAMachine_RichCompare(PyBobLearnMiscJFAMachineObject* self, PyObject* other, int op) {
-  BOB_TRY
-
-  if (!PyBobLearnMiscJFAMachine_Check(other)) {
-    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
-    return 0;
-  }
-  auto other_ = reinterpret_cast<PyBobLearnMiscJFAMachineObject*>(other);
-  switch (op) {
-    case Py_EQ:
-      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
-    case Py_NE:
-      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
-    default:
-      Py_INCREF(Py_NotImplemented);
-      return Py_NotImplemented;
-  }
-  BOB_CATCH_MEMBER("cannot compare JFAMachine objects", 0)
-}
-
-int PyBobLearnMiscJFAMachine_Check(PyObject* o) {
-  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnMiscJFAMachine_Type));
-}
-
-
-/******************************************************************/
-/************ Variables Section ***********************************/
-/******************************************************************/
-
-/***** shape *****/
-static auto shape = bob::extension::VariableDoc(
-  "shape",
-  "(int,int, int, int)",
-  "A tuple that represents the number of gaussians, dimensionality of each Gaussian, dimensionality of the rU (within client variability matrix) and dimensionality of the rV (between client variability matrix) ``(#Gaussians, #Inputs, #rU, #rV)``.",
-  ""
-);
-PyObject* PyBobLearnMiscJFAMachine_getShape(PyBobLearnMiscJFAMachineObject* self, void*) {
-  BOB_TRY
-  return Py_BuildValue("(i,i,i,i)", self->cxx->getNGaussians(), self->cxx->getNInputs(), self->cxx->getDimRu(), self->cxx->getDimRv());
-  BOB_CATCH_MEMBER("shape could not be read", 0)
-}
-
-/***** supervector_length *****/
-static auto supervector_length = bob::extension::VariableDoc(
-  "supervector_length",
-  "int",
-
-  "Returns the supervector length."
-  "NGaussians x NInputs: Number of Gaussian components by the feature dimensionality",
-  
-  "@warning An exception is thrown if no Universal Background Model has been set yet."
-);
-PyObject* PyBobLearnMiscJFAMachine_getSupervectorLength(PyBobLearnMiscJFAMachineObject* self, void*) {
-  BOB_TRY
-  return Py_BuildValue("i", self->cxx->getSupervectorLength());
-  BOB_CATCH_MEMBER("supervector_length could not be read", 0)
-}
-
-
-/***** y *****/
-static auto Y = bob::extension::VariableDoc(
-  "y",
-  "array_like <float, 1D>",
-  "Returns the y speaker factor. Eq (30) from [McCool2013]",
-  ""
-);
-PyObject* PyBobLearnMiscJFAMachine_getY(PyBobLearnMiscJFAMachineObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getY());
-  BOB_CATCH_MEMBER("`y` could not be read", 0)
-}
-int PyBobLearnMiscJFAMachine_setY(PyBobLearnMiscJFAMachineObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, Y.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "y");
-  if (!b) return -1;
-  self->cxx->setY(*b);
-  return 0;
-  BOB_CATCH_MEMBER("`y` vector could not be set", -1)
-}
-
-
-/***** z *****/
-static auto Z = bob::extension::VariableDoc(
-  "z",
-  "array_like <float, 1D>",
-  "Returns the z speaker factor. Eq (31) from [McCool2013]",
-  ""
-);
-PyObject* PyBobLearnMiscJFAMachine_getZ(PyBobLearnMiscJFAMachineObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getZ());
-  BOB_CATCH_MEMBER("`z` could not be read", 0)
-}
-int PyBobLearnMiscJFAMachine_setZ(PyBobLearnMiscJFAMachineObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, Z.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "z");
-  if (!b) return -1;
-  self->cxx->setZ(*b);
-  return 0;
-  BOB_CATCH_MEMBER("`z` vector could not be set", -1)
-}
-
-
-/***** x *****/
-static auto X = bob::extension::VariableDoc(
-  "x",
-  "array_like <float, 1D>",
-  "Returns the X session factor. Eq (29) from [McCool2013]",
-  "The latent variable x (last one computed). This is a feature provided for convenience, but this attribute is not 'part' of the machine. The session latent variable x is indeed not class-specific, but depends on the sample considered. Furthermore, it is not saved into the machine or used when comparing machines."
-);
-PyObject* PyBobLearnMiscJFAMachine_getX(PyBobLearnMiscJFAMachineObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getX());
-  BOB_CATCH_MEMBER("`x` could not be read", 0)
-}
-
-
-/***** jfa_base *****/
-static auto jfa_base = bob::extension::VariableDoc(
-  "jfa_base",
-  ":py:class:`bob.learn.misc.JFABase`",
-  "The JFABase attached to this machine",
-  ""
-);
-PyObject* PyBobLearnMiscJFAMachine_getJFABase(PyBobLearnMiscJFAMachineObject* self, void*){
-  BOB_TRY
-
-  boost::shared_ptr<bob::learn::misc::JFABase> jfa_base_o = self->cxx->getJFABase();
-
-  //Allocating the correspondent python object
-  PyBobLearnMiscJFABaseObject* retval =
-    (PyBobLearnMiscJFABaseObject*)PyBobLearnMiscJFABase_Type.tp_alloc(&PyBobLearnMiscJFABase_Type, 0);
-  retval->cxx = jfa_base_o;
-
-  return Py_BuildValue("O",retval);
-  BOB_CATCH_MEMBER("jfa_base could not be read", 0)
-}
-int PyBobLearnMiscJFAMachine_setJFABase(PyBobLearnMiscJFAMachineObject* self, PyObject* value, void*){
-  BOB_TRY
-
-  if (!PyBobLearnMiscJFABase_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a :py:class:`bob.learn.misc.JFABase`", Py_TYPE(self)->tp_name, jfa_base.name());
-    return -1;
-  }
-
-  PyBobLearnMiscJFABaseObject* jfa_base_o = 0;
-  PyArg_Parse(value, "O!", &PyBobLearnMiscJFABase_Type,&jfa_base_o);
-
-  self->cxx->setJFABase(jfa_base_o->cxx);
-
-  return 0;
-  BOB_CATCH_MEMBER("jfa_base could not be set", -1)  
-}
-
-
-
-
-static PyGetSetDef PyBobLearnMiscJFAMachine_getseters[] = { 
-  {
-   shape.name(),
-   (getter)PyBobLearnMiscJFAMachine_getShape,
-   0,
-   shape.doc(),
-   0
-  },
-  
-  {
-   supervector_length.name(),
-   (getter)PyBobLearnMiscJFAMachine_getSupervectorLength,
-   0,
-   supervector_length.doc(),
-   0
-  },
-  
-  {
-   jfa_base.name(),
-   (getter)PyBobLearnMiscJFAMachine_getJFABase,
-   (setter)PyBobLearnMiscJFAMachine_setJFABase,
-   jfa_base.doc(),
-   0
-  },
-
-  {
-   Y.name(),
-   (getter)PyBobLearnMiscJFAMachine_getY,
-   (setter)PyBobLearnMiscJFAMachine_setY,
-   Y.doc(),
-   0
-  },
-
-  {
-   Z.name(),
-   (getter)PyBobLearnMiscJFAMachine_getZ,
-   (setter)PyBobLearnMiscJFAMachine_setZ,
-   Z.doc(),
-   0
-  },
-
-  {
-   X.name(),
-   (getter)PyBobLearnMiscJFAMachine_getX,
-   0,
-   X.doc(),
-   0
-  },
-
-
-  {0}  // Sentinel
-};
-
-
-/******************************************************************/
-/************ Functions Section ***********************************/
-/******************************************************************/
-
-
-/*** save ***/
-static auto save = bob::extension::FunctionDoc(
-  "save",
-  "Save the configuration of the JFAMachine to a given HDF5 file"
-)
-.add_prototype("hdf5")
-.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing");
-static PyObject* PyBobLearnMiscJFAMachine_Save(PyBobLearnMiscJFAMachineObject* self,  PyObject* args, PyObject* kwargs) {
-
-  BOB_TRY
-  
-  // get list of arguments
-  char** kwlist = save.kwlist(0);  
-  PyBobIoHDF5FileObject* hdf5;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-
-  auto hdf5_ = make_safe(hdf5);
-  self->cxx->save(*hdf5->f);
-
-  BOB_CATCH_MEMBER("cannot save the data", 0)
-  Py_RETURN_NONE;
-}
-
-/*** load ***/
-static auto load = bob::extension::FunctionDoc(
-  "load",
-  "Load the configuration of the JFAMachine to a given HDF5 file"
-)
-.add_prototype("hdf5")
-.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
-static PyObject* PyBobLearnMiscJFAMachine_Load(PyBobLearnMiscJFAMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = load.kwlist(0);  
-  PyBobIoHDF5FileObject* hdf5;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-  
-  auto hdf5_ = make_safe(hdf5);  
-  self->cxx->load(*hdf5->f);
-
-  BOB_CATCH_MEMBER("cannot load the data", 0)
-  Py_RETURN_NONE;
-}
-
-
-/*** is_similar_to ***/
-static auto is_similar_to = bob::extension::FunctionDoc(
-  "is_similar_to",
-  
-  "Compares this JFAMachine with the ``other`` one to be approximately the same.",
-  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
-  "relative and absolute precision for the ``weights``, ``biases`` "
-  "and any other values internal to this machine."
-)
-.add_prototype("other, [r_epsilon], [a_epsilon]","output")
-.add_parameter("other", ":py:class:`bob.learn.misc.JFAMachine`", "A JFAMachine object to be compared.")
-.add_parameter("r_epsilon", "float", "Relative precision.")
-.add_parameter("a_epsilon", "float", "Absolute precision.")
-.add_return("output","bool","True if it is similar, otherwise false.");
-static PyObject* PyBobLearnMiscJFAMachine_IsSimilarTo(PyBobLearnMiscJFAMachineObject* self, PyObject* args, PyObject* kwds) {
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = is_similar_to.kwlist(0);
-
-  //PyObject* other = 0;
-  PyBobLearnMiscJFAMachineObject* other = 0;
-  double r_epsilon = 1.e-5;
-  double a_epsilon = 1.e-8;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
-        &PyBobLearnMiscJFAMachine_Type, &other,
-        &r_epsilon, &a_epsilon)){
-
-        is_similar_to.print_usage(); 
-        return 0;        
-  }
-
-  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
-    Py_RETURN_TRUE;
-  else
-    Py_RETURN_FALSE;
-}
-
-
-/*** estimate_x ***/
-static auto estimate_x = bob::extension::FunctionDoc(
-  "estimate_x",
-  "Estimates the session offset x (LPT assumption) given GMM statistics.",
-  "Estimates x from the GMM statistics considering the LPT assumption, that is the latent session variable x is approximated using the UBM", 
-  true
-)
-.add_prototype("stats,input")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "Statistics of the GMM")
-.add_parameter("input", "array_like <float, 1D>", "Input vector");
-static PyObject* PyBobLearnMiscJFAMachine_estimateX(PyBobLearnMiscJFAMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  char** kwlist = estimate_x.kwlist(0);
-
-  PyBobLearnMiscGMMStatsObject* stats = 0;
-  PyBlitzArrayObject* input           = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnMiscGMMStats_Type, &stats, 
-                                                                 &PyBlitzArray_Converter,&input))
-    Py_RETURN_NONE;
-
-  //protects acquired resources through this scope
-  auto input_ = make_safe(input);
-  self->cxx->estimateX(*stats->cxx, *PyBlitzArrayCxx_AsBlitz<double,1>(input));
-
-  BOB_CATCH_MEMBER("cannot estimate X", 0)
-  Py_RETURN_NONE;
-}
-
-
-/*** estimate_ux ***/
-static auto estimate_ux = bob::extension::FunctionDoc(
-  "estimate_ux",
-  "Estimates Ux (LPT assumption) given GMM statistics.",
-  "Estimates Ux from the GMM statistics considering the LPT assumption, that is the latent session variable x is approximated using the UBM.", 
-  true
-)
-.add_prototype("stats,input")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "Statistics of the GMM")
-.add_parameter("input", "array_like <float, 1D>", "Input vector");
-static PyObject* PyBobLearnMiscJFAMachine_estimateUx(PyBobLearnMiscJFAMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  char** kwlist = estimate_ux.kwlist(0);
-
-  PyBobLearnMiscGMMStatsObject* stats = 0;
-  PyBlitzArrayObject* input           = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnMiscGMMStats_Type, &stats, 
-                                                                 &PyBlitzArray_Converter,&input))
-    Py_RETURN_NONE;
-
-  //protects acquired resources through this scope
-  auto input_ = make_safe(input);
-  self->cxx->estimateUx(*stats->cxx, *PyBlitzArrayCxx_AsBlitz<double,1>(input));
-
-  BOB_CATCH_MEMBER("cannot estimate Ux", 0)
-  Py_RETURN_NONE;
-}
-
-
-/*** forward_ux ***/
-static auto forward_ux = bob::extension::FunctionDoc(
-  "forward_ux",
-  "Computes a score for the given UBM statistics and given the Ux vector",
-  "", 
-  true
-)
-.add_prototype("stats,ux")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "Statistics as input")
-.add_parameter("ux", "array_like <float, 1D>", "Input vector");
-static PyObject* PyBobLearnMiscJFAMachine_ForwardUx(PyBobLearnMiscJFAMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  char** kwlist = forward_ux.kwlist(0);
-
-  PyBobLearnMiscGMMStatsObject* stats = 0;
-  PyBlitzArrayObject* ux_input        = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnMiscGMMStats_Type, &stats, 
-                                                                 &PyBlitzArray_Converter,&ux_input))
-    Py_RETURN_NONE;
-
-  //protects acquired resources through this scope
-  auto ux_input_ = make_safe(ux_input);
-  double score = self->cxx->forward(*stats->cxx, *PyBlitzArrayCxx_AsBlitz<double,1>(ux_input));
-  
-  return Py_BuildValue("d", score);
-  BOB_CATCH_MEMBER("cannot forward_ux", 0)
-
-}
-
-
-/*** forward ***/
-static auto forward = bob::extension::FunctionDoc(
-  "forward",
-  "Execute the machine",
-  "", 
-  true
-)
-.add_prototype("stats")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "Statistics as input");
-static PyObject* PyBobLearnMiscJFAMachine_Forward(PyBobLearnMiscJFAMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  char** kwlist = forward.kwlist(0);
-
-  PyBobLearnMiscGMMStatsObject* stats = 0;
-  
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscGMMStats_Type, &stats))
-    Py_RETURN_NONE;
-
-  //protects acquired resources through this scope
-  double score = self->cxx->forward(*stats->cxx);
-
-  return Py_BuildValue("d", score);
-  BOB_CATCH_MEMBER("cannot forward", 0)
-
-}
-
-
-static PyMethodDef PyBobLearnMiscJFAMachine_methods[] = {
-  {
-    save.name(),
-    (PyCFunction)PyBobLearnMiscJFAMachine_Save,
-    METH_VARARGS|METH_KEYWORDS,
-    save.doc()
-  },
-  {
-    load.name(),
-    (PyCFunction)PyBobLearnMiscJFAMachine_Load,
-    METH_VARARGS|METH_KEYWORDS,
-    load.doc()
-  },
-  {
-    is_similar_to.name(),
-    (PyCFunction)PyBobLearnMiscJFAMachine_IsSimilarTo,
-    METH_VARARGS|METH_KEYWORDS,
-    is_similar_to.doc()
-  },
-  
-  {
-    estimate_x.name(),
-    (PyCFunction)PyBobLearnMiscJFAMachine_estimateX,
-    METH_VARARGS|METH_KEYWORDS,
-    estimate_x.doc()
-  },
-  
-  {
-    estimate_ux.name(),
-    (PyCFunction)PyBobLearnMiscJFAMachine_estimateUx,
-    METH_VARARGS|METH_KEYWORDS,
-    estimate_ux.doc()
-  },
-
-  {
-    forward_ux.name(),
-    (PyCFunction)PyBobLearnMiscJFAMachine_ForwardUx,
-    METH_VARARGS|METH_KEYWORDS,
-    forward_ux.doc()
-  },
-/*
-  {
-    forward.name(),
-    (PyCFunction)PyBobLearnMiscJFAMachine_Forward,
-    METH_VARARGS|METH_KEYWORDS,
-    forward.doc()
-  },*/
-
-
-  {0} /* Sentinel */
-};
-
-
-/******************************************************************/
-/************ Module Section **************************************/
-/******************************************************************/
-
-// Define the JFA type struct; will be initialized later
-PyTypeObject PyBobLearnMiscJFAMachine_Type = {
-  PyVarObject_HEAD_INIT(0,0)
-  0
-};
-
-bool init_BobLearnMiscJFAMachine(PyObject* module)
-{
-  // initialize the type struct
-  PyBobLearnMiscJFAMachine_Type.tp_name      = JFAMachine_doc.name();
-  PyBobLearnMiscJFAMachine_Type.tp_basicsize = sizeof(PyBobLearnMiscJFAMachineObject);
-  PyBobLearnMiscJFAMachine_Type.tp_flags     = Py_TPFLAGS_DEFAULT;
-  PyBobLearnMiscJFAMachine_Type.tp_doc       = JFAMachine_doc.doc();
-
-  // set the functions
-  PyBobLearnMiscJFAMachine_Type.tp_new         = PyType_GenericNew;
-  PyBobLearnMiscJFAMachine_Type.tp_init        = reinterpret_cast<initproc>(PyBobLearnMiscJFAMachine_init);
-  PyBobLearnMiscJFAMachine_Type.tp_dealloc     = reinterpret_cast<destructor>(PyBobLearnMiscJFAMachine_delete);
-  PyBobLearnMiscJFAMachine_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnMiscJFAMachine_RichCompare);
-  PyBobLearnMiscJFAMachine_Type.tp_methods     = PyBobLearnMiscJFAMachine_methods;
-  PyBobLearnMiscJFAMachine_Type.tp_getset      = PyBobLearnMiscJFAMachine_getseters;
-  PyBobLearnMiscJFAMachine_Type.tp_call        = reinterpret_cast<ternaryfunc>(PyBobLearnMiscJFAMachine_Forward);
-
-
-  // check that everything is fine
-  if (PyType_Ready(&PyBobLearnMiscJFAMachine_Type) < 0) return false;
-
-  // add the type to the module
-  Py_INCREF(&PyBobLearnMiscJFAMachine_Type);
-  return PyModule_AddObject(module, "JFAMachine", (PyObject*)&PyBobLearnMiscJFAMachine_Type) >= 0;
-}
-
diff --git a/bob/learn/misc/jfa_trainer.cpp b/bob/learn/misc/jfa_trainer.cpp
deleted file mode 100644
index 8d46e94f35e6311847b795e0380b89a4af75d124..0000000000000000000000000000000000000000
--- a/bob/learn/misc/jfa_trainer.cpp
+++ /dev/null
@@ -1,1013 +0,0 @@
-/**
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- * @date Sun 01 Fev 09:40:00 2015
- *
- * @brief Python API for bob::learn::em
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "main.h"
-#include <boost/make_shared.hpp>
-
-/******************************************************************/
-/************ Constructor Section *********************************/
-/******************************************************************/
-
-static int extract_GMMStats_1d(PyObject *list,
-                             std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& training_data)
-{
-  for (int i=0; i<PyList_GET_SIZE(list); i++){
-  
-    PyBobLearnMiscGMMStatsObject* stats;
-    if (!PyArg_Parse(PyList_GetItem(list, i), "O!", &PyBobLearnMiscGMMStats_Type, &stats)){
-      PyErr_Format(PyExc_RuntimeError, "Expected GMMStats objects");
-      return -1;
-    }
-    training_data.push_back(stats->cxx);
-  }
-  return 0;
-}
-
-static int extract_GMMStats_2d(PyObject *list,
-                             std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& training_data)
-{
-  for (int i=0; i<PyList_GET_SIZE(list); i++)
-  {
-    PyObject* another_list;
-    PyArg_Parse(PyList_GetItem(list, i), "O!", &PyList_Type, &another_list);
-
-    std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > another_training_data;
-    for (int j=0; j<PyList_GET_SIZE(another_list); j++){
-
-      PyBobLearnMiscGMMStatsObject* stats;
-      if (!PyArg_Parse(PyList_GetItem(another_list, j), "O!", &PyBobLearnMiscGMMStats_Type, &stats)){
-        PyErr_Format(PyExc_RuntimeError, "Expected GMMStats objects");
-        return -1;
-      }
-      another_training_data.push_back(stats->cxx);
-    }
-    training_data.push_back(another_training_data);
-  }
-  return 0;
-}
-
-template <int N>
-static PyObject* vector_as_list(const std::vector<blitz::Array<double,N> >& vec)
-{
-  PyObject* list = PyList_New(vec.size());
-  for(size_t i=0; i<vec.size(); i++){
-    blitz::Array<double,N> numpy_array = vec[i];
-    PyObject* numpy_py_object = PyBlitzArrayCxx_AsNumpy(numpy_array);
-    PyList_SET_ITEM(list, i, numpy_py_object);
-  }
-  return list;
-}
-
-template <int N>
-int list_as_vector(PyObject* list, std::vector<blitz::Array<double,N> >& vec)
-{
-  for (int i=0; i<PyList_GET_SIZE(list); i++)
-  {
-    PyBlitzArrayObject* blitz_object; 
-    if (!PyArg_Parse(PyList_GetItem(list, i), "O&", &PyBlitzArray_Converter, &blitz_object)){
-      PyErr_Format(PyExc_RuntimeError, "Expected numpy array object");
-      return -1;
-    }
-    auto blitz_object_ = make_safe(blitz_object);
-    vec.push_back(*PyBlitzArrayCxx_AsBlitz<double,N>(blitz_object));
-  }
-  return 0;
-}
-
-
-
-static auto JFATrainer_doc = bob::extension::ClassDoc(
-  BOB_EXT_MODULE_PREFIX ".JFATrainer",
-  "JFATrainer"
-  "References: [Vogt2008,McCool2013]",
-  ""
-).add_constructor(
-  bob::extension::FunctionDoc(
-    "__init__",
-    "Constructor. Builds a new JFATrainer",
-    "",
-    true
-  )
-  .add_prototype("other","")
-  .add_prototype("","")
-  .add_parameter("other", ":py:class:`bob.learn.misc.JFATrainer`", "A JFATrainer object to be copied.")
-);
-
-
-static int PyBobLearnMiscJFATrainer_init_copy(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = JFATrainer_doc.kwlist(0);
-  PyBobLearnMiscJFATrainerObject* o;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscJFATrainer_Type, &o)){
-    JFATrainer_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::JFATrainer(*o->cxx));
-  return 0;
-}
-
-
-static int PyBobLearnMiscJFATrainer_init(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // get the number of command line arguments
-  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-
-  switch(nargs){
-    case 0:{
-      self->cxx.reset(new bob::learn::misc::JFATrainer());
-      return 0;
-    }
-    case 1:{
-      // If the constructor input is JFATrainer object
-      return PyBobLearnMiscJFATrainer_init_copy(self, args, kwargs);
-    }
-    default:{
-      PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires only 0 and 1 argument, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
-      JFATrainer_doc.print_usage();
-      return -1;
-    }
-  }
-  BOB_CATCH_MEMBER("cannot create JFATrainer", 0)
-  return 0;
-}
-
-
-static void PyBobLearnMiscJFATrainer_delete(PyBobLearnMiscJFATrainerObject* self) {
-  self->cxx.reset();
-  Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-
-int PyBobLearnMiscJFATrainer_Check(PyObject* o) {
-  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnMiscJFATrainer_Type));
-}
-
-
-static PyObject* PyBobLearnMiscJFATrainer_RichCompare(PyBobLearnMiscJFATrainerObject* self, PyObject* other, int op) {
-  BOB_TRY
-
-  if (!PyBobLearnMiscJFATrainer_Check(other)) {
-    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
-    return 0;
-  }
-  auto other_ = reinterpret_cast<PyBobLearnMiscJFATrainerObject*>(other);
-  switch (op) {
-    case Py_EQ:
-      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
-    case Py_NE:
-      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
-    default:
-      Py_INCREF(Py_NotImplemented);
-      return Py_NotImplemented;
-  }
-  BOB_CATCH_MEMBER("cannot compare JFATrainer objects", 0)
-}
-
-
-/******************************************************************/
-/************ Variables Section ***********************************/
-/******************************************************************/
-
-static auto acc_v_a1 = bob::extension::VariableDoc(
-  "acc_v_a1",
-  "array_like <float, 3D>",
-  "Accumulator updated during the E-step",
-  ""
-);
-PyObject* PyBobLearnMiscJFATrainer_get_acc_v_a1(PyBobLearnMiscJFATrainerObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccVA1());
-  BOB_CATCH_MEMBER("acc_v_a1 could not be read", 0)
-}
-int PyBobLearnMiscJFATrainer_set_acc_v_a1(PyBobLearnMiscJFATrainerObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 3D array of floats", Py_TYPE(self)->tp_name, acc_v_a1.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,3>(o, "acc_v_a1");
-  if (!b) return -1;
-  self->cxx->setAccVA1(*b);
-  return 0;
-  BOB_CATCH_MEMBER("acc_v_a1 could not be set", -1)
-}
-
-
-static auto acc_v_a2 = bob::extension::VariableDoc(
-  "acc_v_a2",
-  "array_like <float, 2D>",
-  "Accumulator updated during the E-step",
-  ""
-);
-PyObject* PyBobLearnMiscJFATrainer_get_acc_v_a2(PyBobLearnMiscJFATrainerObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccVA2());
-  BOB_CATCH_MEMBER("acc_v_a2 could not be read", 0)
-}
-int PyBobLearnMiscJFATrainer_set_acc_v_a2(PyBobLearnMiscJFATrainerObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, acc_v_a2.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "acc_v_a2");
-  if (!b) return -1;
-  self->cxx->setAccVA2(*b);
-  return 0;
-  BOB_CATCH_MEMBER("acc_v_a2 could not be set", -1)
-}
-
-
-static auto acc_u_a1 = bob::extension::VariableDoc(
-  "acc_u_a1",
-  "array_like <float, 3D>",
-  "Accumulator updated during the E-step",
-  ""
-);
-PyObject* PyBobLearnMiscJFATrainer_get_acc_u_a1(PyBobLearnMiscJFATrainerObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccUA1());
-  BOB_CATCH_MEMBER("acc_u_a1 could not be read", 0)
-}
-int PyBobLearnMiscJFATrainer_set_acc_u_a1(PyBobLearnMiscJFATrainerObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 3D array of floats", Py_TYPE(self)->tp_name, acc_u_a1.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,3>(o, "acc_u_a1");
-  if (!b) return -1;
-  self->cxx->setAccUA1(*b);
-  return 0;
-  BOB_CATCH_MEMBER("acc_u_a1 could not be set", -1)
-}
-
-
-static auto acc_u_a2 = bob::extension::VariableDoc(
-  "acc_u_a2",
-  "array_like <float, 2D>",
-  "Accumulator updated during the E-step",
-  ""
-);
-PyObject* PyBobLearnMiscJFATrainer_get_acc_u_a2(PyBobLearnMiscJFATrainerObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccUA2());
-  BOB_CATCH_MEMBER("acc_u_a2 could not be read", 0)
-}
-int PyBobLearnMiscJFATrainer_set_acc_u_a2(PyBobLearnMiscJFATrainerObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, acc_u_a2.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "acc_u_a2");
-  if (!b) return -1;
-  self->cxx->setAccUA2(*b);
-  return 0;
-  BOB_CATCH_MEMBER("acc_u_a2 could not be set", -1)
-}
-
-
-static auto acc_d_a1 = bob::extension::VariableDoc(
-  "acc_d_a1",
-  "array_like <float, 1D>",
-  "Accumulator updated during the E-step",
-  ""
-);
-PyObject* PyBobLearnMiscJFATrainer_get_acc_d_a1(PyBobLearnMiscJFATrainerObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccDA1());
-  BOB_CATCH_MEMBER("acc_d_a1 could not be read", 0)
-}
-int PyBobLearnMiscJFATrainer_set_acc_d_a1(PyBobLearnMiscJFATrainerObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, acc_d_a1.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "acc_d_a1");
-  if (!b) return -1;
-  self->cxx->setAccDA1(*b);
-  return 0;
-  BOB_CATCH_MEMBER("acc_d_a1 could not be set", -1)
-}
-
-
-static auto acc_d_a2 = bob::extension::VariableDoc(
-  "acc_d_a2",
-  "array_like <float, 1D>",
-  "Accumulator updated during the E-step",
-  ""
-);
-PyObject* PyBobLearnMiscJFATrainer_get_acc_d_a2(PyBobLearnMiscJFATrainerObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccDA2());
-  BOB_CATCH_MEMBER("acc_d_a2 could not be read", 0)
-}
-int PyBobLearnMiscJFATrainer_set_acc_d_a2(PyBobLearnMiscJFATrainerObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, acc_d_a2.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "acc_d_a2");
-  if (!b) return -1;
-  self->cxx->setAccDA2(*b);
-  return 0;
-  BOB_CATCH_MEMBER("acc_d_a2 could not be set", -1)
-}
-
-
-static auto __X__ = bob::extension::VariableDoc(
-  "__X__",
-  "list",
-  "",
-  ""
-);
-PyObject* PyBobLearnMiscJFATrainer_get_X(PyBobLearnMiscJFATrainerObject* self, void*){
-  BOB_TRY
-  return vector_as_list(self->cxx->getX());
-  BOB_CATCH_MEMBER("__X__ could not be read", 0)
-}
-int PyBobLearnMiscJFATrainer_set_X(PyBobLearnMiscJFATrainerObject* self, PyObject* value, void*){
-  BOB_TRY
-
-  // Parses input arguments in a single shot
-  if (!PyList_Check(value)){
-    PyErr_Format(PyExc_TypeError, "Expected a list in `%s'", __X__.name());
-    return -1;
-  }
-    
-  std::vector<blitz::Array<double,2> > data;
-  if(list_as_vector(value ,data)==0){
-    self->cxx->setX(data);
-  }
-    
-  return 0;
-  BOB_CATCH_MEMBER("__X__ could not be written", 0)
-}
-
-
-
-static auto __Y__ = bob::extension::VariableDoc(
-  "__Y__",
-  "list",
-  "",
-  ""
-);
-PyObject* PyBobLearnMiscJFATrainer_get_Y(PyBobLearnMiscJFATrainerObject* self, void*){
-  BOB_TRY
-  return vector_as_list(self->cxx->getY());
-  BOB_CATCH_MEMBER("__Y__ could not be read", 0)
-}
-int PyBobLearnMiscJFATrainer_set_Y(PyBobLearnMiscJFATrainerObject* self, PyObject* value, void*){
-  BOB_TRY
-
-  // Parses input arguments in a single shot
-  if (!PyList_Check(value)){
-    PyErr_Format(PyExc_TypeError, "Expected a list in `%s'", __Y__.name());
-    return -1;
-  }
-    
-  std::vector<blitz::Array<double,1> > data;
-  if(list_as_vector(value ,data)==0){
-    self->cxx->setY(data);
-  }
-    
-  return 0;
-  BOB_CATCH_MEMBER("__Y__ could not be written", 0)
-}
-
-
-
-static auto __Z__ = bob::extension::VariableDoc(
-  "__Z__",
-  "list",
-  "",
-  ""
-);
-PyObject* PyBobLearnMiscJFATrainer_get_Z(PyBobLearnMiscJFATrainerObject* self, void*){
-  BOB_TRY
-  return vector_as_list(self->cxx->getZ());
-  BOB_CATCH_MEMBER("__Z__ could not be read", 0)
-}
-int PyBobLearnMiscJFATrainer_set_Z(PyBobLearnMiscJFATrainerObject* self, PyObject* value, void*){
-  BOB_TRY
-
-  // Parses input arguments in a single shot
-  if (!PyList_Check(value)){
-    PyErr_Format(PyExc_TypeError, "Expected a list in `%s'", __Z__.name());
-    return -1;
-  }
-    
-  std::vector<blitz::Array<double,1> > data;
-  if(list_as_vector(value ,data)==0){
-    self->cxx->setZ(data);
-  }
-    
-  return 0;
-  BOB_CATCH_MEMBER("__Z__ could not be written", 0)
-}
-
-
-
-/***** rng *****/
-static auto rng = bob::extension::VariableDoc(
-  "rng",
-  "str",
-  "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.",
-  ""
-);
-PyObject* PyBobLearnMiscJFATrainer_getRng(PyBobLearnMiscJFATrainerObject* self, void*) {
-  BOB_TRY
-  //Allocating the correspondent python object
-  
-  PyBoostMt19937Object* retval =
-    (PyBoostMt19937Object*)PyBoostMt19937_Type.tp_alloc(&PyBoostMt19937_Type, 0);
-
-  retval->rng = self->cxx->getRng().get();
-  return Py_BuildValue("O", retval);
-  BOB_CATCH_MEMBER("Rng method could not be read", 0)
-}
-int PyBobLearnMiscJFATrainer_setRng(PyBobLearnMiscJFATrainerObject* self, PyObject* value, void*) {
-  BOB_TRY
-
-  if (!PyBoostMt19937_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects an PyBoostMt19937_Check", Py_TYPE(self)->tp_name, rng.name());
-    return -1;
-  }
-
-  PyBoostMt19937Object* boostObject = 0;
-  PyBoostMt19937_Converter(value, &boostObject);
-  self->cxx->setRng((boost::shared_ptr<boost::mt19937>)boostObject->rng);
-
-  return 0;
-  BOB_CATCH_MEMBER("Rng could not be set", 0)
-}
-
-static PyGetSetDef PyBobLearnMiscJFATrainer_getseters[] = { 
-  {
-   acc_v_a1.name(),
-   (getter)PyBobLearnMiscJFATrainer_get_acc_v_a1,
-   (setter)PyBobLearnMiscJFATrainer_get_acc_v_a1,
-   acc_v_a1.doc(),
-   0
-  },
-  {
-   acc_v_a2.name(),
-   (getter)PyBobLearnMiscJFATrainer_get_acc_v_a2,
-   (setter)PyBobLearnMiscJFATrainer_get_acc_v_a2,
-   acc_v_a2.doc(),
-   0
-  },
-  {
-   acc_u_a1.name(),
-   (getter)PyBobLearnMiscJFATrainer_get_acc_u_a1,
-   (setter)PyBobLearnMiscJFATrainer_get_acc_u_a1,
-   acc_u_a1.doc(),
-   0
-  },
-  {
-   acc_u_a2.name(),
-   (getter)PyBobLearnMiscJFATrainer_get_acc_u_a2,
-   (setter)PyBobLearnMiscJFATrainer_get_acc_u_a2,
-   acc_u_a2.doc(),
-   0
-  },
-  {
-   acc_d_a1.name(),
-   (getter)PyBobLearnMiscJFATrainer_get_acc_d_a1,
-   (setter)PyBobLearnMiscJFATrainer_get_acc_d_a1,
-   acc_d_a1.doc(),
-   0
-  },
-  {
-   acc_d_a2.name(),
-   (getter)PyBobLearnMiscJFATrainer_get_acc_d_a2,
-   (setter)PyBobLearnMiscJFATrainer_get_acc_d_a2,
-   acc_d_a2.doc(),
-   0
-  },
-  {
-   rng.name(),
-   (getter)PyBobLearnMiscJFATrainer_getRng,
-   (setter)PyBobLearnMiscJFATrainer_setRng,
-   rng.doc(),
-   0
-  },
-  {
-   __X__.name(),
-   (getter)PyBobLearnMiscJFATrainer_get_X,
-   (setter)PyBobLearnMiscJFATrainer_set_X,
-   __X__.doc(),
-   0
-  },
-  {
-   __Y__.name(),
-   (getter)PyBobLearnMiscJFATrainer_get_Y,
-   (setter)PyBobLearnMiscJFATrainer_set_Y,
-   __Y__.doc(),
-   0
-  },
-  {
-   __Z__.name(),
-   (getter)PyBobLearnMiscJFATrainer_get_Z,
-   (setter)PyBobLearnMiscJFATrainer_set_Z,
-   __Z__.doc(),
-   0
-  },
-  
-  
-
-  {0}  // Sentinel
-};
-
-
-/******************************************************************/
-/************ Functions Section ***********************************/
-/******************************************************************/
-
-/*** initialize ***/
-static auto initialize = bob::extension::FunctionDoc(
-  "initialize",
-  "Initialization before the EM steps",
-  "",
-  true
-)
-.add_prototype("jfa_base,stats")
-.add_parameter("jfa_base", ":py:class:`bob.learn.misc.JFABase`", "JFABase Object")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "GMMStats Object");
-static PyObject* PyBobLearnMiscJFATrainer_initialize(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = initialize.kwlist(0);
-
-  PyBobLearnMiscJFABaseObject* jfa_base = 0;
-  PyObject* stats = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscJFABase_Type, &jfa_base,
-                                                                 &PyList_Type, &stats)) Py_RETURN_NONE;
-
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  if(extract_GMMStats_2d(stats ,training_data)==0)
-    self->cxx->initialize(*jfa_base->cxx, training_data);
-
-  BOB_CATCH_MEMBER("cannot perform the initialize method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** e_step1 ***/
-static auto e_step1 = bob::extension::FunctionDoc(
-  "e_step1",
-  "Call the 1st e-step procedure (for the V subspace).",
-  "",
-  true
-)
-.add_prototype("jfa_base,stats")
-.add_parameter("jfa_base", ":py:class:`bob.learn.misc.JFABase`", "JFABase Object")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "GMMStats Object");
-static PyObject* PyBobLearnMiscJFATrainer_e_step1(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  //Parses input arguments in a single shot
-  char** kwlist = e_step1.kwlist(0);
-
-  PyBobLearnMiscJFABaseObject* jfa_base = 0;
-  PyObject* stats = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscJFABase_Type, &jfa_base,
-                                                                 &PyList_Type, &stats)) Py_RETURN_NONE;
-
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  if(extract_GMMStats_2d(stats ,training_data)==0)
-    self->cxx->eStep1(*jfa_base->cxx, training_data);
-
-
-  BOB_CATCH_MEMBER("cannot perform the e_step1 method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** m_step1 ***/
-static auto m_step1 = bob::extension::FunctionDoc(
-  "m_step1",
-  "Call the 1st m-step procedure (for the V subspace).",
-  "",
-  true
-)
-.add_prototype("jfa_base,stats")
-.add_parameter("jfa_base", ":py:class:`bob.learn.misc.JFABase`", "JFABase Object")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "GMMStats Object");
-static PyObject* PyBobLearnMiscJFATrainer_m_step1(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // Parses input arguments in a single shot
-  char** kwlist = m_step1.kwlist(0);
-
-  PyBobLearnMiscJFABaseObject* jfa_base = 0;
-  PyObject* stats = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscJFABase_Type, &jfa_base,
-                                                                 &PyList_Type, &stats)) Py_RETURN_NONE;
-
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  if(extract_GMMStats_2d(stats ,training_data)==0)
-    self->cxx->mStep1(*jfa_base->cxx, training_data);
-
-  BOB_CATCH_MEMBER("cannot perform the m_step1 method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** finalize1 ***/
-static auto finalize1 = bob::extension::FunctionDoc(
-  "finalize1",
-  "Call the 1st finalize procedure (for the V subspace).",
-  "",
-  true
-)
-.add_prototype("jfa_base,stats")
-.add_parameter("jfa_base", ":py:class:`bob.learn.misc.JFABase`", "JFABase Object")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "GMMStats Object");
-static PyObject* PyBobLearnMiscJFATrainer_finalize1(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  //Parses input arguments in a single shot
-  char** kwlist = finalize1.kwlist(0);
-
-  PyBobLearnMiscJFABaseObject* jfa_base = 0;
-  PyObject* stats = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscJFABase_Type, &jfa_base,
-                                                                 &PyList_Type, &stats)) Py_RETURN_NONE;
-
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  if(extract_GMMStats_2d(stats ,training_data)==0)
-    self->cxx->finalize1(*jfa_base->cxx, training_data);
-
-  BOB_CATCH_MEMBER("cannot perform the finalize1 method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** e_step2 ***/
-static auto e_step2 = bob::extension::FunctionDoc(
-  "e_step2",
-  "Call the 2nd e-step procedure (for the U subspace).",
-  "",
-  true
-)
-.add_prototype("jfa_base,stats")
-.add_parameter("jfa_base", ":py:class:`bob.learn.misc.JFABase`", "JFABase Object")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "GMMStats Object");
-static PyObject* PyBobLearnMiscJFATrainer_e_step2(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // Parses input arguments in a single shot
-  char** kwlist = e_step2.kwlist(0);
-
-  PyBobLearnMiscJFABaseObject* jfa_base = 0;
-  PyObject* stats = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscJFABase_Type, &jfa_base,
-                                                                 &PyList_Type, &stats)) Py_RETURN_NONE;
-
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  if(extract_GMMStats_2d(stats ,training_data)==0)
-    self->cxx->eStep2(*jfa_base->cxx, training_data);
-
-  BOB_CATCH_MEMBER("cannot perform the e_step2 method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** m_step2 ***/
-static auto m_step2 = bob::extension::FunctionDoc(
-  "m_step2",
-  "Call the 2nd m-step procedure (for the U subspace).",
-  "",
-  true
-)
-.add_prototype("jfa_base,stats")
-.add_parameter("jfa_base", ":py:class:`bob.learn.misc.JFABase`", "JFABase Object")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "GMMStats Object");
-static PyObject* PyBobLearnMiscJFATrainer_m_step2(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // Parses input arguments in a single shot 
-  char** kwlist = m_step2.kwlist(0);
-
-  PyBobLearnMiscJFABaseObject* jfa_base = 0;
-  PyObject* stats = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscJFABase_Type, &jfa_base,
-                                                                 &PyList_Type, &stats)) Py_RETURN_NONE;
-
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  if(extract_GMMStats_2d(stats ,training_data)==0)
-    self->cxx->mStep2(*jfa_base->cxx, training_data);
-
-  BOB_CATCH_MEMBER("cannot perform the m_step2 method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** finalize2 ***/
-static auto finalize2 = bob::extension::FunctionDoc(
-  "finalize2",
-  "Call the 2nd finalize procedure (for the U subspace).",
-  "",
-  true
-)
-.add_prototype("jfa_base,stats")
-.add_parameter("jfa_base", ":py:class:`bob.learn.misc.JFABase`", "JFABase Object")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "GMMStats Object");
-static PyObject* PyBobLearnMiscJFATrainer_finalize2(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // Parses input arguments in a single shot
-  char** kwlist = finalize2.kwlist(0);
-
-  PyBobLearnMiscJFABaseObject* jfa_base = 0;
-  PyObject* stats = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscJFABase_Type, &jfa_base,
-                                                                 &PyList_Type, &stats)) Py_RETURN_NONE;
-
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  if(extract_GMMStats_2d(stats ,training_data)==0)
-    self->cxx->finalize2(*jfa_base->cxx, training_data);
-
-  BOB_CATCH_MEMBER("cannot perform the finalize2 method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** e_step3 ***/
-static auto e_step3 = bob::extension::FunctionDoc(
-  "e_step3",
-  "Call the 3rd e-step procedure (for the d subspace).",
-  "",
-  true
-)
-.add_prototype("jfa_base,stats")
-.add_parameter("jfa_base", ":py:class:`bob.learn.misc.JFABase`", "JFABase Object")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "GMMStats Object");
-static PyObject* PyBobLearnMiscJFATrainer_e_step3(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // Parses input arguments in a single shot
-  char** kwlist = e_step3.kwlist(0);
-
-  PyBobLearnMiscJFABaseObject* jfa_base = 0;
-  PyObject* stats = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscJFABase_Type, &jfa_base,
-                                                                 &PyList_Type, &stats)) Py_RETURN_NONE;
-
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  if(extract_GMMStats_2d(stats ,training_data)==0)
-    self->cxx->eStep3(*jfa_base->cxx, training_data);
-
-  BOB_CATCH_MEMBER("cannot perform the e_step3 method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** m_step3 ***/
-static auto m_step3 = bob::extension::FunctionDoc(
-  "m_step3",
-  "Call the 3rd m-step procedure (for the d subspace).",
-  "",
-  true
-)
-.add_prototype("jfa_base,stats")
-.add_parameter("jfa_base", ":py:class:`bob.learn.misc.JFABase`", "JFABase Object")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "GMMStats Object");
-static PyObject* PyBobLearnMiscJFATrainer_m_step3(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // Parses input arguments in a single shot
-  char** kwlist = m_step3.kwlist(0);
-
-  PyBobLearnMiscJFABaseObject* jfa_base = 0;
-  PyObject* stats = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscJFABase_Type, &jfa_base,
-                                                                 &PyList_Type, &stats)) Py_RETURN_NONE;
-
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  if(extract_GMMStats_2d(stats ,training_data)==0)
-    self->cxx->mStep3(*jfa_base->cxx, training_data);
-
-  BOB_CATCH_MEMBER("cannot perform the m_step3 method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** finalize3 ***/
-static auto finalize3 = bob::extension::FunctionDoc(
-  "finalize3",
-  "Call the 3rd finalize procedure (for the d subspace).",
-  "",
-  true
-)
-.add_prototype("jfa_base,stats")
-.add_parameter("jfa_base", ":py:class:`bob.learn.misc.JFABase`", "JFABase Object")
-.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "GMMStats Object");
-static PyObject* PyBobLearnMiscJFATrainer_finalize3(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // Parses input arguments in a single shot
-  char** kwlist = finalize3.kwlist(0);
-
-  PyBobLearnMiscJFABaseObject* jfa_base = 0;
-  PyObject* stats = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscJFABase_Type, &jfa_base,
-                                                                 &PyList_Type, &stats)) Py_RETURN_NONE;
-
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  if(extract_GMMStats_2d(stats ,training_data)==0)
-    self->cxx->finalize3(*jfa_base->cxx, training_data);
-
-  BOB_CATCH_MEMBER("cannot perform the finalize3 method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** enrol ***/
-static auto enrol = bob::extension::FunctionDoc(
-  "enrol",
-  "",
-  "",
-  true
-)
-.add_prototype("jfa_machine,features,n_iter","")
-.add_parameter("jfa_machine", ":py:class:`bob.learn.misc.JFAMachine`", "JFAMachine Object")
-.add_parameter("features", "list(:py:class:`bob.learn.misc.GMMStats`)`", "")
-.add_parameter("n_iter", "int", "Number of iterations");
-static PyObject* PyBobLearnMiscJFATrainer_enrol(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // Parses input arguments in a single shot
-  char** kwlist = enrol.kwlist(0);
-
-  PyBobLearnMiscJFAMachineObject* jfa_machine = 0;
-  PyObject* stats = 0;
-  int n_iter = 1;
-
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!i", kwlist, &PyBobLearnMiscJFAMachine_Type, &jfa_machine,
-                                                                  &PyList_Type, &stats, &n_iter)) Py_RETURN_NONE;
-
-  std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > training_data;
-  if(extract_GMMStats_1d(stats ,training_data)==0)
-    self->cxx->enrol(*jfa_machine->cxx, training_data, n_iter);
-
-  BOB_CATCH_MEMBER("cannot perform the enrol method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-
-static PyMethodDef PyBobLearnMiscJFATrainer_methods[] = {
-  {
-    initialize.name(),
-    (PyCFunction)PyBobLearnMiscJFATrainer_initialize,
-    METH_VARARGS|METH_KEYWORDS,
-    initialize.doc()
-  },
-  {
-    e_step1.name(),
-    (PyCFunction)PyBobLearnMiscJFATrainer_e_step1,
-    METH_VARARGS|METH_KEYWORDS,
-    e_step1.doc()
-  },
-  {
-    e_step2.name(),
-    (PyCFunction)PyBobLearnMiscJFATrainer_e_step2,
-    METH_VARARGS|METH_KEYWORDS,
-    e_step2.doc()
-  },
-  {
-    e_step3.name(),
-    (PyCFunction)PyBobLearnMiscJFATrainer_e_step3,
-    METH_VARARGS|METH_KEYWORDS,
-    e_step3.doc()
-  },
-  {
-    m_step1.name(),
-    (PyCFunction)PyBobLearnMiscJFATrainer_m_step1,
-    METH_VARARGS|METH_KEYWORDS,
-    m_step1.doc()
-  },
-  {
-    m_step2.name(),
-    (PyCFunction)PyBobLearnMiscJFATrainer_m_step2,
-    METH_VARARGS|METH_KEYWORDS,
-    m_step2.doc()
-  },
-  {
-    m_step3.name(),
-    (PyCFunction)PyBobLearnMiscJFATrainer_m_step3,
-    METH_VARARGS|METH_KEYWORDS,
-    m_step3.doc()
-  },
-  {
-    finalize1.name(),
-    (PyCFunction)PyBobLearnMiscJFATrainer_finalize1,
-    METH_VARARGS|METH_KEYWORDS,
-    finalize1.doc()
-  },
-  {
-    finalize2.name(),
-    (PyCFunction)PyBobLearnMiscJFATrainer_finalize2,
-    METH_VARARGS|METH_KEYWORDS,
-    finalize2.doc()
-  },
-  {
-    finalize3.name(),
-    (PyCFunction)PyBobLearnMiscJFATrainer_finalize3,
-    METH_VARARGS|METH_KEYWORDS,
-    finalize3.doc()
-  },
-  {
-    enrol.name(),
-    (PyCFunction)PyBobLearnMiscJFATrainer_enrol,
-    METH_VARARGS|METH_KEYWORDS,
-    enrol.doc()
-  },
-  {0} /* Sentinel */
-};
-
-
-/******************************************************************/
-/************ Module Section **************************************/
-/******************************************************************/
-
-// Define the Gaussian type struct; will be initialized later
-PyTypeObject PyBobLearnMiscJFATrainer_Type = {
-  PyVarObject_HEAD_INIT(0,0)
-  0
-};
-
-bool init_BobLearnMiscJFATrainer(PyObject* module)
-{
-  // initialize the type JFATrainer
-  PyBobLearnMiscJFATrainer_Type.tp_name      = JFATrainer_doc.name();
-  PyBobLearnMiscJFATrainer_Type.tp_basicsize = sizeof(PyBobLearnMiscJFATrainerObject);
-  PyBobLearnMiscJFATrainer_Type.tp_flags     = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;//Enable the class inheritance;
-  PyBobLearnMiscJFATrainer_Type.tp_doc       = JFATrainer_doc.doc();
-
-  // set the functions
-  PyBobLearnMiscJFATrainer_Type.tp_new          = PyType_GenericNew;
-  PyBobLearnMiscJFATrainer_Type.tp_init         = reinterpret_cast<initproc>(PyBobLearnMiscJFATrainer_init);
-  PyBobLearnMiscJFATrainer_Type.tp_dealloc      = reinterpret_cast<destructor>(PyBobLearnMiscJFATrainer_delete);
-  PyBobLearnMiscJFATrainer_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnMiscJFATrainer_RichCompare);
-  PyBobLearnMiscJFATrainer_Type.tp_methods      = PyBobLearnMiscJFATrainer_methods;
-  PyBobLearnMiscJFATrainer_Type.tp_getset       = PyBobLearnMiscJFATrainer_getseters;
-  //PyBobLearnMiscJFATrainer_Type.tp_call         = reinterpret_cast<ternaryfunc>(PyBobLearnMiscJFATrainer_compute_likelihood);
-
-
-  // check that everything is fine
-  if (PyType_Ready(&PyBobLearnMiscJFATrainer_Type) < 0) return false;
-
-  // add the type to the module
-  Py_INCREF(&PyBobLearnMiscJFATrainer_Type);
-  return PyModule_AddObject(module, "_JFATrainer", (PyObject*)&PyBobLearnMiscJFATrainer_Type) >= 0;
-}
-
diff --git a/bob/learn/misc/kmeans_machine.cpp b/bob/learn/misc/kmeans_machine.cpp
deleted file mode 100644
index 9febd70e4f8fa9d02dc83df845973457514cd9c8..0000000000000000000000000000000000000000
--- a/bob/learn/misc/kmeans_machine.cpp
+++ /dev/null
@@ -1,768 +0,0 @@
-/**
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- * @date Fri 26 Dec 16:18:00 2014
- *
- * @brief Python API for bob::learn::em
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "main.h"
-
-/******************************************************************/
-/************ Constructor Section *********************************/
-/******************************************************************/
-
-static auto KMeansMachine_doc = bob::extension::ClassDoc(
-  BOB_EXT_MODULE_PREFIX ".KMeansMachine",
-  "This class implements a k-means classifier.\n"
-  "See Section 9.1 of Bishop, \"Pattern recognition and machine learning\", 2006"
-).add_constructor(
-  bob::extension::FunctionDoc(
-    "__init__",
-    "Creates a KMeansMachine",
-    "",
-    true
-  )
-  .add_prototype("n_means,n_inputs","")
-  .add_prototype("other","")
-  .add_prototype("hdf5","")
-  .add_prototype("","")
-
-  .add_parameter("n_means", "int", "Number of means")
-  .add_parameter("n_inputs", "int", "Dimension of the feature vector")
-  .add_parameter("other", ":py:class:`bob.learn.misc.KMeansMachine`", "A KMeansMachine object to be copied.")
-  .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
-
-);
-
-
-static int PyBobLearnMiscKMeansMachine_init_number(PyBobLearnMiscKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = KMeansMachine_doc.kwlist(0);
-  int n_inputs    = 1;
-  int n_means = 1;
-  //Parsing the input argments
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "ii", kwlist, &n_means, &n_inputs))
-    return -1;
-
-  if(n_means < 0){
-    PyErr_Format(PyExc_TypeError, "means argument must be greater than or equal to zero");
-    KMeansMachine_doc.print_usage();
-    return -1;
-  }
-
-  if(n_inputs < 0){
-    PyErr_Format(PyExc_TypeError, "input argument must be greater than or equal to zero");
-    KMeansMachine_doc.print_usage();
-    return -1;
-   }
-
-  self->cxx.reset(new bob::learn::misc::KMeansMachine(n_means, n_inputs));
-  return 0;
-}
-
-
-static int PyBobLearnMiscKMeansMachine_init_copy(PyBobLearnMiscKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = KMeansMachine_doc.kwlist(1);
-  PyBobLearnMiscKMeansMachineObject* tt;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscKMeansMachine_Type, &tt)){
-    KMeansMachine_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::KMeansMachine(*tt->cxx));
-  return 0;
-}
-
-
-static int PyBobLearnMiscKMeansMachine_init_hdf5(PyBobLearnMiscKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = KMeansMachine_doc.kwlist(2);
-
-  PyBobIoHDF5FileObject* config = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBobIoHDF5File_Converter, &config)){
-    KMeansMachine_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::KMeansMachine(*(config->f)));
-
-  return 0;
-}
-
-
-static int PyBobLearnMiscKMeansMachine_init(PyBobLearnMiscKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // get the number of command line arguments
-  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-  
-  switch (nargs) {
-
-    case 0: //default initializer ()
-      self->cxx.reset(new bob::learn::misc::KMeansMachine());
-      return 0;
-
-    case 1:{
-      //Reading the input argument
-      PyObject* arg = 0;
-      if (PyTuple_Size(args))
-        arg = PyTuple_GET_ITEM(args, 0);
-      else {
-        PyObject* tmp = PyDict_Values(kwargs);
-        auto tmp_ = make_safe(tmp);
-        arg = PyList_GET_ITEM(tmp, 0);
-      }
-
-      // If the constructor input is Gaussian object
-     if (PyBobLearnMiscKMeansMachine_Check(arg))
-       return PyBobLearnMiscKMeansMachine_init_copy(self, args, kwargs);
-      // If the constructor input is a HDF5
-     else if (PyBobIoHDF5File_Check(arg))
-       return PyBobLearnMiscKMeansMachine_init_hdf5(self, args, kwargs);
-    }
-    case 2:
-      return PyBobLearnMiscKMeansMachine_init_number(self, args, kwargs);
-    default:
-      PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires 0, 1 or 2 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
-      KMeansMachine_doc.print_usage();
-      return -1;
-  }
-  BOB_CATCH_MEMBER("cannot create KMeansMachine", 0)
-  return 0;
-}
-
-
-
-static void PyBobLearnMiscKMeansMachine_delete(PyBobLearnMiscKMeansMachineObject* self) {
-  self->cxx.reset();
-  Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-static PyObject* PyBobLearnMiscKMeansMachine_RichCompare(PyBobLearnMiscKMeansMachineObject* self, PyObject* other, int op) {
-  BOB_TRY
-
-  if (!PyBobLearnMiscKMeansMachine_Check(other)) {
-    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
-    return 0;
-  }
-  auto other_ = reinterpret_cast<PyBobLearnMiscKMeansMachineObject*>(other);
-  switch (op) {
-    case Py_EQ:
-      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
-    case Py_NE:
-      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
-    default:
-      Py_INCREF(Py_NotImplemented);
-      return Py_NotImplemented;
-  }
-  BOB_CATCH_MEMBER("cannot compare KMeansMachine objects", 0)
-}
-
-int PyBobLearnMiscKMeansMachine_Check(PyObject* o) {
-  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnMiscKMeansMachine_Type));
-}
-
-
-/******************************************************************/
-/************ Variables Section ***********************************/
-/******************************************************************/
-
-/***** shape *****/
-static auto shape = bob::extension::VariableDoc(
-  "shape",
-  "(int,int)",
-  "A tuple that represents the number of means and dimensionality of the feature vector``(n_means, dim)``.",
-  ""
-);
-PyObject* PyBobLearnMiscKMeansMachine_getShape(PyBobLearnMiscKMeansMachineObject* self, void*) {
-  BOB_TRY
-  return Py_BuildValue("(i,i)", self->cxx->getNMeans(), self->cxx->getNInputs());
-  BOB_CATCH_MEMBER("shape could not be read", 0)
-}
-
-/***** MEAN *****/
-
-static auto means = bob::extension::VariableDoc(
-  "means",
-  "array_like <float, 2D>",
-  "The means",
-  ""
-);
-PyObject* PyBobLearnMiscKMeansMachine_getMeans(PyBobLearnMiscKMeansMachineObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getMeans());
-  BOB_CATCH_MEMBER("means could not be read", 0)
-}
-int PyBobLearnMiscKMeansMachine_setMeans(PyBobLearnMiscKMeansMachineObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, means.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "means");
-  if (!b) return -1;
-  self->cxx->setMeans(*b);
-  return 0;
-  BOB_CATCH_MEMBER("means could not be set", -1)
-}
-
-
-static PyGetSetDef PyBobLearnMiscKMeansMachine_getseters[] = { 
-  {
-   shape.name(),
-   (getter)PyBobLearnMiscKMeansMachine_getShape,
-   0,
-   shape.doc(),
-   0
-  },
-  {
-   means.name(),
-   (getter)PyBobLearnMiscKMeansMachine_getMeans,
-   (setter)PyBobLearnMiscKMeansMachine_setMeans,
-   means.doc(),
-   0
-  },
-  {0}  // Sentinel
-};
-
-
-/******************************************************************/
-/************ Functions Section ***********************************/
-/******************************************************************/
-
-
-/*** save ***/
-static auto save = bob::extension::FunctionDoc(
-  "save",
-  "Save the configuration of the KMeansMachine to a given HDF5 file"
-)
-.add_prototype("hdf5")
-.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing");
-static PyObject* PyBobLearnMiscKMeansMachine_Save(PyBobLearnMiscKMeansMachineObject* self,  PyObject* args, PyObject* kwargs) {
-
-  BOB_TRY
-  
-  // get list of arguments
-  char** kwlist = save.kwlist(0);  
-  PyBobIoHDF5FileObject* hdf5;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-
-  auto hdf5_ = make_safe(hdf5);
-  self->cxx->save(*hdf5->f);
-
-  BOB_CATCH_MEMBER("cannot save the data", 0)
-  Py_RETURN_NONE;
-}
-
-/*** load ***/
-static auto load = bob::extension::FunctionDoc(
-  "load",
-  "Load the configuration of the KMeansMachine to a given HDF5 file"
-)
-.add_prototype("hdf5")
-.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
-static PyObject* PyBobLearnMiscKMeansMachine_Load(PyBobLearnMiscKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = load.kwlist(0);  
-  PyBobIoHDF5FileObject* hdf5;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-  
-  auto hdf5_ = make_safe(hdf5);  
-  self->cxx->load(*hdf5->f);
-
-  BOB_CATCH_MEMBER("cannot load the data", 0)
-  Py_RETURN_NONE;
-}
-
-
-/*** is_similar_to ***/
-static auto is_similar_to = bob::extension::FunctionDoc(
-  "is_similar_to",
-  
-  "Compares this KMeansMachine with the ``other`` one to be approximately the same.",
-  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
-  "relative and absolute precision for the ``weights``, ``biases`` "
-  "and any other values internal to this machine."
-)
-.add_prototype("other, [r_epsilon], [a_epsilon]","output")
-.add_parameter("other", ":py:class:`bob.learn.misc.KMeansMachine`", "A KMeansMachine object to be compared.")
-.add_parameter("r_epsilon", "float", "Relative precision.")
-.add_parameter("a_epsilon", "float", "Absolute precision.")
-.add_return("output","bool","True if it is similar, otherwise false.");
-static PyObject* PyBobLearnMiscKMeansMachine_IsSimilarTo(PyBobLearnMiscKMeansMachineObject* self, PyObject* args, PyObject* kwds) {
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = is_similar_to.kwlist(0);
-
-  //PyObject* other = 0;
-  PyBobLearnMiscKMeansMachineObject* other = 0;
-  double r_epsilon = 1.e-5;
-  double a_epsilon = 1.e-8;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
-        &PyBobLearnMiscKMeansMachine_Type, &other,
-        &r_epsilon, &a_epsilon)){
-
-        is_similar_to.print_usage(); 
-        return 0;        
-  }
-
-  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
-    Py_RETURN_TRUE;
-  else
-    Py_RETURN_FALSE;
-}
-
-
-/*** resize ***/
-static auto resize = bob::extension::FunctionDoc(
-  "resize",
-  "Allocates space for the statistics and resets to zero.",
-  0,
-  true
-)
-.add_prototype("n_means,n_inputs")
-.add_parameter("n_means", "int", "Number of means")
-.add_parameter("n_inputs", "int", "Dimensionality of the feature vector");
-static PyObject* PyBobLearnMiscKMeansMachine_resize(PyBobLearnMiscKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = resize.kwlist(0);
-
-  int n_means = 0;
-  int n_inputs = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "ii", kwlist, &n_means, &n_inputs)) Py_RETURN_NONE;
-
-  if (n_means <= 0){
-    PyErr_Format(PyExc_TypeError, "n_means must be greater than zero");
-    resize.print_usage();
-    return 0;
-  }
-  if (n_inputs <= 0){
-    PyErr_Format(PyExc_TypeError, "n_inputs must be greater than zero");
-    resize.print_usage();
-    return 0;
-  }
-
-  self->cxx->resize(n_means, n_inputs);
-
-  BOB_CATCH_MEMBER("cannot perform the resize method", 0)
-
-  Py_RETURN_NONE;
-}
-
-/*** get_mean ***/
-static auto get_mean = bob::extension::FunctionDoc(
-  "get_mean",
-  "Get the i'th mean.",
-  ".. note:: An exception is thrown if i is out of range.", 
-  true
-)
-.add_prototype("i")
-.add_parameter("i", "int", "Index of the mean")
-.add_return("mean","array_like <float, 1D>","Mean array");
-static PyObject* PyBobLearnMiscKMeansMachine_get_mean(PyBobLearnMiscKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = get_mean.kwlist(0);
-
-  int i = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) Py_RETURN_NONE;
- 
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getMean(i));
-
-  BOB_CATCH_MEMBER("cannot get the mean", 0)
-}
-
-
-/*** set_mean ***/
-static auto set_mean = bob::extension::FunctionDoc(
-  "set_mean",
-  "Set the i'th mean.",
-  ".. note:: An exception is thrown if i is out of range.", 
-  true
-)
-.add_prototype("i,mean")
-.add_parameter("i", "int", "Index of the mean")
-.add_parameter("mean", "array_like <float, 1D>", "Mean array");
-static PyObject* PyBobLearnMiscKMeansMachine_set_mean(PyBobLearnMiscKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = set_mean.kwlist(0);
-
-  int i = 0;
-  PyBlitzArrayObject* mean = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "iO&", kwlist, &i, &PyBlitzArray_Converter, &mean)) Py_RETURN_NONE;
-  
-  //protects acquired resources through this scope
-  auto mean_ = make_safe(mean);
-
-  //setting the mean
-  self->cxx->setMean(i, *PyBlitzArrayCxx_AsBlitz<double,1>(mean));
-
-  BOB_CATCH_MEMBER("cannot set the mean", 0)
-  
-  Py_RETURN_NONE;
-}
-
-
-
-/*** get_distance_from_mean ***/
-static auto get_distance_from_mean = bob::extension::FunctionDoc(
-  "get_distance_from_mean",
-  "Return the power of two of the square Euclidean distance of the sample, x, to the i'th mean.",
-  ".. note:: An exception is thrown if i is out of range.", 
-  true
-)
-.add_prototype("input,i","output")
-.add_parameter("input", "array_like <float, 1D>", "The data sample (feature vector)")
-.add_parameter("i", "int", "The index of the mean")
-.add_return("output","float","Square Euclidean distance of the sample, x, to the i'th mean");
-static PyObject* PyBobLearnMiscKMeansMachine_get_distance_from_mean(PyBobLearnMiscKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = get_distance_from_mean.kwlist(0);
-
-  PyBlitzArrayObject* input = 0;
-  int i = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&i", kwlist, &PyBlitzArray_Converter, &input, &i)){ 
-    Py_RETURN_NONE;
-  }
-
-  //protects acquired resources through this scope
-  auto input_ = make_safe(input);
-
-  double output = self->cxx->getDistanceFromMean(*PyBlitzArrayCxx_AsBlitz<double,1>(input),i);
-  return Py_BuildValue("d", output);
-
-  BOB_CATCH_MEMBER("cannot compute the likelihood", 0)
-}
-
-
-/*** get_closest_mean ***/
-static auto get_closest_mean = bob::extension::FunctionDoc(
-  "get_closest_mean",
-  "Calculate the index of the mean that is closest (in terms of square Euclidean distance) to the data sample, x.",
-  "",
-  true
-)
-.add_prototype("input","output")
-.add_parameter("input", "array_like <float, 1D>", "The data sample (feature vector)")
-.add_return("output", "(int, int)", "Tuple containing the closest mean and the minimum distance from the input");
-static PyObject* PyBobLearnMiscKMeansMachine_get_closest_mean(PyBobLearnMiscKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = get_closest_mean.kwlist(0);
-
-  PyBlitzArrayObject* input = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBlitzArray_Converter, &input)) Py_RETURN_NONE;
-
-  //protects acquired resources through this scope
-  auto input_ = make_safe(input);
-
-  size_t closest_mean = 0;
-  double min_distance = -1;   
-  self->cxx->getClosestMean(*PyBlitzArrayCxx_AsBlitz<double,1>(input), closest_mean, min_distance);
-    
-  return Py_BuildValue("(i,d)", closest_mean, min_distance);
-
-  BOB_CATCH_MEMBER("cannot compute the closest mean", 0)
-}
-
-
-/*** get_min_distance ***/
-static auto get_min_distance = bob::extension::FunctionDoc(
-  "get_min_distance",
-  "Output the minimum (Square Euclidean) distance between the input and the closest mean ",
-  "",
-  true
-)
-.add_prototype("input","output")
-.add_parameter("input", "array_like <float, 1D>", "The data sample (feature vector)")
-.add_return("output", "double", "The minimum distance");
-static PyObject* PyBobLearnMiscKMeansMachine_get_min_distance(PyBobLearnMiscKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = get_min_distance.kwlist(0);
-
-  PyBlitzArrayObject* input = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBlitzArray_Converter, &input)) Py_RETURN_NONE;
-
-  //protects acquired resources through this scope
-  auto input_ = make_safe(input);
-
-  double min_distance = 0;   
-  min_distance = self->cxx->getMinDistance(*PyBlitzArrayCxx_AsBlitz<double,1>(input));
-
-  return Py_BuildValue("d", min_distance);
-
-  BOB_CATCH_MEMBER("cannot compute the min distance", 0)
-}
-
-/**** get_variances_and_weights_for_each_cluster ***/
-static auto get_variances_and_weights_for_each_cluster = bob::extension::FunctionDoc(
-  "get_variances_and_weights_for_each_cluster",
-  "For each mean, find the subset of the samples that is closest to that mean, and calculate"
-  " 1) the variance of that subset (the cluster variance)" 
-  " 2) the proportion of the samples represented by that subset (the cluster weight)",
-  "",
-  true
-)
-.add_prototype("input","output")
-.add_parameter("input", "array_like <float, 2D>", "The data sample (feature vector)")
-.add_return("output", "(array_like <float, 2D>, array_like <float, 1D>)", "A tuple with the variances and the weights respectively");
-static PyObject* PyBobLearnMiscKMeansMachine_get_variances_and_weights_for_each_cluster(PyBobLearnMiscKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist =  get_variances_and_weights_for_each_cluster.kwlist(0);
-
-  PyBlitzArrayObject* input = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBlitzArray_Converter, &input)) Py_RETURN_NONE;
-
-  //protects acquired resources through this scope
-  auto input_ = make_safe(input);
-
-  blitz::Array<double,2> variances(self->cxx->getNMeans(),self->cxx->getNInputs());
-  blitz::Array<double,1> weights(self->cxx->getNMeans());
-  
-  self->cxx->getVariancesAndWeightsForEachCluster(*PyBlitzArrayCxx_AsBlitz<double,2>(input),variances,weights);
-
-  return Py_BuildValue("(O,O)",PyBlitzArrayCxx_AsConstNumpy(variances), PyBlitzArrayCxx_AsConstNumpy(weights));
-
-  BOB_CATCH_MEMBER("cannot compute the variances and weights for each cluster", 0)
-}
-
-
-/**** __get_variances_and_weights_for_each_cluster_init__ ***/
-static auto __get_variances_and_weights_for_each_cluster_init__ = bob::extension::FunctionDoc(
-  "__get_variances_and_weights_for_each_cluster_init__",
-  "Methods consecutively called by getVariancesAndWeightsForEachCluster()"
-  "This should help for the parallelization on several nodes by splitting the data and calling"
-  "getVariancesAndWeightsForEachClusterAcc() for each split. In this case, there is a need to sum"
-  "with the m_cache_means, variances, and weights variables before performing the merge on one"
-  "node using getVariancesAndWeightsForEachClusterFin().",
-  "",
-  true
-)
-.add_prototype("variances,weights","")
-.add_parameter("variances", "array_like <float, 2D>", "Variance array")
-.add_parameter("weights", "array_like <float, 1D>", "Weight array");
-static PyObject* PyBobLearnMiscKMeansMachine_get_variances_and_weights_for_each_cluster_init(PyBobLearnMiscKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist =  __get_variances_and_weights_for_each_cluster_init__.kwlist(0);
-
-  PyBlitzArrayObject* variances = 0;
-  PyBlitzArrayObject* weights   = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&", kwlist, &PyBlitzArray_Converter, &variances,  &PyBlitzArray_Converter, &weights)) Py_RETURN_NONE;
-
-  //protects acquired resources through this scope
-  auto weights_   = make_safe(weights);
-  auto variances_ = make_safe(variances);
-
-  self->cxx->getVariancesAndWeightsForEachClusterInit(*PyBlitzArrayCxx_AsBlitz<double,2>(variances), *PyBlitzArrayCxx_AsBlitz<double,1>(weights));
-  Py_RETURN_NONE;
-
-  BOB_CATCH_MEMBER("cannot compute the variances and weights for each cluster", 0)
-}
-
-
-/**** __get_variances_and_weights_for_each_cluster_acc__ ***/
-static auto __get_variances_and_weights_for_each_cluster_acc__ = bob::extension::FunctionDoc(
-  "__get_variances_and_weights_for_each_cluster_acc__",
-  "Methods consecutively called by getVariancesAndWeightsForEachCluster()"
-  "This should help for the parallelization on several nodes by splitting the data and calling"
-  "getVariancesAndWeightsForEachClusterAcc() for each split. In this case, there is a need to sum"
-  "with the m_cache_means, variances, and weights variables before performing the merge on one"
-  "node using getVariancesAndWeightsForEachClusterFin().",
-  "",
-  true
-)
-.add_prototype("data,variances,weights","")
-.add_parameter("data", "array_like <float, 2D>", "data array")
-.add_parameter("variances", "array_like <float, 2D>", "Variance array")
-.add_parameter("weights", "array_like <float, 1D>", "Weight array");
-static PyObject* PyBobLearnMiscKMeansMachine_get_variances_and_weights_for_each_cluster_acc(PyBobLearnMiscKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist =  __get_variances_and_weights_for_each_cluster_acc__.kwlist(0);
-
-  PyBlitzArrayObject* data      = 0;
-  PyBlitzArrayObject* variances = 0;
-  PyBlitzArrayObject* weights   = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&O&", kwlist, &PyBlitzArray_Converter, &data, &PyBlitzArray_Converter, &variances, &PyBlitzArray_Converter, &weights)) Py_RETURN_NONE;
-
-  //protects acquired resources through this scope
-  auto data_      = make_safe(data);
-  auto weights_   = make_safe(weights);
-  auto variances_ = make_safe(variances);
-
-  self->cxx->getVariancesAndWeightsForEachClusterAcc(*PyBlitzArrayCxx_AsBlitz<double,2>(data), *PyBlitzArrayCxx_AsBlitz<double,2>(variances), *PyBlitzArrayCxx_AsBlitz<double,1>(weights));
-  Py_RETURN_NONE;
-
-  BOB_CATCH_MEMBER("cannot compute the variances and weights for each cluster", 0)
-}
-
-
-/**** __get_variances_and_weights_for_each_cluster_fin__ ***/
-static auto __get_variances_and_weights_for_each_cluster_fin__ = bob::extension::FunctionDoc(
-  "__get_variances_and_weights_for_each_cluster_fin__",
-  "Methods consecutively called by getVariancesAndWeightsForEachCluster()"
-  "This should help for the parallelization on several nodes by splitting the data and calling"
-  "getVariancesAndWeightsForEachClusterAcc() for each split. In this case, there is a need to sum"
-  "with the m_cache_means, variances, and weights variables before performing the merge on one"
-  "node using getVariancesAndWeightsForEachClusterFin().",
-  "",
-  true
-)
-.add_prototype("variances,weights","")
-.add_parameter("variances", "array_like <float, 2D>", "Variance array")
-.add_parameter("weights", "array_like <float, 1D>", "Weight array");
-static PyObject* PyBobLearnMiscKMeansMachine_get_variances_and_weights_for_each_cluster_fin(PyBobLearnMiscKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist =  __get_variances_and_weights_for_each_cluster_fin__.kwlist(0);
-
-  PyBlitzArrayObject* variances = 0;
-  PyBlitzArrayObject* weights   = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&", kwlist, &PyBlitzArray_Converter, &variances,  &PyBlitzArray_Converter, &weights)) Py_RETURN_NONE;
-
-  //protects acquired resources through this scope
-  auto weights_   = make_safe(weights);
-  auto variances_ = make_safe(variances);
-
-  self->cxx->getVariancesAndWeightsForEachClusterFin(*PyBlitzArrayCxx_AsBlitz<double,2>(variances), *PyBlitzArrayCxx_AsBlitz<double,1>(weights));
-  Py_RETURN_NONE;
-
-  BOB_CATCH_MEMBER("cannot compute the variances and weights for each cluster", 0)
-}
-
-
-static PyMethodDef PyBobLearnMiscKMeansMachine_methods[] = {
-  {
-    save.name(),
-    (PyCFunction)PyBobLearnMiscKMeansMachine_Save,
-    METH_VARARGS|METH_KEYWORDS,
-    save.doc()
-  },
-  {
-    load.name(),
-    (PyCFunction)PyBobLearnMiscKMeansMachine_Load,
-    METH_VARARGS|METH_KEYWORDS,
-    load.doc()
-  },
-  {
-    is_similar_to.name(),
-    (PyCFunction)PyBobLearnMiscKMeansMachine_IsSimilarTo,
-    METH_VARARGS|METH_KEYWORDS,
-    is_similar_to.doc()
-  },
-  {
-    resize.name(),
-    (PyCFunction)PyBobLearnMiscKMeansMachine_resize,
-    METH_VARARGS|METH_KEYWORDS,
-    resize.doc()
-  },  
-  {
-    get_mean.name(),
-    (PyCFunction)PyBobLearnMiscKMeansMachine_get_mean,
-    METH_VARARGS|METH_KEYWORDS,
-    get_mean.doc()
-  },  
-  {
-    set_mean.name(),
-    (PyCFunction)PyBobLearnMiscKMeansMachine_set_mean,
-    METH_VARARGS|METH_KEYWORDS,
-    set_mean.doc()
-  },  
-  {
-    get_distance_from_mean.name(),
-    (PyCFunction)PyBobLearnMiscKMeansMachine_get_distance_from_mean,
-    METH_VARARGS|METH_KEYWORDS,
-    get_distance_from_mean.doc()
-  },  
-  {
-    get_closest_mean.name(),
-    (PyCFunction)PyBobLearnMiscKMeansMachine_get_closest_mean,
-    METH_VARARGS|METH_KEYWORDS,
-    get_closest_mean.doc()
-  },  
-  {
-    get_min_distance.name(),
-    (PyCFunction)PyBobLearnMiscKMeansMachine_get_min_distance,
-    METH_VARARGS|METH_KEYWORDS,
-    get_min_distance.doc()
-  },  
-  {
-    get_variances_and_weights_for_each_cluster.name(),
-    (PyCFunction)PyBobLearnMiscKMeansMachine_get_variances_and_weights_for_each_cluster,
-    METH_VARARGS|METH_KEYWORDS,
-    get_variances_and_weights_for_each_cluster.doc()
-  },  
-  {
-    __get_variances_and_weights_for_each_cluster_init__.name(),
-    (PyCFunction)PyBobLearnMiscKMeansMachine_get_variances_and_weights_for_each_cluster_init,
-    METH_VARARGS|METH_KEYWORDS,
-    __get_variances_and_weights_for_each_cluster_init__.doc()
-  },  
-  {
-    __get_variances_and_weights_for_each_cluster_acc__.name(),
-    (PyCFunction)PyBobLearnMiscKMeansMachine_get_variances_and_weights_for_each_cluster_acc,
-    METH_VARARGS|METH_KEYWORDS,
-    __get_variances_and_weights_for_each_cluster_acc__.doc()
-  },  
-  {
-    __get_variances_and_weights_for_each_cluster_fin__.name(),
-    (PyCFunction)PyBobLearnMiscKMeansMachine_get_variances_and_weights_for_each_cluster_fin,
-    METH_VARARGS|METH_KEYWORDS,
-    __get_variances_and_weights_for_each_cluster_fin__.doc()
-  },  
-
-  {0} /* Sentinel */
-};
-
-
-/******************************************************************/
-/************ Module Section **************************************/
-/******************************************************************/
-
-// Define the Gaussian type struct; will be initialized later
-PyTypeObject PyBobLearnMiscKMeansMachine_Type = {
-  PyVarObject_HEAD_INIT(0,0)
-  0
-};
-
-bool init_BobLearnMiscKMeansMachine(PyObject* module)
-{
-  // initialize the type struct
-  PyBobLearnMiscKMeansMachine_Type.tp_name = KMeansMachine_doc.name();
-  PyBobLearnMiscKMeansMachine_Type.tp_basicsize = sizeof(PyBobLearnMiscKMeansMachineObject);
-  PyBobLearnMiscKMeansMachine_Type.tp_flags = Py_TPFLAGS_DEFAULT;
-  PyBobLearnMiscKMeansMachine_Type.tp_doc = KMeansMachine_doc.doc();
-
-  // set the functions
-  PyBobLearnMiscKMeansMachine_Type.tp_new = PyType_GenericNew;
-  PyBobLearnMiscKMeansMachine_Type.tp_init = reinterpret_cast<initproc>(PyBobLearnMiscKMeansMachine_init);
-  PyBobLearnMiscKMeansMachine_Type.tp_dealloc = reinterpret_cast<destructor>(PyBobLearnMiscKMeansMachine_delete);
-  PyBobLearnMiscKMeansMachine_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnMiscKMeansMachine_RichCompare);
-  PyBobLearnMiscKMeansMachine_Type.tp_methods = PyBobLearnMiscKMeansMachine_methods;
-  PyBobLearnMiscKMeansMachine_Type.tp_getset = PyBobLearnMiscKMeansMachine_getseters;
-  //PyBobLearnMiscGMMMachine_Type.tp_call = reinterpret_cast<ternaryfunc>(PyBobLearnMiscGMMMachine_loglikelihood);
-
-
-  // check that everything is fine
-  if (PyType_Ready(&PyBobLearnMiscKMeansMachine_Type) < 0) return false;
-
-  // add the type to the module
-  Py_INCREF(&PyBobLearnMiscKMeansMachine_Type);
-  return PyModule_AddObject(module, "KMeansMachine", (PyObject*)&PyBobLearnMiscKMeansMachine_Type) >= 0;
-}
-
diff --git a/bob/learn/misc/kmeans_trainer.cpp b/bob/learn/misc/kmeans_trainer.cpp
deleted file mode 100644
index c31e3df014bb17a31a907a3335cf8c6c99f7b358..0000000000000000000000000000000000000000
--- a/bob/learn/misc/kmeans_trainer.cpp
+++ /dev/null
@@ -1,553 +0,0 @@
-/**
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- * @date Tue 13 Jan 16:50:00 2015
- *
- * @brief Python API for bob::learn::em
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "main.h"
-
-/******************************************************************/
-/************ Constructor Section *********************************/
-/******************************************************************/
-
-// InitializationMethod type conversion
-static const std::map<std::string, bob::learn::misc::KMeansTrainer::InitializationMethod> IM = {{"RANDOM",  bob::learn::misc::KMeansTrainer::InitializationMethod::RANDOM}, {"RANDOM_NO_DUPLICATE", bob::learn::misc::KMeansTrainer::InitializationMethod::RANDOM_NO_DUPLICATE}, {"KMEANS_PLUS_PLUS", bob::learn::misc::KMeansTrainer::InitializationMethod::KMEANS_PLUS_PLUS}};
-static inline bob::learn::misc::KMeansTrainer::InitializationMethod string2IM(const std::string& o){            /* converts string to InitializationMethod type */
-  auto it = IM.find(o);
-  if (it == IM.end()) throw std::runtime_error("The given InitializationMethod '" + o + "' is not known; choose one of ('RANDOM', 'RANDOM_NO_DUPLICATE', 'KMEANS_PLUS_PLUS')");
-  else return it->second;
-}
-static inline const std::string& IM2string(bob::learn::misc::KMeansTrainer::InitializationMethod o){            /* converts InitializationMethod type to string */
-  for (auto it = IM.begin(); it != IM.end(); ++it) if (it->second == o) return it->first;
-  throw std::runtime_error("The given InitializationMethod type is not known");
-}
-
-
-static auto KMeansTrainer_doc = bob::extension::ClassDoc(
-  BOB_EXT_MODULE_PREFIX "._KMeansTrainer",
-  "Trains a KMeans machine."
-  "This class implements the expectation-maximization algorithm for a k-means machine."
-  "See Section 9.1 of Bishop, \"Pattern recognition and machine learning\", 2006"
-  "It uses a random initialization of the means followed by the expectation-maximization algorithm"
-).add_constructor(
-  bob::extension::FunctionDoc(
-    "__init__",
-    "Creates a KMeansTrainer",
-    "",
-    true
-  )
-  .add_prototype("initialization_method","")
-  .add_prototype("other","")
-  .add_prototype("","")
-
-  .add_parameter("initialization_method", "str", "The initialization method of the means")
-  .add_parameter("other", ":py:class:`bob.learn.misc.KMeansTrainer`", "A KMeansTrainer object to be copied.")
-
-);
-
-
-static int PyBobLearnMiscKMeansTrainer_init_copy(PyBobLearnMiscKMeansTrainerObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = KMeansTrainer_doc.kwlist(1);
-  PyBobLearnMiscKMeansTrainerObject* tt;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscKMeansTrainer_Type, &tt)){
-    KMeansTrainer_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::KMeansTrainer(*tt->cxx));
-  return 0;
-}
-
-static int PyBobLearnMiscKMeansTrainer_init_str(PyBobLearnMiscKMeansTrainerObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = KMeansTrainer_doc.kwlist(0);
-  char* value;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s", kwlist, &value)){
-    KMeansTrainer_doc.print_usage();
-    return -1;
-  }
-  self->cxx.reset(new bob::learn::misc::KMeansTrainer(string2IM(std::string(value))));
-  return 0;
-}
-
-
-static int PyBobLearnMiscKMeansTrainer_init(PyBobLearnMiscKMeansTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-
-  switch (nargs) {
-
-    case 0:{ //default initializer ()
-      self->cxx.reset(new bob::learn::misc::KMeansTrainer());
-      return 0;
-    }
-    case 1:{
-      //Reading the input argument
-      PyObject* arg = 0;
-      if (PyTuple_Size(args))
-        arg = PyTuple_GET_ITEM(args, 0);
-      else {
-        PyObject* tmp = PyDict_Values(kwargs);
-        auto tmp_ = make_safe(tmp);
-        arg = PyList_GET_ITEM(tmp, 0);
-      }
-
-      // If the constructor input is KMeansTrainer object
-      if (PyBobLearnMiscKMeansTrainer_Check(arg))
-        return PyBobLearnMiscKMeansTrainer_init_copy(self, args, kwargs);
-      else if(PyString_Check(arg))
-        return PyBobLearnMiscKMeansTrainer_init_str(self, args, kwargs);
-        //return PyBobLearnMiscKMeansTrainer_init_str(self, arg);
-    }
-    default:{
-      PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires 0 or 1 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
-      KMeansTrainer_doc.print_usage();
-      return -1;
-    }
-  }
-  BOB_CATCH_MEMBER("cannot create KMeansTrainer", 0)
-  return 0;
-}
-
-
-static void PyBobLearnMiscKMeansTrainer_delete(PyBobLearnMiscKMeansTrainerObject* self) {
-  self->cxx.reset();
-  Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-
-int PyBobLearnMiscKMeansTrainer_Check(PyObject* o) {
-  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnMiscKMeansTrainer_Type));
-}
-
-
-static PyObject* PyBobLearnMiscKMeansTrainer_RichCompare(PyBobLearnMiscKMeansTrainerObject* self, PyObject* other, int op) {
-  BOB_TRY
-
-  if (!PyBobLearnMiscKMeansTrainer_Check(other)) {
-    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
-    return 0;
-  }
-  auto other_ = reinterpret_cast<PyBobLearnMiscKMeansTrainerObject*>(other);
-  switch (op) {
-    case Py_EQ:
-      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
-    case Py_NE:
-      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
-    default:
-      Py_INCREF(Py_NotImplemented);
-      return Py_NotImplemented;
-  }
-  BOB_CATCH_MEMBER("cannot compare KMeansTrainer objects", 0)
-}
-
-
-/******************************************************************/
-/************ Variables Section ***********************************/
-/******************************************************************/
-
-/***** initialization_method *****/
-static auto initialization_method = bob::extension::VariableDoc(
-  "initialization_method",
-  "str",
-  "Initialization method.",
-  ""
-);
-PyObject* PyBobLearnMiscKMeansTrainer_getInitializationMethod(PyBobLearnMiscKMeansTrainerObject* self, void*) {
-  BOB_TRY
-  return Py_BuildValue("s", IM2string(self->cxx->getInitializationMethod()).c_str());
-  BOB_CATCH_MEMBER("initialization method could not be read", 0)
-}
-int PyBobLearnMiscKMeansTrainer_setInitializationMethod(PyBobLearnMiscKMeansTrainerObject* self, PyObject* value, void*) {
-  BOB_TRY
-
-  if (!PyString_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects an str", Py_TYPE(self)->tp_name, initialization_method.name());
-    return -1;
-  }
-  self->cxx->setInitializationMethod(string2IM(PyString_AS_STRING(value)));
-
-  return 0;
-  BOB_CATCH_MEMBER("initialization method could not be set", 0)
-}
-
-
-/***** zeroeth_order_statistics *****/
-static auto zeroeth_order_statistics = bob::extension::VariableDoc(
-  "zeroeth_order_statistics",
-  "array_like <float, 1D>",
-  "Returns the internal statistics. Useful to parallelize the E-step",
-  ""
-);
-PyObject* PyBobLearnMiscKMeansTrainer_getZeroethOrderStatistics(PyBobLearnMiscKMeansTrainerObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getZeroethOrderStats());
-  BOB_CATCH_MEMBER("zeroeth_order_statistics could not be read", 0)
-}
-int PyBobLearnMiscKMeansTrainer_setZeroethOrderStatistics(PyBobLearnMiscKMeansTrainerObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, zeroeth_order_statistics.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "zeroeth_order_statistics");
-  if (!b) return -1;
-  self->cxx->setZeroethOrderStats(*b);
-  return 0;
-  BOB_CATCH_MEMBER("zeroeth_order_statistics could not be set", -1)
-}
-
-
-/***** first_order_statistics *****/
-static auto first_order_statistics = bob::extension::VariableDoc(
-  "first_order_statistics",
-  "array_like <float, 2D>",
-  "Returns the internal statistics. Useful to parallelize the E-step",
-  ""
-);
-PyObject* PyBobLearnMiscKMeansTrainer_getFirstOrderStatistics(PyBobLearnMiscKMeansTrainerObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getFirstOrderStats());
-  BOB_CATCH_MEMBER("first_order_statistics could not be read", 0)
-}
-int PyBobLearnMiscKMeansTrainer_setFirstOrderStatistics(PyBobLearnMiscKMeansTrainerObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, first_order_statistics.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "first_order_statistics");
-  if (!b) return -1;
-  self->cxx->setFirstOrderStats(*b);
-  return 0;
-  BOB_CATCH_MEMBER("first_order_statistics could not be set", -1)
-}
-
-
-/***** average_min_distance *****/
-static auto average_min_distance = bob::extension::VariableDoc(
-  "average_min_distance",
-  "str",
-  "Average min (square Euclidean) distance. Useful to parallelize the E-step.",
-  ""
-);
-PyObject* PyBobLearnMiscKMeansTrainer_getAverageMinDistance(PyBobLearnMiscKMeansTrainerObject* self, void*) {
-  BOB_TRY
-  return Py_BuildValue("d", self->cxx->getAverageMinDistance());
-  BOB_CATCH_MEMBER("Average Min Distance method could not be read", 0)
-}
-int PyBobLearnMiscKMeansTrainer_setAverageMinDistance(PyBobLearnMiscKMeansTrainerObject* self, PyObject* value, void*) {
-  BOB_TRY
-
-  if (!PyNumber_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects an double", Py_TYPE(self)->tp_name, average_min_distance.name());
-    return -1;
-  }
-  self->cxx->setAverageMinDistance(PyFloat_AS_DOUBLE(value));
-
-  return 0;
-  BOB_CATCH_MEMBER("Average Min Distance could not be set", 0)
-}
-
-
-
-/***** rng *****/
-static auto rng = bob::extension::VariableDoc(
-  "rng",
-  "str",
-  "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.",
-  ""
-);
-PyObject* PyBobLearnMiscKMeansTrainer_getRng(PyBobLearnMiscKMeansTrainerObject* self, void*) {
-  BOB_TRY
-  //Allocating the correspondent python object
-  
-  PyBoostMt19937Object* retval =
-    (PyBoostMt19937Object*)PyBoostMt19937_Type.tp_alloc(&PyBoostMt19937_Type, 0);
-
-  retval->rng = self->cxx->getRng().get();
-  return Py_BuildValue("O", retval);
-  BOB_CATCH_MEMBER("Rng method could not be read", 0)
-}
-int PyBobLearnMiscKMeansTrainer_setRng(PyBobLearnMiscKMeansTrainerObject* self, PyObject* value, void*) {
-  BOB_TRY
-
-  if (!PyBoostMt19937_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects an PyBoostMt19937_Check", Py_TYPE(self)->tp_name, rng.name());
-    return -1;
-  }
-
-  PyBoostMt19937Object* boostObject = 0;
-  PyBoostMt19937_Converter(value, &boostObject);
-  self->cxx->setRng((boost::shared_ptr<boost::mt19937>)boostObject->rng);
-
-  return 0;
-  BOB_CATCH_MEMBER("Rng could not be set", 0)
-}
-
-
-
-static PyGetSetDef PyBobLearnMiscKMeansTrainer_getseters[] = { 
-  {
-   initialization_method.name(),
-   (getter)PyBobLearnMiscKMeansTrainer_getInitializationMethod,
-   (setter)PyBobLearnMiscKMeansTrainer_setInitializationMethod,
-   initialization_method.doc(),
-   0
-  },
-  {
-   zeroeth_order_statistics.name(),
-   (getter)PyBobLearnMiscKMeansTrainer_getZeroethOrderStatistics,
-   (setter)PyBobLearnMiscKMeansTrainer_setZeroethOrderStatistics,
-   zeroeth_order_statistics.doc(),
-   0
-  },
-  {
-   first_order_statistics.name(),
-   (getter)PyBobLearnMiscKMeansTrainer_getFirstOrderStatistics,
-   (setter)PyBobLearnMiscKMeansTrainer_setFirstOrderStatistics,
-   first_order_statistics.doc(),
-   0
-  },
-  {
-   average_min_distance.name(),
-   (getter)PyBobLearnMiscKMeansTrainer_getAverageMinDistance,
-   (setter)PyBobLearnMiscKMeansTrainer_setAverageMinDistance,
-   average_min_distance.doc(),
-   0
-  },
-  {
-   rng.name(),
-   (getter)PyBobLearnMiscKMeansTrainer_getRng,
-   (setter)PyBobLearnMiscKMeansTrainer_setRng,
-   rng.doc(),
-   0
-  },
-  {0}  // Sentinel
-};
-
-
-/******************************************************************/
-/************ Functions Section ***********************************/
-/******************************************************************/
-
-/*** initialize ***/
-static auto initialize = bob::extension::FunctionDoc(
-  "initialize",
-  "Initialise the means randomly",
-  "Data is split into as many chunks as there are means, then each mean is set to a random example within each chunk.",
-  true
-)
-.add_prototype("kmeans_machine,data")
-.add_parameter("kmeans_machine", ":py:class:`bob.learn.misc.KMeansMachine`", "KMeansMachine Object")
-.add_parameter("data", "array_like <float, 2D>", "Input data");
-static PyObject* PyBobLearnMiscKMeansTrainer_initialize(PyBobLearnMiscKMeansTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = initialize.kwlist(0);
-
-  PyBobLearnMiscKMeansMachineObject* kmeans_machine = 0;
-  PyBlitzArrayObject* data                          = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnMiscKMeansMachine_Type, &kmeans_machine,
-                                                                 &PyBlitzArray_Converter, &data)) Py_RETURN_NONE;
-  auto data_ = make_safe(data);
-
-  self->cxx->initialize(*kmeans_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
-
-  BOB_CATCH_MEMBER("cannot perform the initialize method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** eStep ***/
-static auto eStep = bob::extension::FunctionDoc(
-  "eStep",
-  "Compute the eStep, which is basically the distances ",
-  "Accumulate across the dataset:"
-  " -zeroeth and first order statistics"
-  " -average (Square Euclidean) distance from the closest mean",
-  true
-)
-.add_prototype("kmeans_machine,data")
-.add_parameter("kmeans_machine", ":py:class:`bob.learn.misc.KMeansMachine`", "KMeansMachine Object")
-.add_parameter("data", "array_like <float, 2D>", "Input data");
-static PyObject* PyBobLearnMiscKMeansTrainer_eStep(PyBobLearnMiscKMeansTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = eStep.kwlist(0);
-
-  PyBobLearnMiscKMeansMachineObject* kmeans_machine;
-  PyBlitzArrayObject* data = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnMiscKMeansMachine_Type, &kmeans_machine,
-                                                                 &PyBlitzArray_Converter, &data)) Py_RETURN_NONE;
-  auto data_ = make_safe(data);
-
-  self->cxx->eStep(*kmeans_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
-
-
-  BOB_CATCH_MEMBER("cannot perform the eStep method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** mStep ***/
-static auto mStep = bob::extension::FunctionDoc(
-  "mStep",
-  "Updates the mean based on the statistics from the E-step",
-  0,
-  true
-)
-.add_prototype("kmeans_machine")
-.add_parameter("kmeans_machine", ":py:class:`bob.learn.misc.KMeansMachine`", "KMeansMachine Object");
-static PyObject* PyBobLearnMiscKMeansTrainer_mStep(PyBobLearnMiscKMeansTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = mStep.kwlist(0);
-
-  PyBobLearnMiscKMeansMachineObject* kmeans_machine;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscKMeansMachine_Type, &kmeans_machine)) Py_RETURN_NONE;
-
-  self->cxx->mStep(*kmeans_machine->cxx);
-
-  BOB_CATCH_MEMBER("cannot perform the mStep method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** computeLikelihood ***/
-static auto compute_likelihood = bob::extension::FunctionDoc(
-  "compute_likelihood",
-  "This functions returns the average min (Square Euclidean) distance (average distance to the closest mean)",
-  0,
-  true
-)
-.add_prototype("kmeans_machine")
-.add_parameter("kmeans_machine", ":py:class:`bob.learn.misc.KMeansMachine`", "KMeansMachine Object");
-static PyObject* PyBobLearnMiscKMeansTrainer_compute_likelihood(PyBobLearnMiscKMeansTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = compute_likelihood.kwlist(0);
-
-  PyBobLearnMiscKMeansMachineObject* kmeans_machine;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscKMeansMachine_Type, &kmeans_machine)) Py_RETURN_NONE;
-
-  double value = self->cxx->computeLikelihood(*kmeans_machine->cxx);
-  return Py_BuildValue("d", value);
-
-  BOB_CATCH_MEMBER("cannot perform the computeLikelihood method", 0)
-}
-
-
-/*** reset_accumulators ***/
-static auto reset_accumulators = bob::extension::FunctionDoc(
-  "reset_accumulators",
-  "Reset the statistics accumulators to the correct size and a value of zero.",
-  0,
-  true
-)
-.add_prototype("kmeans_machine")
-.add_parameter("kmeans_machine", ":py:class:`bob.learn.misc.KMeansMachine`", "KMeansMachine Object");
-static PyObject* PyBobLearnMiscKMeansTrainer_reset_accumulators(PyBobLearnMiscKMeansTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = reset_accumulators.kwlist(0);
-
-  PyBobLearnMiscKMeansMachineObject* kmeans_machine;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscKMeansMachine_Type, &kmeans_machine)) Py_RETURN_NONE;
-
-  bool value = self->cxx->resetAccumulators(*kmeans_machine->cxx);
-  return Py_BuildValue("b", value);
-
-  BOB_CATCH_MEMBER("cannot perform the reset_accumulators method", 0)
-}
-
-
-static PyMethodDef PyBobLearnMiscKMeansTrainer_methods[] = {
-  {
-    initialize.name(),
-    (PyCFunction)PyBobLearnMiscKMeansTrainer_initialize,
-    METH_VARARGS|METH_KEYWORDS,
-    initialize.doc()
-  },
-  {
-    eStep.name(),
-    (PyCFunction)PyBobLearnMiscKMeansTrainer_eStep,
-    METH_VARARGS|METH_KEYWORDS,
-    eStep.doc()
-  },
-  {
-    mStep.name(),
-    (PyCFunction)PyBobLearnMiscKMeansTrainer_mStep,
-    METH_VARARGS|METH_KEYWORDS,
-    mStep.doc()
-  },
-  {
-    compute_likelihood.name(),
-    (PyCFunction)PyBobLearnMiscKMeansTrainer_compute_likelihood,
-    METH_VARARGS|METH_KEYWORDS,
-    compute_likelihood.doc()
-  },
-  {
-    reset_accumulators.name(),
-    (PyCFunction)PyBobLearnMiscKMeansTrainer_reset_accumulators,
-    METH_VARARGS|METH_KEYWORDS,
-    reset_accumulators.doc()
-  },
-  {0} /* Sentinel */
-};
-
-
-/******************************************************************/
-/************ Module Section **************************************/
-/******************************************************************/
-
-// Define the Gaussian type struct; will be initialized later
-PyTypeObject PyBobLearnMiscKMeansTrainer_Type = {
-  PyVarObject_HEAD_INIT(0,0)
-  0
-};
-
-bool init_BobLearnMiscKMeansTrainer(PyObject* module)
-{
-  // initialize the type struct
-  PyBobLearnMiscKMeansTrainer_Type.tp_name = KMeansTrainer_doc.name();
-  PyBobLearnMiscKMeansTrainer_Type.tp_basicsize = sizeof(PyBobLearnMiscKMeansTrainerObject);
-  PyBobLearnMiscKMeansTrainer_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;//Enable the class inheritance
-  PyBobLearnMiscKMeansTrainer_Type.tp_doc = KMeansTrainer_doc.doc();
-
-  // set the functions
-  PyBobLearnMiscKMeansTrainer_Type.tp_new = PyType_GenericNew;
-  PyBobLearnMiscKMeansTrainer_Type.tp_init = reinterpret_cast<initproc>(PyBobLearnMiscKMeansTrainer_init);
-  PyBobLearnMiscKMeansTrainer_Type.tp_dealloc = reinterpret_cast<destructor>(PyBobLearnMiscKMeansTrainer_delete);
-  PyBobLearnMiscKMeansTrainer_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnMiscKMeansTrainer_RichCompare);
-  PyBobLearnMiscKMeansTrainer_Type.tp_methods = PyBobLearnMiscKMeansTrainer_methods;
-  PyBobLearnMiscKMeansTrainer_Type.tp_getset = PyBobLearnMiscKMeansTrainer_getseters;
-  PyBobLearnMiscKMeansTrainer_Type.tp_call = reinterpret_cast<ternaryfunc>(PyBobLearnMiscKMeansTrainer_compute_likelihood);
-
-
-  // check that everything is fine
-  if (PyType_Ready(&PyBobLearnMiscKMeansTrainer_Type) < 0) return false;
-
-  // add the type to the module
-  Py_INCREF(&PyBobLearnMiscKMeansTrainer_Type);
-  return PyModule_AddObject(module, "_KMeansTrainer", (PyObject*)&PyBobLearnMiscKMeansTrainer_Type) >= 0;
-}
-
diff --git a/bob/learn/misc/linear_scoring.cpp b/bob/learn/misc/linear_scoring.cpp
deleted file mode 100644
index b5bf6e079ded2262e206f4452f70649623492eec..0000000000000000000000000000000000000000
--- a/bob/learn/misc/linear_scoring.cpp
+++ /dev/null
@@ -1,266 +0,0 @@
-/**
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- * @date Wed 05 Feb 16:10:48 2015
- *
- * @brief Python API for bob::learn::em
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "main.h"
-
-/*Convert a PyObject to a a list of GMMStats*/
-//template<class R, class P1, class P2>
-static int extract_gmmstats_list(PyObject *list,
-                             std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> >& training_data)
-{
-  for (int i=0; i<PyList_GET_SIZE(list); i++){
-  
-    PyBobLearnMiscGMMStatsObject* stats;
-    if (!PyArg_Parse(PyList_GetItem(list, i), "O!", &PyBobLearnMiscGMMStats_Type, &stats)){
-      PyErr_Format(PyExc_RuntimeError, "Expected GMMStats objects");
-      return -1;
-    }
-    training_data.push_back(stats->cxx);
-  }
-  return 0;
-}
-
-static int extract_gmmmachine_list(PyObject *list,
-                             std::vector<boost::shared_ptr<const bob::learn::misc::GMMMachine> >& training_data)
-{
-  for (int i=0; i<PyList_GET_SIZE(list); i++){
-  
-    PyBobLearnMiscGMMMachineObject* stats;
-    if (!PyArg_Parse(PyList_GetItem(list, i), "O!", &PyBobLearnMiscGMMMachine_Type, &stats)){
-      PyErr_Format(PyExc_RuntimeError, "Expected GMMMachine objects");
-      return -1;
-    }
-    training_data.push_back(stats->cxx);
-  }
-  return 0;
-}
-
-
-
-/*Convert a PyObject to a list of blitz Array*/
-template <int N>
-int extract_array_list(PyObject* list, std::vector<blitz::Array<double,N> >& vec)
-{
-
-  if(list==0)
-    return 0;
-
-  for (int i=0; i<PyList_GET_SIZE(list); i++)
-  {
-    PyBlitzArrayObject* blitz_object; 
-    if (!PyArg_Parse(PyList_GetItem(list, i), "O&", &PyBlitzArray_Converter, &blitz_object)){
-      PyErr_Format(PyExc_RuntimeError, "Expected numpy array object");
-      return -1;
-    }
-    auto blitz_object_ = make_safe(blitz_object);
-    vec.push_back(*PyBlitzArrayCxx_AsBlitz<double,N>(blitz_object));
-  }
-  return 0;
-}
-
-/* converts PyObject to bool and returns false if object is NULL */
-static inline bool f(PyObject* o){return o != 0 && PyObject_IsTrue(o) > 0;}
-
-
-/*** linear_scoring ***/
-static auto linear_scoring1 = bob::extension::FunctionDoc(
-  "linear_scoring",
-  "",
-  0,
-  true
-)
-.add_prototype("models, ubm, test_stats, test_channelOffset, frame_length_normalisation", "output")
-.add_parameter("models", "list(:py:class:`bob.learn.misc.GMMMachine`)", "")
-.add_parameter("ubm", ":py:class:`bob.learn.misc.GMMMachine`", "")
-.add_parameter("test_stats", "list(:py:class:`bob.learn.misc.GMMStats`)", "")
-.add_parameter("test_channelOffset", "list(array_like<float,1>)", "")
-.add_parameter("frame_length_normalisation", "bool", "")
-.add_return("output","array_like<float,1>","Score");
-
-
-static auto linear_scoring2 = bob::extension::FunctionDoc(
-  "linear_scoring",
-  "",
-  0,
-  true
-)
-.add_prototype("models, ubm_mean, ubm_variance, test_stats, test_channelOffset, frame_length_normalisation", "output")
-.add_parameter("models", "list(array_like<float,1>)", "")
-.add_parameter("ubm_mean", "list(array_like<float,1>)", "")
-.add_parameter("ubm_variance", "list(array_like<float,1>)", "")
-.add_parameter("test_stats", "list(:py:class:`bob.learn.misc.GMMStats`)", "")
-.add_parameter("test_channelOffset", "list(array_like<float,1>)", "")
-.add_parameter("frame_length_normalisation", "bool", "")
-.add_return("output","array_like<float,1>","Score");
-
-
-
-static auto linear_scoring3 = bob::extension::FunctionDoc(
-  "linear_scoring",
-  "",
-  0,
-  true
-)
-.add_prototype("model, ubm_mean, ubm_variance, test_stats, test_channelOffset, frame_length_normalisation", "output")
-.add_parameter("model", "array_like<float,1>", "")
-.add_parameter("ubm_mean", "array_like<float,1>", "")
-.add_parameter("ubm_variance", "array_like<float,1>", "")
-.add_parameter("test_stats", ":py:class:`bob.learn.misc.GMMStats`", "")
-.add_parameter("test_channelOffset", "array_like<float,1>", "")
-.add_parameter("frame_length_normalisation", "bool", "")
-.add_return("output","array_like<float,1>","Score");
-
-static PyObject* PyBobLearnMisc_linear_scoring(PyObject*, PyObject* args, PyObject* kwargs) {
-    
-  //Cheking the number of arguments
-  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-    
-  //Reading the first input argument
-  PyObject* arg = 0;
-  if (PyTuple_Size(args))
-    arg = PyTuple_GET_ITEM(args, 0);
-  else {
-    PyObject* tmp = PyDict_Values(kwargs);
-    auto tmp_ = make_safe(tmp);
-    arg = PyList_GET_ITEM(tmp, 0);
-  }
-  
-  //Checking the signature of the method (list of GMMMachine as input)
-  if ((PyList_Check(arg)) && PyBobLearnMiscGMMMachine_Check(PyList_GetItem(arg, 0)) && (nargs >= 3) && (nargs<=5) ){
-  
-    char** kwlist = linear_scoring1.kwlist(0);
-
-    PyObject* gmm_list_o                 = 0;
-    PyBobLearnMiscGMMMachineObject* ubm  = 0;
-    PyObject* stats_list_o               = 0;
-    PyObject* channel_offset_list_o      = 0;
-    PyObject* frame_length_normalisation = Py_False;
-
-    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!O!|O!O!", kwlist, &PyList_Type, &gmm_list_o,
-                                                                       &PyBobLearnMiscGMMMachine_Type, &ubm,
-                                                                       &PyList_Type, &stats_list_o,
-                                                                       &PyList_Type, &channel_offset_list_o,
-                                                                       &PyBool_Type, &frame_length_normalisation)){
-      linear_scoring1.print_usage();
-      Py_RETURN_NONE;
-    }
-
-    std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> > stats_list;
-    if(extract_gmmstats_list(stats_list_o ,stats_list)!=0)
-      Py_RETURN_NONE;
-
-    std::vector<boost::shared_ptr<const bob::learn::misc::GMMMachine> > gmm_list;
-    if(extract_gmmmachine_list(gmm_list_o ,gmm_list)!=0)
-      Py_RETURN_NONE;
-
-    std::vector<blitz::Array<double,1> > channel_offset_list;
-    if(extract_array_list(channel_offset_list_o ,channel_offset_list)!=0)
-      Py_RETURN_NONE;
-
-    blitz::Array<double, 2> scores = blitz::Array<double, 2>(gmm_list.size(), stats_list.size());
-    if(channel_offset_list.size()==0)
-      bob::learn::misc::linearScoring(gmm_list, *ubm->cxx, stats_list, f(frame_length_normalisation),scores);
-    else
-      bob::learn::misc::linearScoring(gmm_list, *ubm->cxx, stats_list, channel_offset_list, f(frame_length_normalisation),scores);
-
-    return PyBlitzArrayCxx_AsConstNumpy(scores);
-  }
-
-  //Checking the signature of the method (list of arrays as input
-  else if ((PyList_Check(arg)) && PyArray_Check(PyList_GetItem(arg, 0)) && (nargs >= 4) && (nargs<=6) ){
-  
-    char** kwlist = linear_scoring2.kwlist(0);
-
-    PyObject* model_supervector_list_o        = 0;
-    PyBlitzArrayObject* ubm_means             = 0;
-    PyBlitzArrayObject* ubm_variances         = 0;
-    PyObject* stats_list_o                    = 0;
-    PyObject* channel_offset_list_o           = 0;
-    PyObject* frame_length_normalisation      = Py_False;
-
-    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&O&O!|O!O!", kwlist, &PyList_Type, &model_supervector_list_o,
-                                                                       &PyBlitzArray_Converter, &ubm_means,
-                                                                       &PyBlitzArray_Converter, &ubm_variances,
-                                                                       &PyList_Type, &stats_list_o,
-                                                                       &PyList_Type, &channel_offset_list_o,
-                                                                       &PyBool_Type, &frame_length_normalisation)){
-      linear_scoring2.print_usage(); 
-      Py_RETURN_NONE;
-    }
-    
-    //protects acquired resources through this scope
-    auto ubm_means_ = make_safe(ubm_means);
-    auto ubm_variances_ = make_safe(ubm_variances);    
-
-    std::vector<blitz::Array<double,1> > model_supervector_list;
-    if(extract_array_list(model_supervector_list_o ,model_supervector_list)!=0)
-      Py_RETURN_NONE;
-
-    std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> > stats_list;
-    if(extract_gmmstats_list(stats_list_o ,stats_list)!=0)
-      Py_RETURN_NONE;
-
-    std::vector<blitz::Array<double,1> > channel_offset_list;
-    if(extract_array_list(channel_offset_list_o ,channel_offset_list)!=0)
-      Py_RETURN_NONE;
-
-    blitz::Array<double, 2> scores = blitz::Array<double, 2>(model_supervector_list.size(), stats_list.size());
-    if(channel_offset_list.size()==0)
-      bob::learn::misc::linearScoring(model_supervector_list, *PyBlitzArrayCxx_AsBlitz<double,1>(ubm_means),*PyBlitzArrayCxx_AsBlitz<double,1>(ubm_variances), stats_list, f(frame_length_normalisation),scores);
-    else
-      bob::learn::misc::linearScoring(model_supervector_list, *PyBlitzArrayCxx_AsBlitz<double,1>(ubm_means),*PyBlitzArrayCxx_AsBlitz<double,1>(ubm_variances), stats_list, channel_offset_list, f(frame_length_normalisation),scores);
-
-    return PyBlitzArrayCxx_AsConstNumpy(scores);
-  
-  }
-  
-  //Checking the signature of the method (list of arrays as input
-  else if (PyArray_Check(arg) && (nargs >= 5) && (nargs<=6) ){
-  
-    char** kwlist = linear_scoring3.kwlist(0);
-
-    PyBlitzArrayObject* model                 = 0;
-    PyBlitzArrayObject* ubm_means             = 0;
-    PyBlitzArrayObject* ubm_variances         = 0;
-    PyBobLearnMiscGMMStatsObject* stats       = 0;
-    PyBlitzArrayObject* channel_offset        = 0;
-    PyObject* frame_length_normalisation      = Py_False;
-
-    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&O&O!O&|O!", kwlist, &PyBlitzArray_Converter, &model,
-                                                                       &PyBlitzArray_Converter, &ubm_means,
-                                                                       &PyBlitzArray_Converter, &ubm_variances,
-                                                                       &PyBobLearnMiscGMMStats_Type, &stats,
-                                                                       &PyBlitzArray_Converter, &channel_offset,
-                                                                       &PyBool_Type, &frame_length_normalisation)){
-      linear_scoring3.print_usage(); 
-      Py_RETURN_NONE;
-    }
-    
-    //protects acquired resources through this scope
-    auto model_ = make_safe(model);
-    auto ubm_means_ = make_safe(ubm_means);
-    auto ubm_variances_ = make_safe(ubm_variances);
-    auto channel_offset_ = make_safe(channel_offset);
-
-    double score = bob::learn::misc::linearScoring(*PyBlitzArrayCxx_AsBlitz<double,1>(model), *PyBlitzArrayCxx_AsBlitz<double,1>(ubm_means),*PyBlitzArrayCxx_AsBlitz<double,1>(ubm_variances), *stats->cxx, *PyBlitzArrayCxx_AsBlitz<double,1>(channel_offset), f(frame_length_normalisation));
-
-    return Py_BuildValue("d",score);
-  }
-
-  
-  else{
-    PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - linear_scoring requires 5 or 6 arguments, but you provided %d (see help)", nargs);
-    linear_scoring1.print_usage();
-    linear_scoring2.print_usage();
-    linear_scoring3.print_usage();
-    Py_RETURN_NONE;
-  }
-
-}
-
diff --git a/bob/learn/misc/main.cpp b/bob/learn/misc/main.cpp
deleted file mode 100644
index 10e1e8af0b9e059dfde1244c6cdb75f01471158d..0000000000000000000000000000000000000000
--- a/bob/learn/misc/main.cpp
+++ /dev/null
@@ -1,145 +0,0 @@
-/**
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- * @date Fri Nov 21 12:39:21 CET 2014
- *
- * @brief Bindings to bob::learn::misc routines
- */
-
-#ifdef NO_IMPORT_ARRAY
-#undef NO_IMPORT_ARRAY
-#endif
-#include "main.h"
-#include "ztnorm.cpp"
-#include "linear_scoring.cpp"
-
-
-static PyMethodDef module_methods[] = {
-  {
-    zt_norm.name(),
-    (PyCFunction)PyBobLearnMisc_ztNorm,
-    METH_VARARGS|METH_KEYWORDS,
-    zt_norm.doc()
-  },
-  {
-    t_norm.name(),
-    (PyCFunction)PyBobLearnMisc_tNorm,
-    METH_VARARGS|METH_KEYWORDS,
-    t_norm.doc()
-  },
-  {
-    z_norm.name(),
-    (PyCFunction)PyBobLearnMisc_zNorm,
-    METH_VARARGS|METH_KEYWORDS,
-    z_norm.doc()
-  },
-  {
-    linear_scoring1.name(),
-    (PyCFunction)PyBobLearnMisc_linear_scoring,
-    METH_VARARGS|METH_KEYWORDS,
-    linear_scoring1.doc()
-  },
-
-  {0}//Sentinel
-};
-
-
-PyDoc_STRVAR(module_docstr, "Bob EM based Machine Learning Routines");
-
-int PyBobLearnMisc_APIVersion = BOB_LEARN_MISC_API_VERSION;
-
-
-#if PY_VERSION_HEX >= 0x03000000
-static PyModuleDef module_definition = {
-  PyModuleDef_HEAD_INIT,
-  BOB_EXT_MODULE_NAME,
-  module_docstr,
-  -1,
-  module_methods,
-  0, 0, 0, 0
-};
-#endif
-
-static PyObject* create_module (void) {
-
-# if PY_VERSION_HEX >= 0x03000000
-  PyObject* module = PyModule_Create(&module_definition);
-# else
-  PyObject* module = Py_InitModule3(BOB_EXT_MODULE_NAME, module_methods, module_docstr);
-# endif
-  if (!module) return 0;
-  auto module_ = make_safe(module); ///< protects against early returns
-
-  if (PyModule_AddStringConstant(module, "__version__", BOB_EXT_MODULE_VERSION) < 0) return 0;
-  if (!init_BobLearnMiscGaussian(module)) return 0;
-  if (!init_BobLearnMiscGMMStats(module)) return 0;
-  if (!init_BobLearnMiscGMMMachine(module)) return 0;
-  if (!init_BobLearnMiscKMeansMachine(module)) return 0;
-  if (!init_BobLearnMiscKMeansTrainer(module)) return 0;
-  //if (!init_BobLearnMiscGMMBaseTrainer(module)) return 0;
-  if (!init_BobLearnMiscMLGMMTrainer(module)) return 0;  
-  if (!init_BobLearnMiscMAPGMMTrainer(module)) return 0;
-
-  if (!init_BobLearnMiscJFABase(module)) return 0;
-  if (!init_BobLearnMiscJFAMachine(module)) return 0;
-  if (!init_BobLearnMiscJFATrainer(module)) return 0;
-
-  if (!init_BobLearnMiscISVBase(module)) return 0;
-  if (!init_BobLearnMiscISVMachine(module)) return 0;
-  if (!init_BobLearnMiscISVTrainer(module)) return 0;
-
-  if (!init_BobLearnMiscIVectorMachine(module)) return 0;
-  if (!init_BobLearnMiscIVectorTrainer(module)) return 0;
-    
-  if (!init_BobLearnMiscPLDABase(module)) return 0;
-  if (!init_BobLearnMiscPLDAMachine(module)) return 0;
-  if (!init_BobLearnMiscPLDATrainer(module)) return 0; 
-
-  if (!init_BobLearnMiscEMPCATrainer(module)) return 0;  
-
-
-  static void* PyBobLearnMisc_API[PyBobLearnMisc_API_pointers];
-
-  /* exhaustive list of C APIs */
-
-  /**************
-   * Versioning *
-   **************/
-
-  PyBobLearnMisc_API[PyBobLearnMisc_APIVersion_NUM] = (void *)&PyBobLearnMisc_APIVersion;
-
-
-#if PY_VERSION_HEX >= 0x02070000
-
-  /* defines the PyCapsule */
-
-  PyObject* c_api_object = PyCapsule_New((void *)PyBobLearnMisc_API,
-      BOB_EXT_MODULE_PREFIX "." BOB_EXT_MODULE_NAME "._C_API", 0);
-
-#else
-
-  PyObject* c_api_object = PyCObject_FromVoidPtr((void *)PyBobLearnMisc_API, 0);
-
-#endif
-
-  if (!c_api_object) return 0;
-
-  if (PyModule_AddObject(module, "_C_API", c_api_object) < 0) return 0;
-
-
-  /* imports bob.learn.misc's C-API dependencies */
-  if (import_bob_blitz() < 0) return 0;
-  if (import_bob_core_random() < 0) return 0;
-  if (import_bob_io_base() < 0) return 0;
-  //if (import_bob_learn_linear() < 0) return 0;
-
-  Py_INCREF(module);
-  return module;
-
-}
-
-PyMODINIT_FUNC BOB_EXT_ENTRY_NAME (void) {
-# if PY_VERSION_HEX >= 0x03000000
-  return
-# endif
-    create_module();
-}
diff --git a/bob/learn/misc/main.h b/bob/learn/misc/main.h
deleted file mode 100644
index 5be119c3a5a404636f4f593d5894c43fa50d36df..0000000000000000000000000000000000000000
--- a/bob/learn/misc/main.h
+++ /dev/null
@@ -1,312 +0,0 @@
-/**
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- * @date Fri Nov 21 10:31:25 CET 2014
- *
- * @brief Header file for bindings to bob::learn::em
- */
-
-#ifndef BOB_LEARN_EM_MAIN_H
-#define BOB_LEARN_EM_MAIN_H
-
-#include <bob.blitz/cppapi.h>
-#include <bob.blitz/cleanup.h>
-#include <bob.core/random_api.h>
-#include <bob.io.base/api.h>
-
-#include <bob.learn.linear/api.h>
-
-#include <bob.extension/documentation.h>
-
-#define BOB_LEARN_EM_MODULE
-#include <bob.learn.misc/api.h>
-
-#include <bob.learn.misc/Gaussian.h>
-#include <bob.learn.misc/GMMStats.h>
-#include <bob.learn.misc/GMMMachine.h>
-#include <bob.learn.misc/KMeansMachine.h>
-
-#include <bob.learn.misc/KMeansTrainer.h>
-//#include <bob.learn.misc/GMMBaseTrainer.h>
-#include <bob.learn.misc/ML_GMMTrainer.h>
-#include <bob.learn.misc/MAP_GMMTrainer.h>
-
-#include <bob.learn.misc/JFABase.h>
-#include <bob.learn.misc/JFAMachine.h>
-#include <bob.learn.misc/JFATrainer.h>
-
-#include <bob.learn.misc/ISVBase.h>
-#include <bob.learn.misc/ISVMachine.h>
-#include <bob.learn.misc/ISVTrainer.h>
-
-
-#include <bob.learn.misc/IVectorMachine.h>
-#include <bob.learn.misc/IVectorTrainer.h>
-
-#include <bob.learn.misc/EMPCATrainer.h>
-
-#include <bob.learn.misc/PLDAMachine.h>
-#include <bob.learn.misc/PLDATrainer.h>
-
-#include <bob.learn.misc/ZTNorm.h>
-
-
-
-#if PY_VERSION_HEX >= 0x03000000
-#define PyInt_Check PyLong_Check
-#define PyInt_AS_LONG PyLong_AS_LONG
-#define PyString_Check PyUnicode_Check
-#define PyString_AS_STRING(x) PyBytes_AS_STRING(make_safe(PyUnicode_AsUTF8String(x)).get())
-#endif
-
-#define TRY try{
-
-#define CATCH(message,ret) }\
-  catch (std::exception& e) {\
-    PyErr_SetString(PyExc_RuntimeError, e.what());\
-    return ret;\
-  } \
-  catch (...) {\
-    PyErr_Format(PyExc_RuntimeError, "%s " message ": unknown exception caught", Py_TYPE(self)->tp_name);\
-    return ret;\
-  }
-
-#define CATCH_(message, ret) }\
-  catch (std::exception& e) {\
-    PyErr_SetString(PyExc_RuntimeError, e.what());\
-    return ret;\
-  } \
-  catch (...) {\
-    PyErr_Format(PyExc_RuntimeError, message ": unknown exception caught");\
-    return ret;\
-  }
-
-static inline char* c(const char* o){return const_cast<char*>(o);}  /* converts const char* to char* */
-
-/// inserts the given key, value pair into the given dictionaries
-static inline int insert_item_string(PyObject* dict, PyObject* entries, const char* key, Py_ssize_t value){
-  auto v = make_safe(Py_BuildValue("n", value));
-  if (PyDict_SetItemString(dict, key, v.get()) < 0) return -1;
-  return PyDict_SetItemString(entries, key, v.get());
-}
-
-// Gaussian
-typedef struct {
-  PyObject_HEAD
-  boost::shared_ptr<bob::learn::misc::Gaussian> cxx;
-} PyBobLearnMiscGaussianObject;
-
-extern PyTypeObject PyBobLearnMiscGaussian_Type;
-bool init_BobLearnMiscGaussian(PyObject* module);
-int PyBobLearnMiscGaussian_Check(PyObject* o);
-
-
-// GMMStats
-typedef struct {
-  PyObject_HEAD
-  boost::shared_ptr<bob::learn::misc::GMMStats> cxx;
-} PyBobLearnMiscGMMStatsObject;
-
-extern PyTypeObject PyBobLearnMiscGMMStats_Type;
-bool init_BobLearnMiscGMMStats(PyObject* module);
-int PyBobLearnMiscGMMStats_Check(PyObject* o);
-
-
-// GMMMachine
-typedef struct {
-  PyObject_HEAD
-  boost::shared_ptr<bob::learn::misc::GMMMachine> cxx;
-} PyBobLearnMiscGMMMachineObject;
-
-extern PyTypeObject PyBobLearnMiscGMMMachine_Type;
-bool init_BobLearnMiscGMMMachine(PyObject* module);
-int PyBobLearnMiscGMMMachine_Check(PyObject* o);
-
-
-// KMeansMachine
-typedef struct {
-  PyObject_HEAD
-  boost::shared_ptr<bob::learn::misc::KMeansMachine> cxx;
-} PyBobLearnMiscKMeansMachineObject;
-
-extern PyTypeObject PyBobLearnMiscKMeansMachine_Type;
-bool init_BobLearnMiscKMeansMachine(PyObject* module);
-int PyBobLearnMiscKMeansMachine_Check(PyObject* o);
-
-
-// KMeansTrainer
-typedef struct {
-  PyObject_HEAD
-  boost::shared_ptr<bob::learn::misc::KMeansTrainer> cxx;
-} PyBobLearnMiscKMeansTrainerObject;
-
-extern PyTypeObject PyBobLearnMiscKMeansTrainer_Type;
-bool init_BobLearnMiscKMeansTrainer(PyObject* module);
-int PyBobLearnMiscKMeansTrainer_Check(PyObject* o);
-
-
-// GMMBaseTrainer
-/*
-typedef struct {
-  PyObject_HEAD
-  boost::shared_ptr<bob::learn::misc::GMMBaseTrainer> cxx;
-} PyBobLearnMiscGMMBaseTrainerObject;
-
-extern PyTypeObject PyBobLearnMiscGMMBaseTrainer_Type;
-bool init_BobLearnMiscGMMBaseTrainer(PyObject* module);
-int PyBobLearnMiscGMMBaseTrainer_Check(PyObject* o);
-*/
-
-// ML_GMMTrainer
-typedef struct {
-  PyObject_HEAD
-  boost::shared_ptr<bob::learn::misc::ML_GMMTrainer> cxx;
-} PyBobLearnMiscMLGMMTrainerObject;
-
-extern PyTypeObject PyBobLearnMiscMLGMMTrainer_Type;
-bool init_BobLearnMiscMLGMMTrainer(PyObject* module);
-int PyBobLearnMiscMLGMMTrainer_Check(PyObject* o);
-
-
-// MAP_GMMTrainer
-typedef struct {
-  PyObject_HEAD
-  boost::shared_ptr<bob::learn::misc::MAP_GMMTrainer> cxx;
-} PyBobLearnMiscMAPGMMTrainerObject;
-
-extern PyTypeObject PyBobLearnMiscMAPGMMTrainer_Type;
-bool init_BobLearnMiscMAPGMMTrainer(PyObject* module);
-int PyBobLearnMiscMAPGMMTrainer_Check(PyObject* o);
-
-
-// JFABase
-typedef struct {
-  PyObject_HEAD
-  boost::shared_ptr<bob::learn::misc::JFABase> cxx;
-} PyBobLearnMiscJFABaseObject;
-
-extern PyTypeObject PyBobLearnMiscJFABase_Type;
-bool init_BobLearnMiscJFABase(PyObject* module);
-int PyBobLearnMiscJFABase_Check(PyObject* o);
-
-
-// ISVBase
-typedef struct {
-  PyObject_HEAD
-  boost::shared_ptr<bob::learn::misc::ISVBase> cxx;
-} PyBobLearnMiscISVBaseObject;
-
-extern PyTypeObject PyBobLearnMiscISVBase_Type;
-bool init_BobLearnMiscISVBase(PyObject* module);
-int PyBobLearnMiscISVBase_Check(PyObject* o);
-
-
-// JFAMachine
-typedef struct {
-  PyObject_HEAD
-  boost::shared_ptr<bob::learn::misc::JFAMachine> cxx;
-} PyBobLearnMiscJFAMachineObject;
-
-extern PyTypeObject PyBobLearnMiscJFAMachine_Type;
-bool init_BobLearnMiscJFAMachine(PyObject* module);
-int PyBobLearnMiscJFAMachine_Check(PyObject* o);
-
-// JFATrainer
-typedef struct {
-  PyObject_HEAD
-  boost::shared_ptr<bob::learn::misc::JFATrainer> cxx;
-} PyBobLearnMiscJFATrainerObject;
-
-
-extern PyTypeObject PyBobLearnMiscJFATrainer_Type;
-bool init_BobLearnMiscJFATrainer(PyObject* module);
-int PyBobLearnMiscJFATrainer_Check(PyObject* o);
-
-// ISVMachine
-typedef struct {
-  PyObject_HEAD
-  boost::shared_ptr<bob::learn::misc::ISVMachine> cxx;
-} PyBobLearnMiscISVMachineObject;
-
-extern PyTypeObject PyBobLearnMiscISVMachine_Type;
-bool init_BobLearnMiscISVMachine(PyObject* module);
-int PyBobLearnMiscISVMachine_Check(PyObject* o);
-
-// ISVTrainer
-typedef struct {
-  PyObject_HEAD
-  boost::shared_ptr<bob::learn::misc::ISVTrainer> cxx;
-} PyBobLearnMiscISVTrainerObject;
-
-extern PyTypeObject PyBobLearnMiscISVTrainer_Type;
-bool init_BobLearnMiscISVTrainer(PyObject* module);
-int PyBobLearnMiscISVTrainer_Check(PyObject* o);
-
-// IVectorMachine
-typedef struct {
-  PyObject_HEAD
-  boost::shared_ptr<bob::learn::misc::IVectorMachine> cxx;
-} PyBobLearnMiscIVectorMachineObject;
-
-extern PyTypeObject PyBobLearnMiscIVectorMachine_Type;
-bool init_BobLearnMiscIVectorMachine(PyObject* module);
-int PyBobLearnMiscIVectorMachine_Check(PyObject* o);
-
-
-// IVectorTrainer
-typedef struct {
-  PyObject_HEAD
-  boost::shared_ptr<bob::learn::misc::IVectorTrainer> cxx;
-} PyBobLearnMiscIVectorTrainerObject;
-
-extern PyTypeObject PyBobLearnMiscIVectorTrainer_Type;
-bool init_BobLearnMiscIVectorTrainer(PyObject* module);
-int PyBobLearnMiscIVectorTrainer_Check(PyObject* o);
-
-
-// PLDABase
-typedef struct {
-  PyObject_HEAD
-  boost::shared_ptr<bob::learn::misc::PLDABase> cxx;
-} PyBobLearnMiscPLDABaseObject;
-
-extern PyTypeObject PyBobLearnMiscPLDABase_Type;
-bool init_BobLearnMiscPLDABase(PyObject* module);
-int PyBobLearnMiscPLDABase_Check(PyObject* o);
-
-
-// PLDAMachine
-typedef struct {
-  PyObject_HEAD
-  boost::shared_ptr<bob::learn::misc::PLDAMachine> cxx;
-} PyBobLearnMiscPLDAMachineObject;
-
-extern PyTypeObject PyBobLearnMiscPLDAMachine_Type;
-bool init_BobLearnMiscPLDAMachine(PyObject* module);
-int PyBobLearnMiscPLDAMachine_Check(PyObject* o);
-
-
-// PLDATrainer
-typedef struct {
-  PyObject_HEAD
-  boost::shared_ptr<bob::learn::misc::PLDATrainer> cxx;
-} PyBobLearnMiscPLDATrainerObject;
-
-extern PyTypeObject PyBobLearnMiscPLDATrainer_Type;
-bool init_BobLearnMiscPLDATrainer(PyObject* module);
-int PyBobLearnMiscPLDATrainer_Check(PyObject* o);
-
-
-
-// EMPCATrainer
-typedef struct {
-  PyObject_HEAD
-  boost::shared_ptr<bob::learn::misc::EMPCATrainer> cxx;
-} PyBobLearnMiscEMPCATrainerObject;
-
-extern PyTypeObject PyBobLearnMiscEMPCATrainer_Type;
-bool init_BobLearnMiscEMPCATrainer(PyObject* module);
-int PyBobLearnMiscEMPCATrainer_Check(PyObject* o);
-
-
-
-#endif // BOB_LEARN_EM_MAIN_H
diff --git a/bob/learn/misc/old/blitz_numpy.cc b/bob/learn/misc/old/blitz_numpy.cc
deleted file mode 100644
index 635495294a76380bc78f1e9e6c1b3fcba00c91de..0000000000000000000000000000000000000000
--- a/bob/learn/misc/old/blitz_numpy.cc
+++ /dev/null
@@ -1,235 +0,0 @@
-/**
- * @author Andre Anjos <andre.anjos@idiap.ch>
- * @date Mon Sep 26 11:47:30 2011 +0200
- *
- * @brief Automatic converters to-from python for blitz::Array's
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "ndarray.h"
-
-template<typename T, int N>
-void npy_copy_cast(blitz::Array<T,N>& bz, PyArrayObject* arrobj) {
-  PYTHON_ERROR(TypeError, "unsupported number of dimensions: %d", N);
-}
-
-template<typename T>
-static void npy_copy_cast(blitz::Array<T,1>& bz, PyArrayObject* arrobj) {
-  for (int i=0; i<PyArray_DIM(arrobj,0); ++i)
-    bz(i) = *static_cast<T*>(PyArray_GETPTR1(arrobj, i));
-}
-
-template<typename T>
-static void npy_copy_cast(blitz::Array<T,2>& bz, PyArrayObject* arrobj) {
-  for (int i=0; i<PyArray_DIM(arrobj,0); ++i)
-    for (int j=0; j<PyArray_DIM(arrobj,1); ++j)
-      bz(i,j) = *static_cast<T*>(PyArray_GETPTR2(arrobj, i, j));
-}
-
-template<typename T>
-static void npy_copy_cast(blitz::Array<T,3>& bz, PyArrayObject* arrobj) {
-  for (int i=0; i<PyArray_DIM(arrobj,0); ++i)
-    for (int j=0; j<PyArray_DIM(arrobj,1); ++j)
-      for (int k=0; k<PyArray_DIM(arrobj,2); ++k)
-        bz(i,j,k) = *static_cast<T*>(PyArray_GETPTR3(arrobj, i, j, k));
-}
-
-template<typename T>
-static void npy_copy_cast(blitz::Array<T,4>& bz, PyArrayObject* arrobj) {
-  for (int i=0; i<PyArray_DIM(arrobj,0); ++i)
-    for (int j=0; j<PyArray_DIM(arrobj,1); ++j)
-      for (int k=0; k<PyArray_DIM(arrobj,2); ++k)
-        for (int l=0; l<PyArray_DIM(arrobj,3); ++l)
-          bz(i,j,k,l) = *static_cast<T*>(PyArray_GETPTR4(arrobj, i, j, k, l));
-}
-
-/**
- * Objects of this type create a binding between blitz::Array<T,N> and
- * NumPy arrays. You can specify a NumPy array as a parameter to a
- * bound method that would normally receive a blitz::Array<T,N> or a const
- * blitz::Array<T,N>& and the conversion will just magically happen, as
- * efficiently as possible.
- *
- * Please note that passing by value should be avoided as much as possible. In
- * this mode, the underlying method will still be able to alter the underlying
- * array storage area w/o being able to modify the array itself, causing a
- * gigantic mess. If you want to make something close to pass-by-value, just
- * pass by non-const reference instead.
- */
-template <typename T, int N> struct bz_from_npy {
-
-  typedef typename blitz::Array<T,N> array_type;
-  typedef typename blitz::TinyVector<int,N> shape_type;
-
-  /**
-   * Registers converter from numpy array into a blitz::Array<T,N>
-   */
-  bz_from_npy() {
-    boost::python::converter::registry::push_back(&convertible, &construct,
-        boost::python::type_id<array_type>());
-  }
-
-  /**
-   * This method will determine if the input python object is convertible into
-   * a Array<T,N>
-   */
-  static void* convertible(PyObject* obj_ptr) {
-    boost::python::handle<> hdl(boost::python::borrowed(boost::python::allow_null(obj_ptr)));
-    boost::python::object obj(hdl);
-
-    bob::io::base::array::typeinfo tinfo(bob::io::base::array::getElementType<T>(), N);
-
-    bob::python::convert_t result = bob::python::convertible_to(obj, tinfo, false, true);
-
-    // we cannot afford copying, only referencing.
-    if (result == bob::python::BYREFERENCE) return obj_ptr;
-
-    // but, if the user passed an array of the right type, but we still need to
-    // copy, warn the user as this is a tricky case to debug.
-    PyArrayObject* arr = reinterpret_cast<PyArrayObject*>(obj_ptr);
-    if (result == bob::python::WITHARRAYCOPY &&
-        bob::python::ctype_to_num<T>() == PyArray_DESCR(arr)->type_num) {
-      PYTHON_ERROR(RuntimeError, "The bindings you are trying to use to this C++ method require a numpy.ndarray -> blitz::Array<%s,%d> conversion, but the array you passed, despite the correct type, is not C-style contiguous and/or properly aligned, so I cannot automatically wrap it. You can check this by yourself by printing the flags on such a variable with the command 'print(<varname>.flags)'. The only way to circumvent this problem, from python, is to create a copy the variable by issuing '<varname>.copy()' before calling the bound method. Otherwise, if you wish the copy to be executed automatically, you have to re-bind the method to use our custom 'const_ndarray' type.", bob::io::base::array::stringize<T>(), N);
-    }
-
-    return 0;
-  }
-
-  /**
-   * This method will finally construct the C++ element out of the python
-   * object that was input. Please note that when boost::python reaches this
-   * method, the object has already been checked for convertibility.
-   */
-  static void construct(PyObject* obj_ptr,
-      boost::python::converter::rvalue_from_python_stage1_data* data) {
-
-    //black-magic required to setup the blitz::Array<> storage area
-    void* storage = ((boost::python::converter::rvalue_from_python_storage<array_type>*)data)->storage.bytes;
-
-    PyArrayObject *arr = reinterpret_cast<PyArrayObject*>(obj_ptr);
-
-    //mounts the numpy memory at the "newly allocated" blitz::Array
-    shape_type shape;
-    shape_type stride;
-    for (int k=0; k<N; ++k) {
-      shape[k] = PyArray_DIMS(arr)[k];
-      stride[k] = (PyArray_STRIDES(arr)[k]/sizeof(T));
-    }
-    new (storage) array_type((T*)PyArray_DATA(arr), shape, stride,
-        blitz::neverDeleteData); //place operator
-    data->convertible = storage;
-
-  }
-
-};
-
-/**
- * Avoids the big number of warnings...
- */
-static PyArrayObject* make_pyarray(int nd, npy_intp* dims, int type) {
-  return (PyArrayObject*)PyArray_SimpleNew(nd, dims, type);
-}
-
-/**
- * Objects of this type bind blitz::Array<T,N> to numpy arrays. Your method
- * generates as output an object of this type and the object will be
- * automatically converted into a Numpy array.
- */
-template <typename T, int N> struct bz_to_npy {
-
-  typedef typename blitz::Array<T,N> array_type;
-  typedef typename blitz::TinyVector<int,N> shape_type;
-
-  static PyObject* convert(const array_type& tv) {
-    npy_intp dims[N];
-    for (int i=0; i<N; ++i) dims[i] = tv.extent(i);
-
-    PyArrayObject* retval = make_pyarray(N, dims, bob::python::ctype_to_num<T>());
-
-    //wrap new PyArray in a blitz layer and then copy the data
-    shape_type shape=0;
-    for (int k=0; k<PyArray_NDIM(retval); ++k) shape[k] = PyArray_DIMS(retval)[k];
-    shape_type stride=0;
-    for (int k=0; k<PyArray_NDIM(retval); ++k) stride[k] = (PyArray_STRIDES(retval)[k]/sizeof(T));
-    array_type bzdest((T*)PyArray_DATA(retval), shape, stride, blitz::neverDeleteData);
-    bzdest = tv;
-
-    return reinterpret_cast<PyObject*>(retval);
-  }
-
-  static const PyTypeObject* get_pytype() { return &PyArray_Type; }
-
-};
-
-template <typename T, int N>
-void register_bz_to_npy() {
-  boost::python::to_python_converter<typename blitz::Array<T,N>, bz_to_npy<T,N>
-#if defined BOOST_PYTHON_SUPPORTS_PY_SIGNATURES
-                          ,true
-#endif
-              >();
-}
-
-void bind_core_bz_numpy () {
-  /**
-   * The following struct constructors will make sure we can input
-   * blitz::Array<T,N> in our bound C++ routines w/o needing to specify
-   * special converters each time. The rvalue converters allow boost::python to
-   * automatically map the following inputs:
-   *
-   * a) const blitz::Array<T,N>& (pass by const reference)
-   * b) blitz::Array<T,N> (pass by value -- DO NEVER DO THIS!!!)
-   *
-   * Please note that the last case:
-   *
-   * c) blitz::Array<T,N>& (pass by non-const reference)
-   *
-   * is NOT covered by these converters. The reason being that because the
-   * object may be changed, there is no way for boost::python to update the
-   * original python object, in a sensible manner, at the return of the method.
-   *
-   * Avoid passing by non-const reference in your methods.
-   */
-#  define BOOST_PP_LOCAL_LIMITS (1, BOB_MAX_DIM)
-#  define BOOST_PP_LOCAL_MACRO(D) \
-   bz_from_npy<bool,D>();\
-   bz_from_npy<int8_t,D>();\
-   bz_from_npy<int16_t,D>();\
-   bz_from_npy<int32_t,D>();\
-   bz_from_npy<int64_t,D>();\
-   bz_from_npy<uint8_t,D>();\
-   bz_from_npy<uint16_t,D>();\
-   bz_from_npy<uint32_t,D>();\
-   bz_from_npy<uint64_t,D>();\
-   bz_from_npy<float,D>();\
-   bz_from_npy<double,D>();\
-   bz_from_npy<long double,D>();\
-   bz_from_npy<std::complex<float>,D>();\
-   bz_from_npy<std::complex<double>,D>();\
-   bz_from_npy<std::complex<long double>,D>();
-#  include BOOST_PP_LOCAL_ITERATE()
-
-  /**
-   * The following struct constructors will make C++ return values of type
-   * blitz::Array<T,N> to show up in the python side as numpy arrays.
-   */
-#  define BOOST_PP_LOCAL_LIMITS (1, BOB_MAX_DIM)
-#  define BOOST_PP_LOCAL_MACRO(D) \
-   register_bz_to_npy<bool,D>();\
-   register_bz_to_npy<int8_t,D>();\
-   register_bz_to_npy<int16_t,D>();\
-   register_bz_to_npy<int32_t,D>();\
-   register_bz_to_npy<int64_t,D>();\
-   register_bz_to_npy<uint8_t,D>();\
-   register_bz_to_npy<uint16_t,D>();\
-   register_bz_to_npy<uint32_t,D>();\
-   register_bz_to_npy<uint64_t,D>();\
-   register_bz_to_npy<float,D>();\
-   register_bz_to_npy<double,D>();\
-   register_bz_to_npy<long double,D>();\
-   register_bz_to_npy<std::complex<float>,D>();\
-   register_bz_to_npy<std::complex<double>,D>();\
-   register_bz_to_npy<std::complex<long double>,D>();
-#  include BOOST_PP_LOCAL_ITERATE()
-}
diff --git a/bob/learn/misc/old/empca_trainer.cc b/bob/learn/misc/old/empca_trainer.cc
deleted file mode 100644
index 3df1d75bd50a64a16e394ed7032c7b6f20412aed..0000000000000000000000000000000000000000
--- a/bob/learn/misc/old/empca_trainer.cc
+++ /dev/null
@@ -1,72 +0,0 @@
-/**
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @date Tue Oct 11 12:32:10 2011 +0200
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-#include "ndarray.h"
-
-#include <bob.learn.linear/pca.h>
-#include <bob.learn.misc/EMPCATrainer.h>
-
-using namespace boost::python;
-
-typedef bob::learn::misc::EMTrainer<bob::learn::linear::Machine, blitz::Array<double,2> > EMTrainerLinearBase;
-
-static void py_train(EMTrainerLinearBase& trainer,
-  bob::learn::linear::Machine& machine, bob::python::const_ndarray data)
-{
-  trainer.train(machine, data.bz<double,2>());
-}
-
-static void py_initialize(EMTrainerLinearBase& trainer,
-  bob::learn::linear::Machine& machine, bob::python::const_ndarray data)
-{
-  trainer.initialize(machine, data.bz<double,2>());
-}
-
-static void py_finalize(EMTrainerLinearBase& trainer,
-  bob::learn::linear::Machine& machine, bob::python::const_ndarray data)
-{
-  trainer.finalize(machine, data.bz<double,2>());
-}
-
-static void py_eStep(EMTrainerLinearBase& trainer,
-  bob::learn::linear::Machine& machine, bob::python::const_ndarray data)
-{
-  trainer.eStep(machine, data.bz<double,2>());
-}
-
-static void py_mStep(EMTrainerLinearBase& trainer,
-  bob::learn::linear::Machine& machine, bob::python::const_ndarray data)
-{
-  trainer.mStep(machine, data.bz<double,2>());
-}
-
-void bind_trainer_empca()
-{
-
-  class_<EMTrainerLinearBase, boost::noncopyable>("EMTrainerLinear", "The base python class for all EM-based trainers.", no_init)
-    .add_property("convergence_threshold", &EMTrainerLinearBase::getConvergenceThreshold, &EMTrainerLinearBase::setConvergenceThreshold, "Convergence threshold")
-    .add_property("max_iterations", &EMTrainerLinearBase::getMaxIterations, &EMTrainerLinearBase::setMaxIterations, "Max iterations")
-    .add_property("compute_likelihood_variable", &EMTrainerLinearBase::getComputeLikelihood, &EMTrainerLinearBase::setComputeLikelihood, "Indicates whether the log likelihood should be computed during EM or not")
-    .add_property("rng", &EMTrainerLinearBase::getRng, &EMTrainerLinearBase::setRng, "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.")
-    .def("train", &py_train, (arg("self"), arg("machine"), arg("data")), "Trains a machine using data")
-    .def("initialize", &py_initialize, (arg("self"), arg("machine"), arg("data")), "This method is called before the EM algorithm")
-    .def("finalize", &py_finalize, (arg("self"), arg("machine"), arg("data")), "This method is called at the end of the EM algorithm")
-    .def("e_step", &py_eStep, (arg("self"), arg("machine"), arg("data")),
-       "Updates the hidden variable distribution (or the sufficient statistics) given the Machine parameters. ")
-    .def("m_step", &py_mStep, (arg("self"), arg("machine"), arg("data")), "Updates the Machine parameters given the hidden variable distribution (or the sufficient statistics)")
-    .def("compute_likelihood", &EMTrainerLinearBase::computeLikelihood, (arg("self"), arg("machine")), "Computes the current log likelihood given the hidden variable distribution (or the sufficient statistics)")
-  ;
-
-  class_<bob::learn::misc::EMPCATrainer, boost::noncopyable, bases<EMTrainerLinearBase> >("EMPCATrainer",
-      "This class implements the EM algorithm for a Linear Machine (Probabilistic PCA).\n"
-      "See Section 12.2 of Bishop, \"Pattern recognition and machine learning\", 2006", init<optional<double,size_t,bool> >((arg("self"), arg("convergence_threshold"), arg("max_iterations"), arg("compute_likelihood"))))
-    .def(init<const bob::learn::misc::EMPCATrainer&>((arg("self"), arg("trainer")), "Copy constructs an EMPCATrainer"))
-    .def(self == self)
-    .def(self != self)
-    .def("is_similar_to", &bob::learn::misc::EMPCATrainer::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this EMPCATrainer with the 'other' one to be approximately the same.")
-    .add_property("sigma2", &bob::learn::misc::EMPCATrainer::getSigma2, &bob::learn::misc::EMPCATrainer::setSigma2, "The noise sigma2 of the probabilistic model")
-  ;
-}
diff --git a/bob/learn/misc/old/exception.h b/bob/learn/misc/old/exception.h
deleted file mode 100644
index 7d02a7f32dcfb74a0847e7dd6341a3bdd99b3e39..0000000000000000000000000000000000000000
--- a/bob/learn/misc/old/exception.h
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * @file bob/python/exception.h
- * @date Fri Mar 25 15:21:36 2011 +0100
- * @author Andre Anjos <andre.anjos@idiap.ch>
- *
- * @brief Implements a few classes that are useful for binding bob exceptions
- * to python.
- *
- * Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland
- */
-
-#ifndef BOB_PYTHON_EXCEPTION_H
-#define BOB_PYTHON_EXCEPTION_H
-
-#include <boost/python.hpp>
-
-/**
- * @brief Raises a python exception with a formatted message
- */
-#define PYTHON_ERROR(TYPE, ...) \
-{ \
-  PyErr_Format(PyExc_##TYPE, __VA_ARGS__); \
-  throw boost::python::error_already_set(); \
-}
-
-/**
- * @brief Raises a python warning with a formatted message
- */
-#define PYTHON_WARNING(TYPE, MESSAGE) \
-{ \
-  PyErr_Warn(PyExc_##TYPE, MESSAGE); \
-}
-
-namespace bob { namespace python {
-
-  /**
-   * @brief This is a generalized exception translator for boost python. It
-   * simplifies translation declaration for as long as you provide a what()
-   * method in your exception classes that return a const char* with the
-   * exception description.
-   *
-   * If you follow that protocol, you should be able to do something like:
-   *
-   * ExceptionTranslator<std::out_of_range> t(PyExc_RuntimeError)
-   *
-   * On your boost::python modules.
-   */
-  template <typename T> struct ExceptionTranslator {
-
-    public:
-
-      void operator()(const T& cxx_except) const {
-        PyErr_SetString(m_py_except, cxx_except.what());
-      }
-
-      ExceptionTranslator(PyObject* py_except): m_py_except(py_except) {
-        boost::python::register_exception_translator<T>(*this);
-      }
-
-      ExceptionTranslator(const ExceptionTranslator& other):
-        m_py_except(other.m_py_except) {
-          //do not re-register the translator here!
-      }
-
-    private:
-
-      PyObject* m_py_except;
-
-  };
-
-  /**
-   * @brief A thin wrapper to call the translator and escape the variable
-   * naming issue when declaring multiple ExceptionTranslator's on the same
-   * module.
-   *
-   * If you think about it, it would have to look like this:
-   *
-   * ExceptionTranslator<MyException1> translator1(PyExc_RuntimeError);
-   * ExceptionTranslator<MyException2> translator2(PyExc_RuntimeError);
-   *
-   * Using this method will make it look like this:
-   *
-   * register_exception_translator<MyException1>(PyExc_RuntimeError);
-   */
-  template <typename T> void register_exception_translator(PyObject* e) {
-    ExceptionTranslator<T> my_translator(e);
-  }
-
-}}
-
-#endif /* BOB_PYTHON_EXCEPTION_H */
diff --git a/bob/learn/misc/old/gaussian.cc b/bob/learn/misc/old/gaussian.cc
deleted file mode 100644
index 70a885684952757c8be6eb6e7b795019953d6d4b..0000000000000000000000000000000000000000
--- a/bob/learn/misc/old/gaussian.cc
+++ /dev/null
@@ -1,111 +0,0 @@
-/**
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @date Tue Jul 26 15:11:33 2011 +0200
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <bob.blitz/capi.h>
-#include <bob.blitz/cleanup.h>
-#include <bob.io.base/api.h>
-
-#include "ndarray.h"
-#include <bob.learn.misc/Gaussian.h>
-
-
-using namespace boost::python;
-
-static void py_setMean(bob::learn::misc::Gaussian& machine,
-  bob::python::const_ndarray mean)
-{
-  machine.setMean(mean.bz<double,1>());
-}
-
-static void py_setVariance(bob::learn::misc::Gaussian& machine,
-  bob::python::const_ndarray variance)
-{
-  machine.setVariance(variance.bz<double,1>());
-}
-
-static void py_setVarianceThresholds(bob::learn::misc::Gaussian& machine,
-  bob::python::const_ndarray varianceThresholds)
-{
-  machine.setVarianceThresholds(varianceThresholds.bz<double,1>());
-}
-
-static tuple get_shape(const bob::learn::misc::Gaussian& m)
-{
-  return make_tuple(m.getNInputs());
-}
-
-static void set_shape(bob::learn::misc::Gaussian& m,
-  const blitz::TinyVector<int,1>& s)
-{
-  m.resize(s(0));
-}
-
-static double py_logLikelihood(const bob::learn::misc::Gaussian& machine,
-  bob::python::const_ndarray input)
-{
-  double output;
-  machine.forward(input.bz<double,1>(), output);
-  return output;
-}
-
-static double py_logLikelihood_(const bob::learn::misc::Gaussian& machine,
-  bob::python::const_ndarray input)
-{
-  double output;
-  machine.forward_(input.bz<double,1>(), output);
-  return output;
-}
-
-
-static boost::shared_ptr<bob::learn::misc::Gaussian> _init(boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  return boost::shared_ptr<bob::learn::misc::Gaussian>(new bob::learn::misc::Gaussian(*hdf5->f));
-}
-
-static void _load(bob::learn::misc::Gaussian& self, boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  self.load(*hdf5->f);
-}
-
-static void _save(const bob::learn::misc::Gaussian& self, boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  self.save(*hdf5->f);
-}
-
-
-void bind_machine_gaussian()
-{
-  class_<bob::learn::misc::Gaussian, boost::shared_ptr<bob::learn::misc::Gaussian>, bases<bob::learn::misc::Machine<blitz::Array<double,1>, double> > >("Gaussian",
-    "This class implements a multivariate diagonal Gaussian distribution.", no_init)
-    .def("__init__", boost::python::make_constructor(&_init))
-    .def(init<>(arg("self")))
-    .def(init<const size_t>((arg("self"), arg("n_inputs"))))
-    .def(init<bob::learn::misc::Gaussian&>((arg("self"), arg("other"))))
-    .def(self == self)
-    .def(self != self)
-    .def("is_similar_to", &bob::learn::misc::Gaussian::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this Gaussian with the 'other' one to be approximately the same.")
-    .add_property("dim_d", &bob::learn::misc::Gaussian::getNInputs, &bob::learn::misc::Gaussian::setNInputs,
-      "Dimensionality of the input feature space")
-    .add_property("mean", make_function(&bob::learn::misc::Gaussian::getMean, return_value_policy<copy_const_reference>()), &py_setMean, "Mean of the Gaussian")
-    .add_property("variance", make_function(&bob::learn::misc::Gaussian::getVariance, return_value_policy<copy_const_reference>()), &py_setVariance, "The diagonal of the (diagonal) covariance matrix")
-    .add_property("variance_thresholds", make_function(&bob::learn::misc::Gaussian::getVarianceThresholds, return_value_policy<copy_const_reference>()), &py_setVarianceThresholds,
-      "The variance flooring thresholds, i.e. the minimum allowed value of variance in each dimension. "
-      "The variance will be set to this value if an attempt is made to set it to a smaller value.")
-    .add_property("shape", &get_shape, &set_shape, "A tuple that represents the dimensionality of the Gaussian ``(dim_d,)``.")
-    .def("set_variance_thresholds",  (void (bob::learn::misc::Gaussian::*)(const double))&bob::learn::misc::Gaussian::setVarianceThresholds, (arg("self"), arg("var_thd")),
-         "Set the variance flooring thresholds equal to the given threshold for all the dimensions.")
-    .def("resize", &bob::learn::misc::Gaussian::resize, (arg("self"), arg("dim_d")), "Set the input dimensionality, reset the mean to zero and the variance to one.")
-    .def("log_likelihood", &py_logLikelihood, (arg("self"), arg("sample")), "Output the log likelihood of the sample, x. The input size is checked.")
-    .def("log_likelihood_", &py_logLikelihood_, (arg("self"), arg("sample")), "Output the log likelihood of the sample, x. The input size is NOT checked.")
-    .def("save", &_save, (arg("self"), arg("config")), "Save to a Configuration")
-    .def("load", &_load, (arg("self"), arg("config")),"Load from a Configuration")
-    .def(self_ns::str(self_ns::self))
-  ;
-}
diff --git a/bob/learn/misc/old/gmm.cc b/bob/learn/misc/old/gmm.cc
deleted file mode 100644
index 00b290d0293be2309c7363f8b4d70616dd0fe0ab..0000000000000000000000000000000000000000
--- a/bob/learn/misc/old/gmm.cc
+++ /dev/null
@@ -1,330 +0,0 @@
-/**
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @date Tue Jul 26 15:11:33 2011 +0200
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <bob.blitz/capi.h>
-#include <bob.blitz/cleanup.h>
-#include <bob.io.base/api.h>
-
-#include "ndarray.h"
-
-#include <bob.learn.misc/GMMStats.h>
-#include <bob.learn.misc/GMMMachine.h>
-
-using namespace boost::python;
-
-static object py_gmmstats_getN(bob::learn::misc::GMMStats& s)
-{
-  bob::python::ndarray n(bob::io::base::array::t_float64, s.n.extent(0));
-  blitz::Array<double,1> n_ = n.bz<double,1>();
-  n_ = s.n;
-  return n.self();
-}
-
-static void py_gmmstats_setN(bob::learn::misc::GMMStats& s,
-  bob::python::const_ndarray n)
-{
-  s.n = n.bz<double,1>();
-}
-
-static object py_gmmstats_getSumpx(bob::learn::misc::GMMStats& s)
-{
-  bob::python::ndarray sumpx(bob::io::base::array::t_float64, s.sumPx.extent(0),
-    s.sumPx.extent(1));
-  blitz::Array<double,2> sumpx_ = sumpx.bz<double,2>();
-  sumpx_ = s.sumPx;
-  return sumpx.self();
-}
-
-static void py_gmmstats_setSumpx(bob::learn::misc::GMMStats& s,
-  bob::python::const_ndarray sumpx)
-{
-  s.sumPx = sumpx.bz<double,2>();
-}
-
-static object py_gmmstats_getSumpxx(bob::learn::misc::GMMStats& s)
-{
-  bob::python::ndarray sumpxx(bob::io::base::array::t_float64, s.sumPxx.extent(0),
-    s.sumPxx.extent(1));
-  blitz::Array<double,2> sumpxx_ = sumpxx.bz<double,2>();
-  sumpxx_ = s.sumPxx;
-  return sumpxx.self();
-}
-
-static void py_gmmstats_setSumpxx(bob::learn::misc::GMMStats& s,
-  bob::python::const_ndarray sumpxx)
-{
-  s.sumPxx = sumpxx.bz<double,2>();
-}
-
-
-static void py_gmmmachine_setWeights(bob::learn::misc::GMMMachine& machine,
-  bob::python::const_ndarray weights)
-{
-  machine.setWeights(weights.bz<double,1>());
-}
-
-static object py_gmmmachine_getMeans(const bob::learn::misc::GMMMachine& machine)
-{
-  bob::python::ndarray means(bob::io::base::array::t_float64,
-    machine.getNGaussians(), machine.getNInputs());
-  blitz::Array<double,2> means_ = means.bz<double,2>();
-  machine.getMeans(means_);
-  return means.self();
-}
-
-static void py_gmmmachine_setMeans(bob::learn::misc::GMMMachine& machine,
-  bob::python::const_ndarray means)
-{
-  machine.setMeans(means.bz<double,2>());
-}
-
-static void py_gmmmachine_setMeanSupervector(bob::learn::misc::GMMMachine& machine,
-  bob::python::const_ndarray vec)
-{
-  machine.setMeanSupervector(vec.bz<double,1>());
-}
-
-static object py_gmmmachine_getVariances(const bob::learn::misc::GMMMachine& machine)
-{
-  bob::python::ndarray variances(bob::io::base::array::t_float64,
-    machine.getNGaussians(), machine.getNInputs());
-  blitz::Array<double,2> variances_ = variances.bz<double,2>();
-  machine.getVariances(variances_);
-  return variances.self();
-}
-
-static void py_gmmmachine_setVariances(bob::learn::misc::GMMMachine& machine,
-  bob::python::const_ndarray variances)
-{
-  machine.setVariances(variances.bz<double,2>());
-}
-
-static void py_gmmmachine_setVarianceSupervector(bob::learn::misc::GMMMachine& machine,
-  bob::python::const_ndarray vec)
-{
-  machine.setVarianceSupervector(vec.bz<double,1>());
-}
-
-static object py_gmmmachine_getVarianceThresholds(const bob::learn::misc::GMMMachine& machine)
-{
-  bob::python::ndarray varianceThresholds(bob::io::base::array::t_float64,
-    machine.getNGaussians(), machine.getNInputs());
-  blitz::Array<double,2> varianceThresholds_ = varianceThresholds.bz<double,2>();
-  machine.getVarianceThresholds(varianceThresholds_);
-  return varianceThresholds.self();
-}
-
-static void py_gmmmachine_setVarianceThresholds(bob::learn::misc::GMMMachine& machine,
-  bob::python::const_ndarray varianceThresholds)
-{
-  machine.setVarianceThresholds(varianceThresholds.bz<double,2>());
-}
-
-static void py_gmmmachine_setVarianceThresholdsOther(bob::learn::misc::GMMMachine& machine,
-  object o)
-{
-  extract<int> int_check(o);
-  extract<double> float_check(o);
-  if(int_check.check()) { //is int
-    machine.setVarianceThresholds(int_check());
-  }
-  else if(float_check.check()) { //is float
-    machine.setVarianceThresholds(float_check());
-  }
-  else {
-    //try hard-core extraction - throws TypeError, if not possible
-    extract<bob::python::const_ndarray> array_check(o);
-    if (!array_check.check())
-      PYTHON_ERROR(TypeError, "Cannot extract an array from this Python object");
-    bob::python::const_ndarray ar = array_check();
-    machine.setVarianceThresholds(ar.bz<double,1>());
-  }
-}
-
-static tuple py_gmmmachine_get_shape(const bob::learn::misc::GMMMachine& m)
-{
-  return make_tuple(m.getNGaussians(), m.getNInputs());
-}
-
-static void py_gmmmachine_set_shape(bob::learn::misc::GMMMachine& m,
-  const blitz::TinyVector<int,2>& s)
-{
-  m.resize(s(0), s(1));
-}
-
-static double py_gmmmachine_loglikelihoodA(const bob::learn::misc::GMMMachine& machine,
-  bob::python::const_ndarray x, bob::python::ndarray ll)
-{
-  blitz::Array<double,1> ll_ = ll.bz<double,1>();
-  return machine.logLikelihood(x.bz<double,1>(), ll_);
-}
-
-static double py_gmmmachine_loglikelihoodA_(const bob::learn::misc::GMMMachine& machine,
-  bob::python::const_ndarray x, bob::python::ndarray ll)
-{
-  blitz::Array<double,1> ll_ = ll.bz<double,1>();
-  return machine.logLikelihood_(x.bz<double,1>(), ll_);
-}
-
-static double py_gmmmachine_loglikelihoodB(const bob::learn::misc::GMMMachine& machine,
-  bob::python::const_ndarray x)
-{
-  return machine.logLikelihood(x.bz<double,1>());
-}
-
-static double py_gmmmachine_loglikelihoodB_(const bob::learn::misc::GMMMachine& machine,
-  bob::python::const_ndarray x)
-{
-  return machine.logLikelihood_(x.bz<double,1>());
-}
-
-static void py_gmmmachine_accStatistics(const bob::learn::misc::GMMMachine& machine,
-  bob::python::const_ndarray x, bob::learn::misc::GMMStats& gs)
-{
-  const bob::io::base::array::typeinfo& info = x.type();
-  switch(info.nd) {
-    case 1:
-      machine.accStatistics(x.bz<double,1>(), gs);
-      break;
-    case 2:
-      machine.accStatistics(x.bz<double,2>(), gs);
-      break;
-    default:
-      PYTHON_ERROR(TypeError, "cannot accStatistics of arrays with "  SIZE_T_FMT " dimensions (only with 1 or 2 dimensions).", info.nd);
-  }
-}
-
-static void py_gmmmachine_accStatistics_(const bob::learn::misc::GMMMachine& machine,
-  bob::python::const_ndarray x, bob::learn::misc::GMMStats& gs)
-{
-  const bob::io::base::array::typeinfo& info = x.type();
-  switch(info.nd) {
-    case 1:
-      machine.accStatistics_(x.bz<double,1>(), gs);
-      break;
-    case 2:
-      machine.accStatistics_(x.bz<double,2>(), gs);
-      break;
-    default:
-      PYTHON_ERROR(TypeError, "cannot accStatistics of arrays with "  SIZE_T_FMT " dimensions (only with 1 or 2 dimensions).", info.nd);
-  }
-}
-
-static boost::shared_ptr<bob::learn::misc::GMMStats> s_init(boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  return boost::shared_ptr<bob::learn::misc::GMMStats>(new bob::learn::misc::GMMStats(*hdf5->f));
-}
-
-static void s_load(bob::learn::misc::GMMStats& self, boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  self.load(*hdf5->f);
-}
-
-static void s_save(const bob::learn::misc::GMMStats& self, boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  self.save(*hdf5->f);
-}
-
-
-static boost::shared_ptr<bob::learn::misc::GMMMachine> m_init(boost::python::object file){
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  return boost::shared_ptr<bob::learn::misc::GMMMachine>(new bob::learn::misc::GMMMachine(*hdf5->f));
-}
-
-static void m_load(bob::learn::misc::GMMMachine& self, boost::python::object file){
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  self.load(*hdf5->f);
-}
-
-static void m_save(const bob::learn::misc::GMMMachine& self, boost::python::object file){
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  self.save(*hdf5->f);
-}
-
-void bind_machine_gmm()
-{
-  class_<bob::learn::misc::GMMStats, boost::shared_ptr<bob::learn::misc::GMMStats> >("GMMStats",
-      "A container for GMM statistics.\n"
-      "With respect to Reynolds, \"Speaker Verification Using Adapted "
-      "Gaussian Mixture Models\", DSP, 2000:\n"
-      "Eq (8) is n(i)\n"
-      "Eq (9) is sumPx(i) / n(i)\n"
-      "Eq (10) is sumPxx(i) / n(i)\n",
-      init<>(arg("self")))
-    .def("__init__", boost::python::make_constructor(&s_init))
-    .def(init<const size_t, const size_t>((arg("self"), arg("n_gaussians"), arg("n_inputs"))))
-    .def(init<bob::learn::misc::GMMStats&>((arg("self"), arg("other")), "Creates a GMMStats from another GMMStats, using the copy constructor."))
-    .def(self == self)
-    .def(self != self)
-    .def("is_similar_to", &bob::learn::misc::GMMStats::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this GMMStats with the 'other' one to be approximately the same.")
-    .def_readwrite("log_likelihood", &bob::learn::misc::GMMStats::log_likelihood, "The accumulated log likelihood of all samples")
-    .def_readwrite("t", &bob::learn::misc::GMMStats::T, "The accumulated number of samples")
-    .add_property("n", &py_gmmstats_getN, &py_gmmstats_setN, "For each Gaussian, the accumulated sum of responsibilities, i.e. the sum of P(gaussian_i|x)")
-    .add_property("sum_px", &py_gmmstats_getSumpx, &py_gmmstats_setSumpx, "For each Gaussian, the accumulated sum of responsibility times the sample ")
-    .add_property("sum_pxx", &py_gmmstats_getSumpxx, &py_gmmstats_setSumpxx, "For each Gaussian, the accumulated sum of responsibility times the sample squared")
-    .def("resize", &bob::learn::misc::GMMStats::resize, (arg("self"), arg("n_gaussians"), arg("n_inputs")),
-         " Allocates space for the statistics and resets to zero.")
-    .def("init", &bob::learn::misc::GMMStats::init, (arg("self")), "Resets statistics to zero.")
-    .def("save", &s_save, (arg("self"), arg("config")), "Save to a Configuration")
-    .def("load", &s_load, (arg("self"), arg("config")), "Load from a Configuration")
-    .def(self_ns::str(self_ns::self))
-    .def(self_ns::self += self_ns::self)
-  ;
-
-  class_<bob::learn::misc::GMMMachine, boost::shared_ptr<bob::learn::misc::GMMMachine>, bases<bob::learn::misc::Machine<blitz::Array<double,1>, double> > >("GMMMachine",
-      "This class implements a multivariate diagonal Gaussian distribution.\n"
-      "See Section 2.3.9 of Bishop, \"Pattern recognition and machine learning\", 2006",
-      init<>(arg("self")))
-    .def("__init__", boost::python::make_constructor(&m_init))
-    .def(init<bob::learn::misc::GMMMachine&>((arg("self"), arg("other")), "Creates a GMMMachine from another GMMMachine, using the copy constructor."))
-    .def(init<const size_t, const size_t>((arg("self"), arg("n_gaussians"), arg("n_inputs"))))
-    .def(self == self)
-    .def(self != self)
-    .def("is_similar_to", &bob::learn::misc::GMMMachine::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this GMMMachine with the 'other' one to be approximately the same.")
-    .add_property("dim_d", &bob::learn::misc::GMMMachine::getNInputs, &bob::learn::misc::GMMMachine::setNInputs, "The feature dimensionality D")
-    .add_property("dim_c", &bob::learn::misc::GMMMachine::getNGaussians, "The number of Gaussian components C")
-    .add_property("weights", make_function(&bob::learn::misc::GMMMachine::getWeights, return_value_policy<copy_const_reference>()), &py_gmmmachine_setWeights, "The weights (also known as \"mixing coefficients\")")
-    .add_property("means", &py_gmmmachine_getMeans, &py_gmmmachine_setMeans, "The means of the gaussians")
-    .add_property("mean_supervector", make_function((const blitz::Array<double,1>& (bob::learn::misc::GMMMachine::*)(void) const)&bob::learn::misc::GMMMachine::getMeanSupervector, return_value_policy<copy_const_reference>()), &py_gmmmachine_setMeanSupervector,
-                  "The mean supervector of the GMMMachine "
-                  "(concatenation of the mean vectors of each Gaussian of the GMMMachine")
-    .add_property("variances", &py_gmmmachine_getVariances, &py_gmmmachine_setVariances, "The (diagonal) variances of the Gaussians")
-    .add_property("variance_supervector", make_function((const blitz::Array<double,1>& (bob::learn::misc::GMMMachine::*)(void) const)&bob::learn::misc::GMMMachine::getVarianceSupervector, return_value_policy<copy_const_reference>()), &py_gmmmachine_setVarianceSupervector,
-                  "The variance supervector of the GMMMachine "
-                  "(concatenation of the variance vectors of each Gaussian of the GMMMachine")
-    .add_property("variance_thresholds", &py_gmmmachine_getVarianceThresholds, &py_gmmmachine_setVarianceThresholds,
-                  "The variance flooring thresholds for each Gaussian in each dimension")
-    .add_property("shape", &py_gmmmachine_get_shape, &py_gmmmachine_set_shape, "A tuple that represents the dimensionality of the GMMMachine ``(n_gaussians, n_inputs)``.")
-    .def("resize", &bob::learn::misc::GMMMachine::resize, (arg("self"), arg("n_gaussians"), arg("n_inputs")),
-         "Reset the input dimensionality, and the number of Gaussian components.\n"
-         "Initialises the weights to uniform distribution.")
-    .def("set_variance_thresholds", &py_gmmmachine_setVarianceThresholdsOther, (arg("self"), arg("variance_threshold")),
-         "Set the variance flooring thresholds in each dimension to the same vector for all Gaussian components if the argument is a 1D numpy arrray, and equal for all Gaussian components and dimensions if the parameter is a scalar.")
-    .def("update_gaussian", &bob::learn::misc::GMMMachine::updateGaussian, (arg("self"), arg("i")),
-         "Get the specified Gaussian component. An exception is thrown if i is out of range.")
-
-    .def("log_likelihood", &py_gmmmachine_loglikelihoodA, args("self", "x", "log_weighted_gaussian_likelihoods"),
-         "Output the log likelihood of the sample, x, i.e. log(p(x|bob::learn::misc::GMMMachine)). Inputs are checked.")
-    .def("log_likelihood_", &py_gmmmachine_loglikelihoodA_, args("self", "x", "log_weighted_gaussian_likelihoods"),
-         "Output the log likelihood of the sample, x, i.e. log(p(x|bob::learn::misc::GMMMachine)). Inputs are NOT checked.")
-    .def("log_likelihood", &py_gmmmachine_loglikelihoodB, args("self", "x"),
-         " Output the log likelihood of the sample, x, i.e. log(p(x|GMM)). Inputs are checked.")
-    .def("log_likelihood_", &py_gmmmachine_loglikelihoodB_, args("self", "x"),
-         " Output the log likelihood of the sample, x, i.e. log(p(x|GMM)). Inputs are checked.")
-    .def("acc_statistics", &py_gmmmachine_accStatistics, args("self", "x", "stats"),
-         "Accumulate the GMM statistics for this sample(s). Inputs are checked.")
-    .def("acc_statistics_", &py_gmmmachine_accStatistics_, args("self", "x", "stats"),
-         "Accumulate the GMM statistics for this sample(s). Inputs are NOT checked.")
-    .def("load", &m_load, (arg("self"), arg("config")), "Load from a Configuration")
-    .def("save", &m_save, (arg("self"), arg("config")), "Save to a Configuration")
-    .def(self_ns::str(self_ns::self))
-  ;
-
-}
diff --git a/bob/learn/misc/old/gmm_trainer.cc b/bob/learn/misc/old/gmm_trainer.cc
deleted file mode 100644
index 9ec49c7d8c20eeddf31332d0a0d3953c041844f9..0000000000000000000000000000000000000000
--- a/bob/learn/misc/old/gmm_trainer.cc
+++ /dev/null
@@ -1,90 +0,0 @@
-/**
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @date Thu Jun 9 18:12:33 2011 +0200
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "ndarray.h"
-
-#include <limits>
-#include <bob.learn.misc/GMMTrainer.h>
-#include <bob.learn.misc/MAP_GMMTrainer.h>
-#include <bob.learn.misc/ML_GMMTrainer.h>
-
-using namespace boost::python;
-
-typedef bob::learn::misc::EMTrainer<bob::learn::misc::GMMMachine, blitz::Array<double,2> > EMTrainerGMMBase;
-
-static void py_train(EMTrainerGMMBase& trainer, bob::learn::misc::GMMMachine& machine, bob::python::const_ndarray sample)
-{
-  trainer.train(machine, sample.bz<double,2>());
-}
-
-static void py_initialize(EMTrainerGMMBase& trainer, bob::learn::misc::GMMMachine& machine, bob::python::const_ndarray sample)
-{
-  trainer.initialize(machine, sample.bz<double,2>());
-}
-
-static void py_finalize(EMTrainerGMMBase& trainer, bob::learn::misc::GMMMachine& machine, bob::python::const_ndarray sample)
-{
-  trainer.finalize(machine, sample.bz<double,2>());
-}
-
-static void py_eStep(EMTrainerGMMBase& trainer, bob::learn::misc::GMMMachine& machine, bob::python::const_ndarray sample)
-{
-  trainer.eStep(machine, sample.bz<double,2>());
-}
-
-static void py_mStep(EMTrainerGMMBase& trainer, bob::learn::misc::GMMMachine& machine, bob::python::const_ndarray sample)
-{
-  trainer.mStep(machine, sample.bz<double,2>());
-}
-
-void bind_trainer_gmm() {
-
-  class_<EMTrainerGMMBase, boost::noncopyable>("EMTrainerGMM", "The base python class for all EM-based trainers.", no_init)
-    .add_property("convergence_threshold", &EMTrainerGMMBase::getConvergenceThreshold, &EMTrainerGMMBase::setConvergenceThreshold, "Convergence threshold")
-    .add_property("max_iterations", &EMTrainerGMMBase::getMaxIterations, &EMTrainerGMMBase::setMaxIterations, "Max iterations")
-    .def("train", &py_train, (arg("self"), arg("machine"), arg("data")), "Train a machine using data")
-    .def("initialize", &py_initialize, (arg("self"), arg("machine"), arg("data")), "This method is called before the EM algorithm")
-    .def("finalize", &py_finalize, (arg("self"), arg("machine"), arg("data")), "This method is called after the EM algorithm")
-    .def("e_step", &py_eStep, (arg("self"), arg("machine"), arg("data")),
-       "Update the hidden variable distribution (or the sufficient statistics) given the Machine parameters. "
-       "Also, calculate the average output of the Machine given these parameters.\n"
-       "Return the average output of the Machine across the dataset. "
-       "The EM algorithm will terminate once the change in average_output "
-       "is less than the convergence_threshold.")
-    .def("m_step", &py_mStep, (arg("self"), arg("machine"), arg("data")), "Update the Machine parameters given the hidden variable distribution (or the sufficient statistics)")
-    .def("compute_likelihood", &EMTrainerGMMBase::computeLikelihood, (arg("self"), arg("machine")), "Returns the likelihood.")
-  ;
-
-  class_<bob::learn::misc::GMMTrainer, boost::noncopyable, bases<EMTrainerGMMBase> >("GMMTrainer",
-      "This class implements the E-step of the expectation-maximisation algorithm for a GMM Machine.\n"
-      "See Section 9.2.2 of Bishop, \"Pattern recognition and machine learning\", 2006", no_init)
-    .add_property("gmm_statistics", make_function(&bob::learn::misc::GMMTrainer::getGMMStats, return_value_policy<copy_const_reference>()), &bob::learn::misc::GMMTrainer::setGMMStats, "The internal GMM statistics. Useful to parallelize the E-step.")
-  ;
-
-  class_<bob::learn::misc::MAP_GMMTrainer, boost::noncopyable, bases<bob::learn::misc::GMMTrainer> >("MAP_GMMTrainer",
-      "This class implements the maximum a posteriori M-step "
-      "of the expectation-maximisation algorithm for a GMM Machine. "
-      "The prior parameters are encoded in the form of a GMM (e.g. a universal background model). "
-      "The EM algorithm thus performs GMM adaptation.\n"
-      "See Section 3.4 of Reynolds et al., \"Speaker Verification Using Adapted Gaussian Mixture Models\", Digital Signal Processing, 2000. We use a \"single adaptation coefficient\", alpha_i, and thus a single relevance factor, r.",
-      init<optional<const double, const bool, const bool, const bool, const double> >((arg("self"), arg("relevance_factor")=0, arg("update_means")=true, arg("update_variances")=false, arg("update_weights")=false, arg("responsibilities_threshold")=std::numeric_limits<double>::epsilon())))
-    .def("set_prior_gmm", &bob::learn::misc::MAP_GMMTrainer::setPriorGMM, (arg("self"), arg("prior_gmm")),
-      "Set the GMM to use as a prior for MAP adaptation. "
-      "Generally, this is a \"universal background model\" (UBM), "
-      "also referred to as a \"world model\".")
-    .def("set_t3_map", &bob::learn::misc::MAP_GMMTrainer::setT3MAP, (arg("self"), arg("alpha")),
-      "Use a torch3-like MAP adaptation rule instead of Reynolds'one.")
-    .def("unset_t3_map", &bob::learn::misc::MAP_GMMTrainer::unsetT3MAP, (arg("self")),
-      "Use a Reynolds' MAP adaptation (rather than torch3-like).")
-  ;
-
-  class_<bob::learn::misc::ML_GMMTrainer, boost::noncopyable, bases<bob::learn::misc::GMMTrainer> >("ML_GMMTrainer",
-      "This class implements the maximum likelihood M-step of the expectation-maximisation algorithm for a GMM Machine.\n"
-      "See Section 9.2.2 of Bishop, \"Pattern recognition and machine learning\", 2006",
-      init<optional<const bool, const bool, const bool, const double> >((arg("self"), arg("update_means")=true, arg("update_variances")=false, arg("update_weights")=false, arg("responsibilities_threshold")=std::numeric_limits<double>::epsilon())))
-  ;
-}
diff --git a/bob/learn/misc/old/ivector.cc b/bob/learn/misc/old/ivector.cc
deleted file mode 100644
index b36e66e00ce533f6123516db3a5204318514473c..0000000000000000000000000000000000000000
--- a/bob/learn/misc/old/ivector.cc
+++ /dev/null
@@ -1,136 +0,0 @@
-/**
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @date Sun Mar 31 18:07:00 2013 +0200
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <bob.blitz/capi.h>
-#include <bob.blitz/cleanup.h>
-#include <bob.io.base/api.h>
-
-#include "ndarray.h"
-
-#include <bob.learn.misc/IVectorMachine.h>
-
-using namespace boost::python;
-
-static void py_iv_setT(bob::learn::misc::IVectorMachine& machine,
-  bob::python::const_ndarray T)
-{
-  machine.setT(T.bz<double,2>());
-}
-
-static void py_iv_setSigma(bob::learn::misc::IVectorMachine& machine,
-  bob::python::const_ndarray sigma)
-{
-  machine.setSigma(sigma.bz<double,1>());
-}
-
-static void py_computeIdTtSigmaInvT1(const bob::learn::misc::IVectorMachine& machine,
-  const bob::learn::misc::GMMStats& gs, bob::python::ndarray output)
-{
-  blitz::Array<double,2> output_ = output.bz<double,2>();
-  machine.computeIdTtSigmaInvT(gs, output_);
-}
-
-static object py_computeIdTtSigmaInvT2(const bob::learn::misc::IVectorMachine& machine,
-  const bob::learn::misc::GMMStats& gs)
-{
-  bob::python::ndarray output(bob::io::base::array::t_float64, machine.getDimRt(), machine.getDimRt());
-  blitz::Array<double,2> output_ = output.bz<double,2>();
-  machine.computeIdTtSigmaInvT(gs, output_);
-  return output.self();
-}
-
-static void py_computeTtSigmaInvFnorm1(const bob::learn::misc::IVectorMachine& machine,
-  const bob::learn::misc::GMMStats& gs, bob::python::ndarray output)
-{
-  blitz::Array<double,1> output_ = output.bz<double,1>();
-  machine.computeTtSigmaInvFnorm(gs, output_);
-}
-
-static object py_computeTtSigmaInvFnorm2(const bob::learn::misc::IVectorMachine& machine,
-  const bob::learn::misc::GMMStats& gs)
-{
-  bob::python::ndarray output(bob::io::base::array::t_float64, machine.getDimRt());
-  blitz::Array<double,1> output_ = output.bz<double,1>();
-  machine.computeTtSigmaInvFnorm(gs, output_);
-  return output.self();
-}
-
-static void py_iv_forward1(const bob::learn::misc::IVectorMachine& machine,
-  const bob::learn::misc::GMMStats& gs, bob::python::ndarray ivector)
-{
-  blitz::Array<double,1> ivector_ = ivector.bz<double,1>();
-  machine.forward(gs, ivector_);
-}
-
-static void py_iv_forward1_(const bob::learn::misc::IVectorMachine& machine,
-  const bob::learn::misc::GMMStats& gs, bob::python::ndarray ivector)
-{
-  blitz::Array<double,1> ivector_ = ivector.bz<double,1>();
-  machine.forward_(gs, ivector_);
-}
-
-static object py_iv_forward2(const bob::learn::misc::IVectorMachine& machine,
-  const bob::learn::misc::GMMStats& gs)
-{
-  bob::python::ndarray ivector(bob::io::base::array::t_float64, machine.getDimRt());
-  blitz::Array<double,1> ivector_ = ivector.bz<double,1>();
-  machine.forward(gs, ivector_);
-  return ivector.self();
-}
-
-
-
-static boost::shared_ptr<bob::learn::misc::IVectorMachine> _init(boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  return boost::shared_ptr<bob::learn::misc::IVectorMachine>(new bob::learn::misc::IVectorMachine(*hdf5->f));
-}
-
-static void _load(bob::learn::misc::IVectorMachine& self, boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  self.load(*hdf5->f);
-}
-
-static void _save(const bob::learn::misc::IVectorMachine& self, boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  self.save(*hdf5->f);
-}
-
-void bind_machine_ivector()
-{
-  // TODO: reuse binding from generic machine
-  class_<bob::learn::misc::IVectorMachine, boost::shared_ptr<bob::learn::misc::IVectorMachine> >("IVectorMachine", "An IVectorMachine to extract i-vector.\n\nReferences:\n[1] 'Front End Factor Analysis for Speaker Verification', N. Dehak, P. Kenny, R. Dehak, P. Dumouchel, P. Ouellet, IEEE Transactions on Audio, Speech and Language Processing, 2010, vol. 19, issue 4, pp. 788-798", init<boost::shared_ptr<bob::learn::misc::GMMMachine>, optional<const size_t, const size_t> >((arg("self"), arg("ubm"), arg("rt")=1, arg("variance_threshold")=1e-10), "Builds a new IVectorMachine."))
-    .def(init<>((arg("self")), "Constructs a new empty IVectorMachine."))
-    .def("__init__", boost::python::make_constructor(&_init), "Constructs a new IVectorMachine from a configuration file.")
-    .def(init<const bob::learn::misc::IVectorMachine&>((arg("self"), arg("machine")), "Copy constructs an IVectorMachine"))
-    .def(self == self)
-    .def(self != self)
-    .def("is_similar_to", &bob::learn::misc::IVectorMachine::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this IVectorMachine with the 'other' one to be approximately the same.")
-    .def("load", &_load, (arg("self"), arg("config")), "Loads the configuration parameters from a configuration file.")
-    .def("save", &_save, (arg("self"), arg("config")), "Saves the configuration parameters to a configuration file.")
-    .def("resize", &bob::learn::misc::IVectorMachine::resize, (arg("self"), arg("rt")), "Reset the dimensionality of the Total Variability subspace T.")
-    .add_property("ubm", &bob::learn::misc::IVectorMachine::getUbm, &bob::learn::misc::IVectorMachine::setUbm, "The UBM GMM attached to this Joint Factor Analysis model")
-    .add_property("t", make_function(&bob::learn::misc::IVectorMachine::getT, return_value_policy<copy_const_reference>()), &py_iv_setT, "The subspace T (Total Variability matrix)")
-    .add_property("sigma", make_function(&bob::learn::misc::IVectorMachine::getSigma, return_value_policy<copy_const_reference>()), &py_iv_setSigma, "The residual matrix of the model sigma")
-    .add_property("variance_threshold", &bob::learn::misc::IVectorMachine::getVarianceThreshold, &bob::learn::misc::IVectorMachine::setVarianceThreshold, "Threshold for the variance contained in sigma")
-    .add_property("dim_c", &bob::learn::misc::IVectorMachine::getDimC, "The number of Gaussian components")
-    .add_property("dim_d", &bob::learn::misc::IVectorMachine::getDimD, "The dimensionality of the feature space")
-    .add_property("dim_cd", &bob::learn::misc::IVectorMachine::getDimCD, "The dimensionality of the supervector space")
-    .add_property("dim_rt", &bob::learn::misc::IVectorMachine::getDimRt, "The dimensionality of the Total Variability subspace (rank of T)")
-    .def("__compute_Id_TtSigmaInvT__", &py_computeIdTtSigmaInvT1, (arg("self"), arg("gmmstats"), arg("output")), "Computes (Id + sum_{c=1}^{C} N_{i,j,c} T^{T} Sigma_{c}^{-1} T)")
-    .def("__compute_Id_TtSigmaInvT__", &py_computeIdTtSigmaInvT2, (arg("self"), arg("gmmstats")), "Computes (Id + sum_{c=1}^{C} N_{i,j,c} T^{T} Sigma_{c}^{-1} T)")
-    .def("__compute_TtSigmaInvFnorm__", &py_computeTtSigmaInvFnorm1, (arg("self"), arg("gmmstats"), arg("output")), "Computes T^{T} Sigma^{-1} sum_{c=1}^{C} (F_c - N_c mean(c))")
-    .def("__compute_TtSigmaInvFnorm__", &py_computeTtSigmaInvFnorm2, (arg("self"), arg("gmmstats")), "Computes T^{T} Sigma^{-1} sum_{c=1}^{C} (F_c - N_c mean(c))")
-    .def("__call__", &py_iv_forward1_, (arg("self"), arg("gmmstats"), arg("ivector")), "Executes the machine on the GMMStats, and updates the ivector array. NO CHECK is performed.")
-    .def("__call__", &py_iv_forward2, (arg("self"), arg("gmmstats")), "Executes the machine on the GMMStats. The ivector is allocated an returned.")
-    .def("forward", &py_iv_forward1, (arg("self"), arg("gmmstats"), arg("ivector")), "Executes the machine on the GMMStats, and updates the ivector array.")
-    .def("forward_", &py_iv_forward1_, (arg("self"), arg("gmmstats"), arg("ivector")), "Executes the machine on the GMMStats, and updates the ivector array. NO CHECK is performed.")
-    .def("forward", &py_iv_forward2, (arg("self"), arg("gmmstats")), "Executes the machine on the GMMStats. The ivector is allocated an returned.")
-  ;
-}
diff --git a/bob/learn/misc/old/ivector_trainer.cc b/bob/learn/misc/old/ivector_trainer.cc
deleted file mode 100644
index 72328bd8f55cb9aeea1f5924f88c98623bbc33fb..0000000000000000000000000000000000000000
--- a/bob/learn/misc/old/ivector_trainer.cc
+++ /dev/null
@@ -1,129 +0,0 @@
-/**
-* @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
-* @date Sun Mar 31 19:56:00 2013 +0200
-*
-* Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
-*/
-
-#include "ndarray.h"
-
-#include <bob.learn.misc/IVectorTrainer.h>
-#include <bob.learn.misc/IVectorMachine.h>
-#include <bob.learn.misc/EMTrainer.h>
-#include <boost/python/stl_iterator.hpp>
-
-using namespace boost::python;
-
-typedef bob::learn::misc::EMTrainer<bob::learn::misc::IVectorMachine, std::vector<bob::learn::misc::GMMStats> > EMTrainerIVectorBase;
-
-static void py_train(EMTrainerIVectorBase& trainer,
-  bob::learn::misc::IVectorMachine& machine, object data)
-{
-  stl_input_iterator<bob::learn::misc::GMMStats> dbegin(data), dend;
-  std::vector<bob::learn::misc::GMMStats> vdata(dbegin, dend);
-  trainer.train(machine, vdata);
-}
-
-static void py_initialize(EMTrainerIVectorBase& trainer,
-  bob::learn::misc::IVectorMachine& machine, object data)
-{
-  stl_input_iterator<bob::learn::misc::GMMStats> dbegin(data), dend;
-  std::vector<bob::learn::misc::GMMStats> vdata(dbegin, dend);
-  trainer.initialize(machine, vdata);
-}
-
-static void py_eStep(EMTrainerIVectorBase& trainer,
-  bob::learn::misc::IVectorMachine& machine, object data)
-{
-  stl_input_iterator<bob::learn::misc::GMMStats> dbegin(data), dend;
-  std::vector<bob::learn::misc::GMMStats> vdata(dbegin, dend);
-  trainer.eStep(machine, vdata);
-}
-
-static void py_mStep(EMTrainerIVectorBase& trainer,
-  bob::learn::misc::IVectorMachine& machine, object data)
-{
-  stl_input_iterator<bob::learn::misc::GMMStats> dbegin(data), dend;
-  std::vector<bob::learn::misc::GMMStats> vdata(dbegin, dend);
-  trainer.mStep(machine, vdata);
-}
-
-static void py_finalize(EMTrainerIVectorBase& trainer,
-  bob::learn::misc::IVectorMachine& machine, object data)
-{
-  stl_input_iterator<bob::learn::misc::GMMStats> dbegin(data), dend;
-  std::vector<bob::learn::misc::GMMStats> vdata(dbegin, dend);
-  trainer.finalize(machine, vdata);
-}
-
-static void py_set_AccNijWij2(bob::learn::misc::IVectorTrainer& trainer,
-  bob::python::const_ndarray acc)
-{
-  trainer.setAccNijWij2(acc.bz<double,3>());
-}
-
-static void py_set_AccFnormijWij(bob::learn::misc::IVectorTrainer& trainer,
-  bob::python::const_ndarray acc)
-{
-  trainer.setAccFnormijWij(acc.bz<double,3>());
-}
-
-static void py_set_AccNij(bob::learn::misc::IVectorTrainer& trainer,
-  bob::python::const_ndarray acc)
-{
-  trainer.setAccNij(acc.bz<double,1>());
-}
-
-static void py_set_AccSnormij(bob::learn::misc::IVectorTrainer& trainer,
-  bob::python::const_ndarray acc)
-{
-  trainer.setAccSnormij(acc.bz<double,2>());
-}
-
-
-
-// include the random API of bob.core
-#include <bob.core/random_api.h>
-static boost::python::object ITB_getRng(EMTrainerIVectorBase& self){
-  // create new object
-  PyObject* o = PyBoostMt19937_Type.tp_alloc(&PyBoostMt19937_Type,0);
-  reinterpret_cast<PyBoostMt19937Object*>(o)->rng = self.getRng().get();
-  return boost::python::object(boost::python::handle<>(boost::python::borrowed(o)));
-}
-
-#include <boost/make_shared.hpp>
-static void ITB_setRng(EMTrainerIVectorBase& self, boost::python::object rng){
-  if (!PyBoostMt19937_Check(rng.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.core.random.mt19937 object");
-  PyBoostMt19937Object* o = reinterpret_cast<PyBoostMt19937Object*>(rng.ptr());
-  self.setRng(boost::make_shared<boost::mt19937>(*o->rng));
-}
-
-
-void bind_trainer_ivector()
-{
-  class_<EMTrainerIVectorBase, boost::noncopyable>("EMTrainerIVector", "The base python class for all EM-based trainers.", no_init)
-    .add_property("convergence_threshold", &EMTrainerIVectorBase::getConvergenceThreshold, &EMTrainerIVectorBase::setConvergenceThreshold, "Convergence threshold")
-    .add_property("max_iterations", &EMTrainerIVectorBase::getMaxIterations, &EMTrainerIVectorBase::setMaxIterations, "Max iterations")
-    .add_property("compute_likelihood_variable", &EMTrainerIVectorBase::getComputeLikelihood, &EMTrainerIVectorBase::setComputeLikelihood, "Indicates whether the log likelihood should be computed during EM or not")
-    .add_property("rng", &ITB_getRng, &ITB_setRng, "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.")
-    .def("train", &py_train, (arg("machine"), arg("data")), "Trains a machine using data")
-    .def("initialize", &py_initialize, (arg("machine"), arg("data")), "This method is called before the EM algorithm")
-    .def("finalize", &py_finalize, (arg("machine"), arg("data")), "This method is called at the end of the EM algorithm")
-    .def("e_step", &py_eStep, (arg("machine"), arg("data")),
-       "Updates the hidden variable distribution (or the sufficient statistics) given the Machine parameters. ")
-    .def("m_step", &py_mStep, (arg("machine"), arg("data")), "Updates the Machine parameters given the hidden variable distribution (or the sufficient statistics)")
-    .def("compute_likelihood", &EMTrainerIVectorBase::computeLikelihood, (arg("machine")), "Computes the current log likelihood given the hidden variable distribution (or the sufficient statistics)")
-  ;
-
-
-  class_<bob::learn::misc::IVectorTrainer, boost::shared_ptr<bob::learn::misc::IVectorTrainer>, boost::noncopyable, bases<EMTrainerIVectorBase> >("IVectorTrainer", "An trainer to extract i-vector (i.e. for training the Total Variability matrix)\n\nReferences:\n[1] 'Front End Factor Analysis for Speaker Verification', N. Dehak, P. Kenny, R. Dehak, P. Dumouchel, P. Ouellet, IEEE Transactions on Audio, Speech and Language Processing, 2010, vol. 19, issue 4, pp. 788-798", init<optional<bool, double, size_t, bool> >((arg("self"), arg("update_sigma")=false, arg("convergence_threshold")=0.001, arg("max_iterations")=10, arg("compute_likelihood")=false), "Builds a new IVectorTrainer."))
-    .def(init<const bob::learn::misc::IVectorTrainer&>((arg("self"), arg("trainer")), "Copy constructs an IVectorTrainer"))
-    .def(self == self)
-    .def(self != self)
-    .def("is_similar_to", &bob::learn::misc::IVectorTrainer::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this IVectorTrainer with the 'other' one to be approximately the same.")
-    .add_property("acc_nij_wij2", make_function(&bob::learn::misc::IVectorTrainer::getAccNijWij2, return_value_policy<copy_const_reference>()), &py_set_AccNijWij2, "Accumulator updated during the E-step")
-    .add_property("acc_fnormij_wij", make_function(&bob::learn::misc::IVectorTrainer::getAccFnormijWij, return_value_policy<copy_const_reference>()), &py_set_AccFnormijWij, "Accumulator updated during the E-step")
-    .add_property("acc_nij", make_function(&bob::learn::misc::IVectorTrainer::getAccNij, return_value_policy<copy_const_reference>()), &py_set_AccNij, "Accumulator updated during the E-step")
-    .add_property("acc_snormij", make_function(&bob::learn::misc::IVectorTrainer::getAccSnormij, return_value_policy<copy_const_reference>()), &py_set_AccSnormij, "Accumulator updated during the E-step")
-  ;
-}
diff --git a/bob/learn/misc/old/jfa.cc b/bob/learn/misc/old/jfa.cc
deleted file mode 100644
index b5cc24ad60e31a664776b4664eb648587ac795ce..0000000000000000000000000000000000000000
--- a/bob/learn/misc/old/jfa.cc
+++ /dev/null
@@ -1,321 +0,0 @@
-/**
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @date Sat Jul 23 21:41:15 2011 +0200
- *
- * @brief Python bindings for the FA-related machines
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <bob.blitz/capi.h>
-#include <bob.blitz/cleanup.h>
-#include <bob.io.base/api.h>
-
-#include "ndarray.h"
-
-#include <bob.learn.misc/JFAMachine.h>
-#include <bob.learn.misc/GMMMachine.h>
-
-using namespace boost::python;
-
-static void py_jfa_setU(bob::learn::misc::JFABase& machine,
-  bob::python::const_ndarray U)
-{
-  machine.setU(U.bz<double,2>());
-}
-
-static void py_jfa_setV(bob::learn::misc::JFABase& machine,
-  bob::python::const_ndarray V)
-{
-  machine.setV(V.bz<double,2>());
-}
-
-static void py_jfa_setD(bob::learn::misc::JFABase& machine,
-  bob::python::const_ndarray D)
-{
-  machine.setD(D.bz<double,1>());
-}
-
-static void py_jfa_setY(bob::learn::misc::JFAMachine& machine, bob::python::const_ndarray Y) {
-  const blitz::Array<double,1>& Y_ = Y.bz<double,1>();
-  machine.setY(Y_);
-}
-
-static void py_jfa_setZ(bob::learn::misc::JFAMachine& machine, bob::python::const_ndarray Z) {
-  const blitz::Array<double,1> Z_ = Z.bz<double,1>();
-  machine.setZ(Z_);
-}
-
-static void py_jfa_estimateX(bob::learn::misc::JFAMachine& machine,
-  const bob::learn::misc::GMMStats& gmm_stats, bob::python::ndarray x)
-{
-  blitz::Array<double,1> x_ = x.bz<double,1>();
-  machine.estimateX(gmm_stats, x_);
-}
-
-static void py_jfa_estimateUx(bob::learn::misc::JFAMachine& machine,
-  const bob::learn::misc::GMMStats& gmm_stats, bob::python::ndarray ux)
-{
-  blitz::Array<double,1> ux_ = ux.bz<double,1>();
-  machine.estimateUx(gmm_stats, ux_);
-}
-
-static double py_jfa_forwardUx(bob::learn::misc::JFAMachine& machine,
-  const bob::learn::misc::GMMStats& gmm_stats, bob::python::const_ndarray ux)
-{
-  double score;
-  machine.forward(gmm_stats, ux.bz<double,1>(), score);
-  return score;
-}
-
-
-static void py_isv_setU(bob::learn::misc::ISVBase& machine,
-  bob::python::const_ndarray U)
-{
-  machine.setU(U.bz<double,2>());
-}
-
-static void py_isv_setD(bob::learn::misc::ISVBase& machine,
-  bob::python::const_ndarray D)
-{
-  machine.setD(D.bz<double,1>());
-}
-
-static void py_isv_setZ(bob::learn::misc::ISVMachine& machine, bob::python::const_ndarray Z) {
-  machine.setZ(Z.bz<double,1>());
-}
-
-static void py_isv_estimateX(bob::learn::misc::ISVMachine& machine,
-  const bob::learn::misc::GMMStats& gmm_stats, bob::python::ndarray x)
-{
-  blitz::Array<double,1> x_ = x.bz<double,1>();
-  machine.estimateX(gmm_stats, x_);
-}
-
-static void py_isv_estimateUx(bob::learn::misc::ISVMachine& machine,
-  const bob::learn::misc::GMMStats& gmm_stats, bob::python::ndarray ux)
-{
-  blitz::Array<double,1> ux_ = ux.bz<double,1>();
-  machine.estimateUx(gmm_stats, ux_);
-}
-
-static double py_isv_forwardUx(bob::learn::misc::ISVMachine& machine,
-  const bob::learn::misc::GMMStats& gmm_stats, bob::python::const_ndarray ux)
-{
-  double score;
-  machine.forward(gmm_stats, ux.bz<double,1>(), score);
-  return score;
-}
-
-
-static double py_gen1_forward(const bob::learn::misc::Machine<bob::learn::misc::GMMStats, double>& m,
-  const bob::learn::misc::GMMStats& stats)
-{
-  double output;
-  m.forward(stats, output);
-  return output;
-}
-
-static double py_gen1_forward_(const bob::learn::misc::Machine<bob::learn::misc::GMMStats, double>& m,
-  const bob::learn::misc::GMMStats& stats)
-{
-  double output;
-  m.forward_(stats, output);
-  return output;
-}
-
-static void py_gen2b_forward(const bob::learn::misc::Machine<bob::learn::misc::GMMStats, blitz::Array<double,1> >& m,
-  const bob::learn::misc::GMMStats& stats, bob::python::const_ndarray output)
-{
-  blitz::Array<double,1> output_ = output.bz<double,1>();
-  m.forward(stats, output_);
-}
-
-static void py_gen2b_forward_(const bob::learn::misc::Machine<bob::learn::misc::GMMStats, blitz::Array<double,1> >& m,
-  const bob::learn::misc::GMMStats& stats, bob::python::const_ndarray output)
-{
-  blitz::Array<double,1> output_ = output.bz<double,1>();
-  m.forward_(stats, output_);
-}
-
-
-static boost::shared_ptr<bob::learn::misc::JFABase> jb_init(boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  return boost::shared_ptr<bob::learn::misc::JFABase>(new bob::learn::misc::JFABase(*hdf5->f));
-}
-
-static void jb_load(bob::learn::misc::JFABase& self, boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  self.load(*hdf5->f);
-}
-
-static void jb_save(const bob::learn::misc::JFABase& self, boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  self.save(*hdf5->f);
-}
-
-
-static boost::shared_ptr<bob::learn::misc::JFAMachine> jm_init(boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  return boost::shared_ptr<bob::learn::misc::JFAMachine>(new bob::learn::misc::JFAMachine(*hdf5->f));
-}
-
-static void jm_load(bob::learn::misc::JFAMachine& self, boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  self.load(*hdf5->f);
-}
-
-static void jm_save(const bob::learn::misc::JFAMachine& self, boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  self.save(*hdf5->f);
-}
-
-
-static boost::shared_ptr<bob::learn::misc::ISVBase> ib_init(boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  return boost::shared_ptr<bob::learn::misc::ISVBase>(new bob::learn::misc::ISVBase(*hdf5->f));
-}
-
-static void ib_load(bob::learn::misc::ISVBase& self, boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  self.load(*hdf5->f);
-}
-
-static void ib_save(const bob::learn::misc::ISVBase& self, boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  self.save(*hdf5->f);
-}
-
-
-static boost::shared_ptr<bob::learn::misc::ISVMachine> im_init(boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  return boost::shared_ptr<bob::learn::misc::ISVMachine>(new bob::learn::misc::ISVMachine(*hdf5->f));
-}
-
-static void im_load(bob::learn::misc::ISVMachine& self, boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  self.load(*hdf5->f);
-}
-
-static void im_save(const bob::learn::misc::ISVMachine& self, boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  self.save(*hdf5->f);
-}
-
-void bind_machine_jfa()
-{
-  class_<bob::learn::misc::Machine<bob::learn::misc::GMMStats, double>, boost::noncopyable>("MachineGMMStatsScalarBase",
-      "Root class for all Machine<bob::learn::misc::GMMStats, double>", no_init)
-    .def("__call__", &py_gen1_forward_, (arg("self"), arg("input")), "Executes the machine on the GMMStats, and returns the (scalar) output. NO CHECK is performed.")
-    .def("forward", &py_gen1_forward, (arg("self"), arg("input")), "Executes the machine on the GMMStats, and returns the (scalar) output.")
-    .def("forward_", &py_gen1_forward_, (arg("self"), arg("input")), "Executes the machine on the GMMStats, and returns the (scalar) output. NO CHECK is performed.")
-  ;
-
-  class_<bob::learn::misc::Machine<bob::learn::misc::GMMStats, blitz::Array<double,1> >, boost::noncopyable>("MachineGMMStatsA1DBase",
-      "Root class for all Machine<bob::learn::misc::GMMStats, blitz::Array<double,1>", no_init)
-    .def("__call__", &py_gen2b_forward_, (arg("self"), arg("input"), arg("output")), "Executes the machine on the GMMStats, and returns the (scalar) output. NO CHECK is performed.")
-    .def("forward", &py_gen2b_forward, (arg("self"), arg("input"), arg("output")), "Executes the machine on the GMMStats, and returns the (scalar) output.")
-    .def("forward_", &py_gen2b_forward_, (arg("self"), arg("input"), arg("output")), "Executes the machine on the GMMStats, and returns the (scalar) output. NO CHECK is performed.")
-  ;
-
-
-  class_<bob::learn::misc::JFABase, boost::shared_ptr<bob::learn::misc::JFABase>, bases<bob::learn::misc::Machine<bob::learn::misc::GMMStats, double> > >("JFABase", "A JFABase instance can be seen as a container for U, V and D when performing Joint Factor Analysis (JFA).\n\nReferences:\n[1] 'Explicit Modelling of Session Variability for Speaker Verification', R. Vogt, S. Sridharan, Computer Speech & Language, 2008, vol. 22, no. 1, pp. 17-38\n[2] 'Session Variability Modelling for Face Authentication', C. McCool, R. Wallace, M. McLaren, L. El Shafey, S. Marcel, IET Biometrics, 2013", no_init)
-    .def("__init__", boost::python::make_constructor(&jb_init), "Constructs a new JFABaseMachine from a configuration file.")
-    .def(init<const boost::shared_ptr<bob::learn::misc::GMMMachine>, optional<const size_t, const size_t> >((arg("self"), arg("ubm"), arg("ru")=1, arg("rv")=1), "Builds a new JFABase."))
-    .def(init<>((arg("self")), "Constructs a 1x1 JFABase instance. You have to set a UBM GMM and resize the U, V and D subspaces afterwards."))
-    .def(init<const bob::learn::misc::JFABase&>((arg("self"), arg("machine")), "Copy constructs a JFABase"))
-    .def(self == self)
-    .def(self != self)
-    .def("is_similar_to", &bob::learn::misc::JFABase::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this JFABase with the 'other' one to be approximately the same.")
-    .def("load", &jb_load, (arg("self"), arg("config")), "Loads the configuration parameters from a configuration file.")
-    .def("save", &jb_save, (arg("self"), arg("config")), "Saves the configuration parameters to a configuration file.")
-    .def("resize", &bob::learn::misc::JFABase::resize, (arg("self"), arg("ru"), arg("rv")), "Reset the dimensionality of the subspaces U and V.")
-    .add_property("ubm", &bob::learn::misc::JFABase::getUbm, &bob::learn::misc::JFABase::setUbm, "The UBM GMM attached to this Joint Factor Analysis model")
-    .add_property("u", make_function(&bob::learn::misc::JFABase::getU, return_value_policy<copy_const_reference>()), &py_jfa_setU, "The subspace U for within-class variations")
-    .add_property("v", make_function(&bob::learn::misc::JFABase::getV, return_value_policy<copy_const_reference>()), &py_jfa_setV, "The subspace V for between-class variations")
-    .add_property("d", make_function(&bob::learn::misc::JFABase::getD, return_value_policy<copy_const_reference>()), &py_jfa_setD, "The subspace D for residual variations")
-    .add_property("dim_c", &bob::learn::misc::JFABase::getDimC, "The number of Gaussian components")
-    .add_property("dim_d", &bob::learn::misc::JFABase::getDimD, "The dimensionality of the feature space")
-    .add_property("dim_cd", &bob::learn::misc::JFABase::getDimCD, "The dimensionality of the supervector space")
-    .add_property("dim_ru", &bob::learn::misc::JFABase::getDimRu, "The dimensionality of the within-class variations subspace (rank of U)")
-    .add_property("dim_rv", &bob::learn::misc::JFABase::getDimRv, "The dimensionality of the between-class variations subspace (rank of V)")
-  ;
-
-  class_<bob::learn::misc::JFAMachine, boost::shared_ptr<bob::learn::misc::JFAMachine>, bases<bob::learn::misc::Machine<bob::learn::misc::GMMStats, double> > >("JFAMachine", "A JFAMachine. An attached JFABase should be provided for Joint Factor Analysis. The JFAMachine carries information about the speaker factors y and z, whereas a JFABase carries information about the matrices U, V and D.\n\nReferences:\n[1] 'Explicit Modelling of Session Variability for Speaker Verification', R. Vogt, S. Sridharan, Computer Speech & Language, 2008, vol. 22, no. 1, pp. 17-38\n[2] 'Session Variability Modelling for Face Authentication', C. McCool, R. Wallace, M. McLaren, L. El Shafey, S. Marcel, IET Biometrics, 2013", no_init)
-    .def("__init__", boost::python::make_constructor(&jm_init), "Constructs a new JFAMachine from a configuration file.")
-    .def(init<>((arg("self")), "Constructs a 1x1 JFAMachine instance. You have to set a JFABase afterwards."))
-    .def(init<const boost::shared_ptr<bob::learn::misc::JFABase> >((arg("self"), arg("jfa_base")), "Builds a new JFAMachine."))
-    .def(init<const bob::learn::misc::JFAMachine&>((arg("self"), arg("machine")), "Copy constructs a JFAMachine"))
-    .def(self == self)
-    .def(self != self)
-    .def("is_similar_to", &bob::learn::misc::JFAMachine::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this JFABase with the 'other' one to be approximately the same.")
-    .def("load", &jm_load, (arg("self"), arg("config")), "Loads the configuration parameters from a configuration file.")
-    .def("save", &jm_save, (arg("self"), arg("config")), "Saves the configuration parameters to a configuration file.")
-    .def("estimate_x", &py_jfa_estimateX, (arg("self"), arg("stats"), arg("x")), "Estimates the session offset x (LPT assumption) given GMM statistics.")
-    .def("estimate_ux", &py_jfa_estimateUx, (arg("self"), arg("stats"), arg("ux")), "Estimates Ux (LPT assumption) given GMM statistics.")
-    .def("forward_ux", &py_jfa_forwardUx, (arg("self"), arg("stats"), arg("ux")), "Processes the GMM statistics and Ux to return a score.")
-    .add_property("jfa_base", &bob::learn::misc::JFAMachine::getJFABase, &bob::learn::misc::JFAMachine::setJFABase, "The JFABase attached to this machine")
-    .add_property("__x__", make_function(&bob::learn::misc::JFAMachine::getX, return_value_policy<copy_const_reference>()), "The latent variable x (last one computed). This is a feature provided for convenience, but this attribute is not 'part' of the machine. The session latent variable x is indeed not class-specific, but depends on the sample considered. Furthermore, it is not saved into the machine or used when comparing machines.")
-    .add_property("y", make_function(&bob::learn::misc::JFAMachine::getY, return_value_policy<copy_const_reference>()), &py_jfa_setY, "The latent variable y of this machine")
-    .add_property("z", make_function(&bob::learn::misc::JFAMachine::getZ, return_value_policy<copy_const_reference>()), &py_jfa_setZ, "The latent variable z of this machine")
-    .add_property("dim_c", &bob::learn::misc::JFAMachine::getDimC, "The number of Gaussian components")
-    .add_property("dim_d", &bob::learn::misc::JFAMachine::getDimD, "The dimensionality of the feature space")
-    .add_property("dim_cd", &bob::learn::misc::JFAMachine::getDimCD, "The dimensionality of the supervector space")
-    .add_property("dim_ru", &bob::learn::misc::JFAMachine::getDimRu, "The dimensionality of the within-class variations subspace (rank of U)")
-    .add_property("dim_rv", &bob::learn::misc::JFAMachine::getDimRv, "The dimensionality of the between-class variations subspace (rank of V)")
-  ;
-
-  class_<bob::learn::misc::ISVBase, boost::shared_ptr<bob::learn::misc::ISVBase>, bases<bob::learn::misc::Machine<bob::learn::misc::GMMStats, double> > >("ISVBase", "An ISVBase instance can be seen as a container for U and D when performing Joint Factor Analysis (ISV). \n\nReferences:\n[1] 'Explicit Modelling of Session Variability for Speaker Verification', R. Vogt, S. Sridharan, Computer Speech & Language, 2008, vol. 22, no. 1, pp. 17-38\n[2] 'Session Variability Modelling for Face Authentication', C. McCool, R. Wallace, M. McLaren, L. El Shafey, S. Marcel, IET Biometrics, 2013", no_init)
-    .def("__init__", boost::python::make_constructor(&ib_init), "Constructs a new ISVBaseMachine from a configuration file.")
-    .def(init<const boost::shared_ptr<bob::learn::misc::GMMMachine>, optional<const size_t> >((arg("self"), arg("ubm"), arg("ru")=1), "Builds a new ISVBase."))
-    .def(init<>((arg("self")), "Constructs a 1 ISVBase instance. You have to set a UBM GMM and resize the U and D subspaces afterwards."))
-    .def(init<const bob::learn::misc::ISVBase&>((arg("self"), arg("machine")), "Copy constructs an ISVBase"))
-    .def(self == self)
-    .def(self != self)
-    .def("is_similar_to", &bob::learn::misc::ISVBase::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this ISVBase with the 'other' one to be approximately the same.")
-    .def("load", &ib_load, (arg("self"), arg("config")), "Loads the configuration parameters from a configuration file.")
-    .def("save", &ib_save, (arg("self"), arg("config")), "Saves the configuration parameters to a configuration file.")
-    .def("resize", &bob::learn::misc::ISVBase::resize, (arg("self"), arg("ru")), "Reset the dimensionality of the subspaces U.")
-    .add_property("ubm", &bob::learn::misc::ISVBase::getUbm, &bob::learn::misc::ISVBase::setUbm, "The UBM GMM attached to this Joint Factor Analysis model")
-    .add_property("u", make_function(&bob::learn::misc::ISVBase::getU, return_value_policy<copy_const_reference>()), &py_isv_setU, "The subspace U for within-class variations")
-    .add_property("d", make_function(&bob::learn::misc::ISVBase::getD, return_value_policy<copy_const_reference>()), &py_isv_setD, "The subspace D for residual variations")
-    .add_property("dim_c", &bob::learn::misc::ISVBase::getDimC, "The number of Gaussian components")
-    .add_property("dim_d", &bob::learn::misc::ISVBase::getDimD, "The dimensionality of the feature space")
-    .add_property("dim_cd", &bob::learn::misc::ISVBase::getDimCD, "The dimensionality of the supervector space")
-    .add_property("dim_ru", &bob::learn::misc::ISVBase::getDimRu, "The dimensionality of the within-class variations subspace (rank of U)")
-  ;
-
-  class_<bob::learn::misc::ISVMachine, boost::shared_ptr<bob::learn::misc::ISVMachine>, bases<bob::learn::misc::Machine<bob::learn::misc::GMMStats, double> > >("ISVMachine", "An ISVMachine. An attached ISVBase should be provided for Inter-session Variability Modelling. The ISVMachine carries information about the speaker factors z, whereas a ISVBase carries information about the matrices U and D. \n\nReferences:\n[1] 'Explicit Modelling of Session Variability for Speaker Verification', R. Vogt, S. Sridharan, Computer Speech & Language, 2008, vol. 22, no. 1, pp. 17-38\n[2] 'Session Variability Modelling for Face Authentication', C. McCool, R. Wallace, M. McLaren, L. El Shafey, S. Marcel, IET Biometrics, 2013", no_init)
-    .def("__init__", boost::python::make_constructor(&im_init), "Constructs a new ISVMachine from a configuration file.")
-    .def(init<>((arg("self")), "Constructs a 1 ISVMachine instance. You have to set a ISVBase afterwards."))
-    .def(init<const boost::shared_ptr<bob::learn::misc::ISVBase> >((arg("self"), arg("isv_base")), "Builds a new ISVMachine."))
-    .def(init<const bob::learn::misc::ISVMachine&>((arg("self"), arg("machine")), "Copy constructs an ISVMachine"))
-    .def(self == self)
-    .def(self != self)
-    .def("is_similar_to", &bob::learn::misc::ISVMachine::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this ISVBase with the 'other' one to be approximately the same.")
-    .def("load", &im_load, (arg("self"), arg("config")), "Loads the configuration parameters from a configuration file.")
-    .def("save", &im_save, (arg("self"), arg("config")), "Saves the configuration parameters to a configuration file.")
-    .def("estimate_x", &py_isv_estimateX, (arg("self"), arg("stats"), arg("x")), "Estimates the session offset x (LPT assumption) given GMM statistics.")
-    .def("estimate_ux", &py_isv_estimateUx, (arg("self"), arg("stats"), arg("ux")), "Estimates Ux (LPT assumption) given GMM statistics.")
-    .def("forward_ux", &py_isv_forwardUx, (arg("self"), arg("stats"), arg("ux")), "Processes the GMM statistics and Ux to return a score.")
-    .add_property("isv_base", &bob::learn::misc::ISVMachine::getISVBase, &bob::learn::misc::ISVMachine::setISVBase, "The ISVBase attached to this machine")
-    .add_property("__x__", make_function(&bob::learn::misc::ISVMachine::getX, return_value_policy<copy_const_reference>()), "The latent variable x (last one computed). This is a feature provided for convenience, but this attribute is not 'part' of the machine. The session latent variable x is indeed not class-specific, but depends on the sample considered. Furthermore, it is not saved into the machine or used when comparing machines.")
-    .add_property("z", make_function(&bob::learn::misc::ISVMachine::getZ, return_value_policy<copy_const_reference>()), &py_isv_setZ, "The latent variable z of this machine")
-    .add_property("dim_c", &bob::learn::misc::ISVMachine::getDimC, "The number of Gaussian components")
-    .add_property("dim_d", &bob::learn::misc::ISVMachine::getDimD, "The dimensionality of the feature space")
-    .add_property("dim_cd", &bob::learn::misc::ISVMachine::getDimCD, "The dimensionality of the supervector space")
-    .add_property("dim_ru", &bob::learn::misc::ISVMachine::getDimRu, "The dimensionality of the within-class variations subspace (rank of U)")
-  ;
-}
diff --git a/bob/learn/misc/old/jfa_trainer.cc b/bob/learn/misc/old/jfa_trainer.cc
deleted file mode 100644
index 542c33ab8f987658997ab7c927bf07d4c8a56d31..0000000000000000000000000000000000000000
--- a/bob/learn/misc/old/jfa_trainer.cc
+++ /dev/null
@@ -1,392 +0,0 @@
-/**
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @date Tue Jul 19 12:16:17 2011 +0200
- *
- * @brief Python bindings to Joint Factor Analysis trainers
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "ndarray.h"
-#include <boost/python/stl_iterator.hpp>
-#include <bob.learn.misc/JFATrainer.h>
-
-using namespace boost::python;
-
-template <int N>
-static object vector_as_list(const std::vector<blitz::Array<double,N> >& vec)
-{
-  list retval;
-  for(size_t k=0; k<vec.size(); ++k)
-    retval.append(vec[k]);
-  return tuple(retval);
-}
-
-static void extract_GMMStats(object data,
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& training_data)
-{
-  stl_input_iterator<object> dbegin(data), dend;
-  std::vector<object> vvdata(dbegin, dend);
-  for (size_t i=0; i<vvdata.size(); ++i)
-  {
-    stl_input_iterator<boost::shared_ptr<bob::learn::misc::GMMStats> > dlbegin(vvdata[i]), dlend;
-    training_data.push_back(std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >(dlbegin, dlend));
-  }
-}
-
-static void isv_train(bob::learn::misc::ISVTrainer& t, bob::learn::misc::ISVBase& m, object data)
-{
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  extract_GMMStats(data, training_data);
-  // Calls the train function
-  t.train(m, training_data);
-}
-
-static void isv_initialize(bob::learn::misc::ISVTrainer& t, bob::learn::misc::ISVBase& m, object data)
-{
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  extract_GMMStats(data, training_data);
-  // Calls the initialize function
-  t.initialize(m, training_data);
-}
-
-static void isv_estep(bob::learn::misc::ISVTrainer& t, bob::learn::misc::ISVBase& m, object data)
-{
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  extract_GMMStats(data, training_data);
-  // Calls the E-Step function
-  t.eStep(m, training_data);
-}
-
-static void isv_mstep(bob::learn::misc::ISVTrainer& t, bob::learn::misc::ISVBase& m, object data)
-{
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  extract_GMMStats(data, training_data);
-  // Calls the M-Step function
-  t.mStep(m, training_data);
-}
-
-static void isv_finalize(bob::learn::misc::ISVTrainer& t, bob::learn::misc::ISVBase& m, object data)
-{
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  extract_GMMStats(data, training_data);
-  // Calls the finalization function
-  t.finalize(m, training_data);
-}
-
-static void isv_enrol(bob::learn::misc::ISVTrainer& t, bob::learn::misc::ISVMachine& m, object data, const size_t n_iter)
-{
-  stl_input_iterator<boost::shared_ptr<bob::learn::misc::GMMStats> > dlbegin(data), dlend;
-  std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > vdata(dlbegin, dlend);
-  // Calls the enrol function
-  t.enrol(m, vdata, n_iter);
-}
-
-static object isv_get_x(const bob::learn::misc::ISVTrainer& t)
-{
-  return vector_as_list(t.getX());
-}
-
-static object isv_get_z(const bob::learn::misc::ISVTrainer& t)
-{
-  return vector_as_list(t.getZ());
-}
-
-static void isv_set_x(bob::learn::misc::ISVTrainer& t, object data)
-{
-  stl_input_iterator<bob::python::const_ndarray> vdata(data), dend;
-  std::vector<blitz::Array<double,2> > vdata_ref;
-  vdata_ref.reserve(len(data));
-  for (; vdata != dend; ++vdata) vdata_ref.push_back((*vdata).bz<double,2>());
-  t.setX(vdata_ref);
-}
-
-static void isv_set_z(bob::learn::misc::ISVTrainer& t, object data)
-{
-  stl_input_iterator<bob::python::const_ndarray> vdata(data), dend;
-  std::vector<blitz::Array<double,1> > vdata_ref;
-  vdata_ref.reserve(len(data));
-  for (; vdata != dend; ++vdata) vdata_ref.push_back((*vdata).bz<double,1>());
-  t.setZ(vdata_ref);
-}
-
-
-static void isv_set_accUA1(bob::learn::misc::ISVTrainer& trainer,
-  bob::python::const_ndarray acc)
-{
-  trainer.setAccUA1(acc.bz<double,3>());
-}
-
-static void isv_set_accUA2(bob::learn::misc::ISVTrainer& trainer,
-  bob::python::const_ndarray acc)
-{
-  trainer.setAccUA2(acc.bz<double,2>());
-}
-
-
-
-static void jfa_train(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
-{
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  extract_GMMStats(data, training_data);
-  // Calls the train function
-  t.train(m, training_data);
-}
-
-static void jfa_initialize(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
-{
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  extract_GMMStats(data, training_data);
-  // Calls the initialize function
-  t.initialize(m, training_data);
-}
-
-static void jfa_estep1(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
-{
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  extract_GMMStats(data, training_data);
-  // Calls the E-Step function
-  t.eStep1(m, training_data);
-}
-
-static void jfa_mstep1(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
-{
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  extract_GMMStats(data, training_data);
-  // Calls the M-Step function
-  t.mStep1(m, training_data);
-}
-
-static void jfa_finalize1(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
-{
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  extract_GMMStats(data, training_data);
-  // Calls the finalization function
-  t.finalize1(m, training_data);
-}
-
-static void jfa_estep2(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
-{
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  extract_GMMStats(data, training_data);
-  // Calls the E-Step function
-  t.eStep2(m, training_data);
-}
-
-static void jfa_mstep2(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
-{
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  extract_GMMStats(data, training_data);
-  // Calls the M-Step function
-  t.mStep2(m, training_data);
-}
-
-static void jfa_finalize2(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
-{
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  extract_GMMStats(data, training_data);
-  // Calls the finalization function
-  t.finalize2(m, training_data);
-}
-
-static void jfa_estep3(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
-{
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  extract_GMMStats(data, training_data);
-  // Calls the E-Step function
-  t.eStep3(m, training_data);
-}
-
-static void jfa_mstep3(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
-{
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  extract_GMMStats(data, training_data);
-  // Calls the M-Step function
-  t.mStep3(m, training_data);
-}
-
-static void jfa_finalize3(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
-{
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  extract_GMMStats(data, training_data);
-  // Calls the finalization function
-  t.finalize3(m, training_data);
-}
-
-static void jfa_train_loop(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
-{
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
-  extract_GMMStats(data, training_data);
-  // Calls the main loop function
-  t.train_loop(m, training_data);
-}
-
-static void jfa_enrol(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFAMachine& m, object data, const size_t n_iter)
-{
-  stl_input_iterator<boost::shared_ptr<bob::learn::misc::GMMStats> > dlbegin(data), dlend;
-  std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > vdata(dlbegin, dlend);
-  // Calls the enrol function
-  t.enrol(m, vdata, n_iter);
-}
-
-static object jfa_get_x(const bob::learn::misc::JFATrainer& t)
-{
-  return vector_as_list(t.getX());
-}
-
-static object jfa_get_y(const bob::learn::misc::JFATrainer& t)
-{
-  return vector_as_list(t.getY());
-}
-
-static object jfa_get_z(const bob::learn::misc::JFATrainer& t)
-{
-  return vector_as_list(t.getZ());
-}
-
-static void jfa_set_x(bob::learn::misc::JFATrainer& t, object data)
-{
-  stl_input_iterator<bob::python::const_ndarray> vdata(data), dend;
-  std::vector<blitz::Array<double,2> > vdata_ref;
-  vdata_ref.reserve(len(data));
-  for (; vdata != dend; ++vdata) vdata_ref.push_back((*vdata).bz<double,2>());
-  t.setX(vdata_ref);
-}
-
-static void jfa_set_y(bob::learn::misc::JFATrainer& t, object data)
-{
-  stl_input_iterator<bob::python::const_ndarray> vdata(data), dend;
-  std::vector<blitz::Array<double,1> > vdata_ref;
-  vdata_ref.reserve(len(data));
-  for (; vdata != dend; ++vdata) vdata_ref.push_back((*vdata).bz<double,1>());
-  t.setY(vdata_ref);
-}
-
-static void jfa_set_z(bob::learn::misc::JFATrainer& t, object data)
-{
-  stl_input_iterator<bob::python::const_ndarray> vdata(data), dend;
-  std::vector<blitz::Array<double,1> > vdata_ref;
-  vdata_ref.reserve(len(data));
-  for (; vdata != dend; ++vdata) vdata_ref.push_back((*vdata).bz<double,1>());
-  t.setZ(vdata_ref);
-}
-
-
-static void jfa_set_accUA1(bob::learn::misc::JFATrainer& trainer,
-  bob::python::const_ndarray acc)
-{
-  trainer.setAccUA1(acc.bz<double,3>());
-}
-
-static void jfa_set_accUA2(bob::learn::misc::JFATrainer& trainer,
-  bob::python::const_ndarray acc)
-{
-  trainer.setAccUA2(acc.bz<double,2>());
-}
-
-static void jfa_set_accVA1(bob::learn::misc::JFATrainer& trainer,
-  bob::python::const_ndarray acc)
-{
-  trainer.setAccVA1(acc.bz<double,3>());
-}
-
-static void jfa_set_accVA2(bob::learn::misc::JFATrainer& trainer,
-  bob::python::const_ndarray acc)
-{
-  trainer.setAccVA2(acc.bz<double,2>());
-}
-
-static void jfa_set_accDA1(bob::learn::misc::JFATrainer& trainer,
-  bob::python::const_ndarray acc)
-{
-  trainer.setAccDA1(acc.bz<double,1>());
-}
-
-static void jfa_set_accDA2(bob::learn::misc::JFATrainer& trainer,
-  bob::python::const_ndarray acc)
-{
-  trainer.setAccDA2(acc.bz<double,1>());
-}
-
-
-
-// include the random API of bob.core
-#include <bob.core/random_api.h>
-static boost::python::object isv_getRng(bob::learn::misc::ISVTrainer& self){
-  // create new object
-  PyObject* o = PyBoostMt19937_Type.tp_alloc(&PyBoostMt19937_Type,0);
-  reinterpret_cast<PyBoostMt19937Object*>(o)->rng = self.getRng().get();
-  return boost::python::object(boost::python::handle<>(boost::python::borrowed(o)));
-}
-static boost::python::object jfa_getRng(bob::learn::misc::JFATrainer& self){
-  // create new object
-  PyObject* o = PyBoostMt19937_Type.tp_alloc(&PyBoostMt19937_Type,0);
-  reinterpret_cast<PyBoostMt19937Object*>(o)->rng = self.getRng().get();
-  return boost::python::object(boost::python::handle<>(boost::python::borrowed(o)));
-}
-
-#include <boost/make_shared.hpp>
-static void isv_setRng(bob::learn::misc::ISVTrainer& self, boost::python::object rng){
-  if (!PyBoostMt19937_Check(rng.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.core.random.mt19937 object");
-  PyBoostMt19937Object* o = reinterpret_cast<PyBoostMt19937Object*>(rng.ptr());
-  self.setRng(boost::make_shared<boost::mt19937>(*o->rng));
-}
-static void jfa_setRng(bob::learn::misc::JFATrainer& self, boost::python::object rng){
-  if (!PyBoostMt19937_Check(rng.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.core.random.mt19937 object");
-  PyBoostMt19937Object* o = reinterpret_cast<PyBoostMt19937Object*>(rng.ptr());
-  self.setRng(boost::make_shared<boost::mt19937>(*o->rng));
-}
-
-
-void bind_trainer_jfa()
-{
-  class_<bob::learn::misc::ISVTrainer, boost::noncopyable >("ISVTrainer", "A trainer for Inter-session Variability Modelling (ISV). \n\nReferences:\n[1] 'Explicit Modelling of Session Variability for Speaker Verification', R. Vogt, S. Sridharan, Computer Speech & Language, 2008, vol. 22, no. 1, pp. 17-38\n[2] 'Session Variability Modelling for Face Authentication', C. McCool, R. Wallace, M. McLaren, L. El Shafey, S. Marcel, IET Biometrics, 2013", init<optional<const size_t, const double> >((arg("self"), arg("max_iterations")=10, arg("relevance_factor")=4.),"Initializes a new ISVTrainer."))
-    .def(init<const bob::learn::misc::ISVTrainer&>((arg("self"), arg("other")), "Copy constructs an ISVTrainer"))
-    .add_property("max_iterations", &bob::learn::misc::ISVTrainer::getMaxIterations, &bob::learn::misc::ISVTrainer::setMaxIterations, "Max iterations")
-    .add_property("rng", &isv_getRng, &isv_setRng, "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.")
-    .add_property("__X__", &isv_get_x, &isv_set_x)
-    .add_property("__Z__", &isv_get_z, &isv_set_z)
-    .def(self == self)
-    .def(self != self)
-    .def("is_similar_to", &bob::learn::misc::ISVTrainer::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this ISVTrainer with the 'other' one to be approximately the same.")
-    .def("train", &isv_train, (arg("self"), arg("isv_base"), arg("gmm_stats")), "Call the training procedure.")
-    .def("initialize", &isv_initialize, (arg("self"), arg("isv_base"), arg("gmm_stats")), "Call the initialization procedure.")
-    .def("e_step", &isv_estep, (arg("self"), arg("isv_base"), arg("gmm_stats")), "Call the e-step procedure.")
-    .def("m_step", &isv_mstep, (arg("self"), arg("isv_base"), arg("gmm_stats")), "Call the m-step procedure.")
-    .def("finalize", &isv_finalize, (arg("self"), arg("isv_base"), arg("gmm_stats")), "Call the finalization procedure.")
-    .def("enrol", &isv_enrol, (arg("self"), arg("isv_machine"), arg("gmm_stats"), arg("n_iter")), "Call the enrolment procedure.")
-    .add_property("acc_u_a1", make_function(&bob::learn::misc::ISVTrainer::getAccUA1, return_value_policy<copy_const_reference>()), &isv_set_accUA1, "Accumulator updated during the E-step")
-    .add_property("acc_u_a2", make_function(&bob::learn::misc::ISVTrainer::getAccUA2, return_value_policy<copy_const_reference>()), &isv_set_accUA2, "Accumulator updated during the E-step")
-  ;
-
-  class_<bob::learn::misc::JFATrainer, boost::noncopyable >("JFATrainer", "A trainer for Joint Factor Analysis (JFA).\n\nReferences:\n[1] 'Explicit Modelling of Session Variability for Speaker Verification', R. Vogt, S. Sridharan, Computer Speech & Language, 2008, vol. 22, no. 1, pp. 17-38\n[2] 'Session Variability Modelling for Face Authentication', C. McCool, R. Wallace, M. McLaren, L. El Shafey, S. Marcel, IET Biometrics, 2013", init<optional<const size_t> >((arg("self"), arg("max_iterations")=10),"Initializes a new JFATrainer."))
-    .def(init<const bob::learn::misc::JFATrainer&>((arg("self"), arg("other")), "Copy constructs an JFATrainer"))
-    .add_property("max_iterations", &bob::learn::misc::JFATrainer::getMaxIterations, &bob::learn::misc::JFATrainer::setMaxIterations, "Max iterations")
-    .add_property("rng", &jfa_getRng, &jfa_setRng, "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.")
-    .add_property("__X__", &jfa_get_x, &jfa_set_x)
-    .add_property("__Y__", &jfa_get_y, &jfa_set_y)
-    .add_property("__Z__", &jfa_get_z, &jfa_set_z)
-    .def(self == self)
-    .def(self != self)
-    .def("is_similar_to", &bob::learn::misc::JFATrainer::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this JFATrainer with the 'other' one to be approximately the same.")
-    .def("train", &jfa_train, (arg("self"), arg("jfa_base"), arg("gmm_stats")), "Call the training procedure.")
-    .def("initialize", &jfa_initialize, (arg("self"), arg("jfa_base"), arg("gmm_stats")), "Call the initialization procedure.")
-    .def("train_loop", &jfa_train_loop, (arg("self"), arg("jfa_base"), arg("gmm_stats")), "Call the training procedure (without the initialization). This will train the three subspaces U, V and d.")
-    .def("e_step1", &jfa_estep1, (arg("self"), arg("jfa_base"), arg("gmm_stats")), "Call the 1st e-step procedure (for the V subspace).")
-    .def("m_step1", &jfa_mstep1, (arg("self"), arg("jfa_base"), arg("gmm_stats")), "Call the 1st m-step procedure (for the V subspace).")
-    .def("finalize1", &jfa_finalize1, (arg("self"), arg("jfa_base"), arg("gmm_stats")), "Call the 1st finalization procedure (for the V subspace).")
-    .def("e_step2", &jfa_estep2, (arg("self"), arg("jfa_base"), arg("gmm_stats")), "Call the 2nd e-step procedure (for the U subspace).")
-    .def("m_step2", &jfa_mstep2, (arg("self"), arg("jfa_base"), arg("gmm_stats")), "Call the 2nd m-step procedure (for the U subspace).")
-    .def("finalize2", &jfa_finalize2, (arg("self"), arg("jfa_base"), arg("gmm_stats")), "Call the 2nd finalization procedure (for the U subspace).")
-    .def("e_step3", &jfa_estep3, (arg("self"), arg("jfa_base"), arg("gmm_stats")), "Call the 3rd e-step procedure (for the d subspace).")
-    .def("m_step3", &jfa_mstep3, (arg("self"), arg("jfa_base"), arg("gmm_stats")), "Call the 3rd m-step procedure (for the d subspace).")
-    .def("finalize3", &jfa_finalize3, (arg("self"), arg("jfa_base"), arg("gmm_stats")), "Call the 3rd finalization procedure (for the d subspace).")
-    .def("enrol", &jfa_enrol, (arg("self"), arg("jfa_machine"), arg("gmm_stats"), arg("n_iter")), "Call the enrolment procedure.")
-    .add_property("acc_v_a1", make_function(&bob::learn::misc::JFATrainer::getAccVA1, return_value_policy<copy_const_reference>()), &jfa_set_accVA1, "Accumulator updated during the E-step")
-    .add_property("acc_v_a2", make_function(&bob::learn::misc::JFATrainer::getAccVA2, return_value_policy<copy_const_reference>()), &jfa_set_accVA2, "Accumulator updated during the E-step")
-    .add_property("acc_u_a1", make_function(&bob::learn::misc::JFATrainer::getAccUA1, return_value_policy<copy_const_reference>()), &jfa_set_accUA1, "Accumulator updated during the E-step")
-    .add_property("acc_u_a2", make_function(&bob::learn::misc::JFATrainer::getAccUA2, return_value_policy<copy_const_reference>()), &jfa_set_accUA2, "Accumulator updated during the E-step")
-    .add_property("acc_d_a1", make_function(&bob::learn::misc::JFATrainer::getAccDA1, return_value_policy<copy_const_reference>()), &jfa_set_accDA1, "Accumulator updated during the E-step")
-    .add_property("acc_d_a2", make_function(&bob::learn::misc::JFATrainer::getAccDA2, return_value_policy<copy_const_reference>()), &jfa_set_accDA2, "Accumulator updated during the E-step")
-  ;
-}
diff --git a/bob/learn/misc/old/kmeans.cc b/bob/learn/misc/old/kmeans.cc
deleted file mode 100644
index 96c06ecaa7f8c2845087754b490b50f1db595778..0000000000000000000000000000000000000000
--- a/bob/learn/misc/old/kmeans.cc
+++ /dev/null
@@ -1,146 +0,0 @@
-/**
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @date Tue Jul 26 15:11:33 2011 +0200
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <bob.blitz/capi.h>
-#include <bob.blitz/cleanup.h>
-#include <bob.io.base/api.h>
-
-#include "ndarray.h"
-
-#include <bob.learn.misc/KMeansMachine.h>
-
-using namespace boost::python;
-
-static tuple py_getVariancesAndWeightsForEachCluster(const bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray ar) {
-  size_t n_means = machine.getNMeans();
-  size_t n_inputs = machine.getNInputs();
-  bob::python::ndarray variances(bob::io::base::array::t_float64, n_means, n_inputs);
-  bob::python::ndarray weights(bob::io::base::array::t_float64, n_means);
-  blitz::Array<double,2> variances_ = variances.bz<double,2>();
-  blitz::Array<double,1> weights_ = weights.bz<double,1>();
-  machine.getVariancesAndWeightsForEachCluster(ar.bz<double,2>(), variances_, weights_);
-  return boost::python::make_tuple(variances.self(), weights.self());
-}
-
-static void py_getVariancesAndWeightsForEachClusterInit(const bob::learn::misc::KMeansMachine& machine, bob::python::ndarray variances, bob::python::ndarray weights) {
-  blitz::Array<double,2> variances_ = variances.bz<double,2>();
-  blitz::Array<double,1> weights_ = weights.bz<double,1>();
-  machine.getVariancesAndWeightsForEachClusterInit(variances_, weights_);
-}
-
-static void py_getVariancesAndWeightsForEachClusterAcc(const bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray ar, bob::python::ndarray variances, bob::python::ndarray weights) {
-  blitz::Array<double,2> variances_ = variances.bz<double,2>();
-  blitz::Array<double,1> weights_ = weights.bz<double,1>();
-  machine.getVariancesAndWeightsForEachClusterAcc(ar.bz<double,2>(), variances_, weights_);
-}
-
-static void py_getVariancesAndWeightsForEachClusterFin(const bob::learn::misc::KMeansMachine& machine, bob::python::ndarray variances, bob::python::ndarray weights) {
-  blitz::Array<double,2> variances_ = variances.bz<double,2>();
-  blitz::Array<double,1> weights_ = weights.bz<double,1>();
-  machine.getVariancesAndWeightsForEachClusterFin(variances_, weights_);
-}
-
-static object py_getMean(const bob::learn::misc::KMeansMachine& kMeansMachine, const size_t i) {
-  bob::python::ndarray mean(bob::io::base::array::t_float64, kMeansMachine.getNInputs());
-  blitz::Array<double,1> mean_ = mean.bz<double,1>();
-  kMeansMachine.getMean(i, mean_);
-  return mean.self();
-}
-
-static void py_setMean(bob::learn::misc::KMeansMachine& machine, const size_t i, bob::python::const_ndarray mean) {
-  machine.setMean(i, mean.bz<double,1>());
-}
-
-static void py_setMeans(bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray means) {
-  machine.setMeans(means.bz<double,2>());
-}
-
-static double py_getDistanceFromMean(const bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray x, const size_t i)
-{
-  return machine.getDistanceFromMean(x.bz<double,1>(), i);
-}
-
-static tuple py_getClosestMean(const bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray x)
-{
-  size_t closest_mean;
-  double min_distance;
-  machine.getClosestMean(x.bz<double,1>(), closest_mean, min_distance);
-  return boost::python::make_tuple(closest_mean, min_distance);
-}
-
-static double py_getMinDistance(const bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray input)
-{
-  return machine.getMinDistance(input.bz<double,1>());
-}
-
-static void py_setCacheMeans(bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray cache_means) {
-  machine.setCacheMeans(cache_means.bz<double,2>());
-}
-
-
-static boost::shared_ptr<bob::learn::misc::KMeansMachine> _init(boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  return boost::shared_ptr<bob::learn::misc::KMeansMachine>(new bob::learn::misc::KMeansMachine(*hdf5->f));
-}
-
-static void _load(bob::learn::misc::KMeansMachine& self, boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  self.load(*hdf5->f);
-}
-
-static void _save(const bob::learn::misc::KMeansMachine& self, boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  self.save(*hdf5->f);
-}
-
-void bind_machine_kmeans()
-{
-  class_<bob::learn::misc::KMeansMachine, boost::shared_ptr<bob::learn::misc::KMeansMachine>,
-         bases<bob::learn::misc::Machine<blitz::Array<double,1>, double> > >("KMeansMachine",
-      "This class implements a k-means classifier.\n"
-      "See Section 9.1 of Bishop, \"Pattern recognition and machine learning\", 2006",
-      init<>((arg("self"))))
-    .def("__init__", boost::python::make_constructor(&_init))
-    .def(init<const size_t, const size_t>((arg("self"), arg("n_means"), arg("n_inputs"))))
-    .def(init<bob::learn::misc::KMeansMachine&>((arg("self"), arg("other"))))
-    .def(self == self)
-    .def(self != self)
-    .def("is_similar_to", &bob::learn::misc::KMeansMachine::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this KMeansMachine with the 'other' one to be approximately the same.")
-    .add_property("means", make_function(&bob::learn::misc::KMeansMachine::getMeans, return_value_policy<copy_const_reference>()), &py_setMeans, "The mean vectors")
-    .add_property("__cache_means__", make_function(&bob::learn::misc::KMeansMachine::getCacheMeans, return_value_policy<copy_const_reference>()), &py_setCacheMeans, "The cache mean vectors. This should only be used when parallelizing the get_variances_and_weights_for_each_cluster() method")
-    .add_property("dim_d", &bob::learn::misc::KMeansMachine::getNInputs, "Number of inputs")
-    .add_property("dim_c", &bob::learn::misc::KMeansMachine::getNMeans, "Number of means (k)")
-    .def("resize", &bob::learn::misc::KMeansMachine::resize, (arg("self"), arg("n_means"), arg("n_inputs")), "Resize the number of means and inputs")
-    .def("get_mean", &py_getMean, (arg("self"), arg("i")), "Get the i'th mean")
-    .def("set_mean", &py_setMean, (arg("self"), arg("i"), arg("mean")), "Set the i'th mean")
-    .def("get_distance_from_mean", &py_getDistanceFromMean, (arg("self"), arg("x"), arg("i")),
-        "Return the power of two of the square Euclidean distance of the sample, x, to the i'th mean")
-    .def("get_closest_mean", &py_getClosestMean, (arg("self"), arg("x")),
-        "Calculate the index of the mean that is closest (in terms of square Euclidean distance) to the data sample, x")
-    .def("get_min_distance", &py_getMinDistance, (arg("self"), arg("input")),
-        "Output the minimum square Euclidean distance between the input and one of the means")
-    .def("get_variances_and_weights_for_each_cluster", &py_getVariancesAndWeightsForEachCluster, (arg("self"), arg("data")),
-        "For each mean, find the subset of the samples that is closest to that mean, and calculate\n"
-        "1) the variance of that subset (the cluster variance)\n"
-        "2) the proportion of the samples represented by that subset (the cluster weight)")
-    .def("__get_variances_and_weights_for_each_cluster_init__", &py_getVariancesAndWeightsForEachClusterInit, (arg("self"), arg("variances"), arg("weights")),
-        "For the parallel version of get_variances_and_weights_for_each_cluster()\n"
-        "Initialization step")
-    .def("__get_variances_and_weights_for_each_cluster_acc__", &py_getVariancesAndWeightsForEachClusterAcc, (arg("self"), arg("data"), arg("variances"), arg("weights")),
-        "For the parallel version of get_variances_and_weights_for_each_cluster()\n"
-        "Accumulation step")
-    .def("__get_variances_and_weights_for_each_cluster_fin__", &py_getVariancesAndWeightsForEachClusterFin, (arg("self"), arg("variances"), arg("weights")),
-        "For the parallel version of get_variances_and_weights_for_each_cluster()\n"
-        "Finalization step")
-    .def("load", &_load, (arg("self"), arg("config")), "Load from a Configuration")
-    .def("save", &_save, (arg("self"), arg("config")), "Save to a Configuration")
-    .def(self_ns::str(self_ns::self))
-  ;
-}
diff --git a/bob/learn/misc/old/kmeans_trainer.cc b/bob/learn/misc/old/kmeans_trainer.cc
deleted file mode 100644
index 0b90db77d5bcfbea5d927da0220bae8d87762c03..0000000000000000000000000000000000000000
--- a/bob/learn/misc/old/kmeans_trainer.cc
+++ /dev/null
@@ -1,143 +0,0 @@
-/**
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @date Thu Jun 9 18:12:33 2011 +0200
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "ndarray.h"
-#include <bob.learn.misc/KMeansTrainer.h>
-
-using namespace boost::python;
-
-typedef bob::learn::misc::EMTrainer<bob::learn::misc::KMeansMachine, blitz::Array<double,2> > EMTrainerKMeansBase;
-
-static void py_setZeroethOrderStats(bob::learn::misc::KMeansTrainer& op, bob::python::const_ndarray stats) {
-  const bob::io::base::array::typeinfo& info = stats.type();
-  if(info.dtype != bob::io::base::array::t_float64 || info.nd != 1)
-    PYTHON_ERROR(TypeError, "cannot set array of type '%s'", info.str().c_str());
-  op.setZeroethOrderStats(stats.bz<double,1>());
-}
-
-static void py_setFirstOrderStats(bob::learn::misc::KMeansTrainer& op, bob::python::const_ndarray stats) {
-  const bob::io::base::array::typeinfo& info = stats.type();
-  if(info.dtype != bob::io::base::array::t_float64 || info.nd != 2)
-    PYTHON_ERROR(TypeError, "cannot set array of type '%s'", info.str().c_str());
-  op.setFirstOrderStats(stats.bz<double,2>());
-}
-
-static void py_train(EMTrainerKMeansBase& trainer,
-  bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray sample)
-{
-  trainer.train(machine, sample.bz<double,2>());
-}
-
-static void py_initialize(EMTrainerKMeansBase& trainer,
-  bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray sample)
-{
-  trainer.initialize(machine, sample.bz<double,2>());
-}
-
-static void py_finalize(EMTrainerKMeansBase& trainer,
-  bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray sample)
-{
-  trainer.finalize(machine, sample.bz<double,2>());
-}
-
-static void py_eStep(EMTrainerKMeansBase& trainer,
-  bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray sample)
-{
-  trainer.eStep(machine, sample.bz<double,2>());
-}
-
-static void py_mStep(EMTrainerKMeansBase& trainer,
-  bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray sample)
-{
-  trainer.mStep(machine, sample.bz<double,2>());
-}
-
-// include the random API of bob.core
-#include <bob.core/random_api.h>
-static boost::python::object KMTB_getRng(EMTrainerKMeansBase& self){
-  // create new object
-  PyObject* o = PyBoostMt19937_Type.tp_alloc(&PyBoostMt19937_Type,0);
-  reinterpret_cast<PyBoostMt19937Object*>(o)->rng = self.getRng().get();
-  return boost::python::object(boost::python::handle<>(boost::python::borrowed(o)));
-}
-static boost::python::object KMT_getRng(bob::learn::misc::KMeansTrainer& self){
-  // create new object
-  PyObject* o = PyBoostMt19937_Type.tp_alloc(&PyBoostMt19937_Type,0);
-  reinterpret_cast<PyBoostMt19937Object*>(o)->rng = self.getRng().get();
-  return boost::python::object(boost::python::handle<>(boost::python::borrowed(o)));
-}
-
-#include <boost/make_shared.hpp>
-static void KMTB_setRng(EMTrainerKMeansBase& self, boost::python::object rng){
-  if (!PyBoostMt19937_Check(rng.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.core.random.mt19937 object");
-  PyBoostMt19937Object* o = reinterpret_cast<PyBoostMt19937Object*>(rng.ptr());
-  self.setRng(boost::make_shared<boost::mt19937>(*o->rng));
-}
-static void KMT_setRng(bob::learn::misc::KMeansTrainer& self, boost::python::object rng){
-  if (!PyBoostMt19937_Check(rng.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.core.random.mt19937 object");
-  PyBoostMt19937Object* o = reinterpret_cast<PyBoostMt19937Object*>(rng.ptr());
-  self.setRng(boost::make_shared<boost::mt19937>(*o->rng));
-}
-
-
-void bind_trainer_kmeans()
-{
-  class_<EMTrainerKMeansBase, boost::noncopyable>("EMTrainerKMeans", "The base python class for all EM-based trainers.", no_init)
-    .add_property("convergence_threshold", &EMTrainerKMeansBase::getConvergenceThreshold, &EMTrainerKMeansBase::setConvergenceThreshold, "Convergence threshold")
-    .add_property("max_iterations", &EMTrainerKMeansBase::getMaxIterations, &EMTrainerKMeansBase::setMaxIterations, "Max iterations")
-    .add_property("compute_likelihood", &EMTrainerKMeansBase::getComputeLikelihood, &EMTrainerKMeansBase::setComputeLikelihood, "Tells whether we compute the average min (square Euclidean) distance or not.")
-    .add_property("rng", &KMTB_getRng, &KMTB_setRng, "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.")
-    .def(self == self)
-    .def(self != self)
-    .def("train", &py_train, (arg("self"), arg("machine"), arg("data")), "Train a machine using data")
-    .def("initialize", &py_initialize, (arg("machine"), arg("data")), "This method is called before the EM algorithm")
-    .def("e_step", &py_eStep, (arg("self"), arg("machine"), arg("data")),
-       "Update the hidden variable distribution (or the sufficient statistics) given the Machine parameters. "
-       "Also, calculate the average output of the Machine given these parameters.\n"
-       "Return the average output of the Machine across the dataset. "
-       "The EM algorithm will terminate once the change in average_output "
-       "is less than the convergence_threshold.")
-    .def("m_step", &py_mStep, (arg("self"), arg("machine"), arg("data")), "Update the Machine parameters given the hidden variable distribution (or the sufficient statistics)")
-    .def("compute_likelihood", &EMTrainerKMeansBase::computeLikelihood, (arg("self"), arg("machine")), "Returns the average min (square Euclidean) distance")
-    .def("finalize", &py_finalize, (arg("self"), arg("machine"), arg("data")), "This method is called after the EM algorithm")
-  ;
-
-  // Starts binding the KMeansTrainer
-  class_<bob::learn::misc::KMeansTrainer, boost::shared_ptr<bob::learn::misc::KMeansTrainer>, boost::noncopyable, bases<EMTrainerKMeansBase> > KMT("KMeansTrainer",
-      "Trains a KMeans machine.\n"
-      "This class implements the expectation-maximization algorithm for a k-means machine.\n"
-      "See Section 9.1 of Bishop, \"Pattern recognition and machine learning\", 2006\n"
-      "It uses a random initialization of the means followed by the expectation-maximization algorithm",
-      no_init
-      );
-
-  // Binds methods that does not have nested enum values as default parameters
-  KMT.def(self == self)
-     .def(self != self)
-     .add_property("initialization_method", &bob::learn::misc::KMeansTrainer::getInitializationMethod, &bob::learn::misc::KMeansTrainer::setInitializationMethod, "The initialization method to generate the initial means.")
-     .add_property("rng", &KMT_getRng, &KMT_setRng, "The Mersenne Twister mt19937 random generator used for the initialization of the means.")
-     .add_property("average_min_distance", &bob::learn::misc::KMeansTrainer::getAverageMinDistance, &bob::learn::misc::KMeansTrainer::setAverageMinDistance, "Average min (square Euclidean) distance. Useful to parallelize the E-step.")
-     .add_property("zeroeth_order_statistics", make_function(&bob::learn::misc::KMeansTrainer::getZeroethOrderStats, return_value_policy<copy_const_reference>()), &py_setZeroethOrderStats, "The zeroeth order statistics. Useful to parallelize the E-step.")
-     .add_property("first_order_statistics", make_function(&bob::learn::misc::KMeansTrainer::getFirstOrderStats, return_value_policy<copy_const_reference>()), &py_setFirstOrderStats, "The first order statistics. Useful to parallelize the E-step.")
-    ;
-
-  // Sets the scope to the one of the KMeansTrainer
-  scope s(KMT);
-
-  // Adds enum in the previously defined current scope
-  enum_<bob::learn::misc::KMeansTrainer::InitializationMethod>("initialization_method_type")
-    .value("RANDOM", bob::learn::misc::KMeansTrainer::RANDOM)
-    .value("RANDOM_NO_DUPLICATE", bob::learn::misc::KMeansTrainer::RANDOM_NO_DUPLICATE)
-#if BOOST_VERSION >= 104700
-    .value("KMEANS_PLUS_PLUS", bob::learn::misc::KMeansTrainer::KMEANS_PLUS_PLUS)
-#endif
-    .export_values()
-    ;
-
-  // Binds methods that has nested enum values as default parameters
-  KMT.def(init<optional<double,int,bool,bob::learn::misc::KMeansTrainer::InitializationMethod> >((arg("self"), arg("convergence_threshold")=0.001, arg("max_iterations")=10, arg("compute_likelihood")=true, arg("initialization_method")=bob::learn::misc::KMeansTrainer::RANDOM)));
-}
diff --git a/bob/learn/misc/old/linearscoring.cc b/bob/learn/misc/old/linearscoring.cc
deleted file mode 100644
index b61487968e62386870c53649505e1844aeeb4615..0000000000000000000000000000000000000000
--- a/bob/learn/misc/old/linearscoring.cc
+++ /dev/null
@@ -1,146 +0,0 @@
-/**
- * @author Francois Moulin <Francois.Moulin@idiap.ch>
- * @date Wed Jul 13 16:00:04 2011 +0200
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "ndarray.h"
-
-#include <vector>
-#include <bob.learn.misc/LinearScoring.h>
-#include <boost/python/stl_iterator.hpp>
-
-using namespace boost::python;
-
-static void convertGMMMeanList(object models, std::vector<blitz::Array<double,1> >& models_c) {
-  stl_input_iterator<bob::python::const_ndarray> dbegin(models), dend;
-  std::vector<bob::python::const_ndarray> vmodels(dbegin, dend);
-
-  for(std::vector<bob::python::const_ndarray>::iterator it=vmodels.begin();
-      it!=vmodels.end(); ++it)
-    models_c.push_back(it->bz<double,1>());
-}
-
-static void convertGMMStatsList(object test_stats, std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> >& test_stats_c) {
-  stl_input_iterator<boost::shared_ptr<bob::learn::misc::GMMStats> > dbegin(test_stats), dend;
-  test_stats_c.assign(dbegin, dend);
-}
-
-static void convertChannelOffsetList(object test_channelOffset, std::vector<blitz::Array<double,1> >& test_channelOffset_c) {
-  stl_input_iterator<bob::python::const_ndarray> dbegin(test_channelOffset), dend;
-  std::vector<bob::python::const_ndarray> vtest_channelOffset(dbegin, dend);
-
-  for(std::vector<bob::python::const_ndarray>::iterator it=vtest_channelOffset.begin();
-      it!=vtest_channelOffset.end(); ++it)
-    test_channelOffset_c.push_back(it->bz<double,1>());
-}
-
-static void convertGMMMachineList(object models, std::vector<boost::shared_ptr<const bob::learn::misc::GMMMachine> >& models_c) {
-  stl_input_iterator<boost::shared_ptr<bob::learn::misc::GMMMachine> > dbegin(models), dend;
-  models_c.assign(dbegin, dend);
-}
-
-static object linearScoring1(object models,
-    bob::python::const_ndarray ubm_mean, bob::python::const_ndarray ubm_variance,
-    object test_stats, object test_channelOffset = list(), // Empty list
-    bool frame_length_normalisation = false)
-{
-  blitz::Array<double,1> ubm_mean_ = ubm_mean.bz<double,1>();
-  blitz::Array<double,1> ubm_variance_ = ubm_variance.bz<double,1>();
-
-  std::vector<blitz::Array<double,1> > models_c;
-  convertGMMMeanList(models, models_c);
-
-  std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> > test_stats_c;
-  convertGMMStatsList(test_stats, test_stats_c);
-
-  bob::python::ndarray ret(bob::io::base::array::t_float64, models_c.size(), test_stats_c.size());
-  blitz::Array<double,2> ret_ = ret.bz<double,2>();
-  if (test_channelOffset.ptr() == Py_None || len(test_channelOffset) == 0) { //list is empty
-    bob::learn::misc::linearScoring(models_c, ubm_mean_, ubm_variance_, test_stats_c, frame_length_normalisation, ret_);
-  }
-  else {
-    std::vector<blitz::Array<double,1> > test_channelOffset_c;
-    convertChannelOffsetList(test_channelOffset, test_channelOffset_c);
-    bob::learn::misc::linearScoring(models_c, ubm_mean_, ubm_variance_, test_stats_c, test_channelOffset_c, frame_length_normalisation, ret_);
-  }
-
-  return ret.self();
-}
-
-static object linearScoring2(object models,
-    bob::learn::misc::GMMMachine& ubm,
-    object test_stats, object test_channelOffset = list(), // Empty list
-    bool frame_length_normalisation = false)
-{
-  std::vector<boost::shared_ptr<const bob::learn::misc::GMMMachine> > models_c;
-  convertGMMMachineList(models, models_c);
-
-  std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> > test_stats_c;
-  convertGMMStatsList(test_stats, test_stats_c);
-
-  bob::python::ndarray ret(bob::io::base::array::t_float64, models_c.size(), test_stats_c.size());
-  blitz::Array<double,2> ret_ = ret.bz<double,2>();
-  if (test_channelOffset.ptr() == Py_None || len(test_channelOffset) == 0) { //list is empty
-    bob::learn::misc::linearScoring(models_c, ubm, test_stats_c, frame_length_normalisation, ret_);
-  }
-  else {
-    std::vector<blitz::Array<double,1> > test_channelOffset_c;
-    convertChannelOffsetList(test_channelOffset, test_channelOffset_c);
-    bob::learn::misc::linearScoring(models_c, ubm, test_stats_c, test_channelOffset_c, frame_length_normalisation, ret_);
-  }
-
-  return ret.self();
-}
-
-static double linearScoring3(bob::python::const_ndarray model,
-  bob::python::const_ndarray ubm_mean, bob::python::const_ndarray ubm_var,
-  const bob::learn::misc::GMMStats& test_stats, bob::python::const_ndarray test_channelOffset,
-  const bool frame_length_normalisation = false)
-{
-  return bob::learn::misc::linearScoring(model.bz<double,1>(), ubm_mean.bz<double,1>(),
-          ubm_var.bz<double,1>(), test_stats, test_channelOffset.bz<double,1>(), frame_length_normalisation);
-}
-
-BOOST_PYTHON_FUNCTION_OVERLOADS(linearScoring1_overloads, linearScoring1, 4, 6)
-BOOST_PYTHON_FUNCTION_OVERLOADS(linearScoring2_overloads, linearScoring2, 3, 5)
-BOOST_PYTHON_FUNCTION_OVERLOADS(linearScoring3_overloads, linearScoring3, 5, 6)
-
-void bind_machine_linear_scoring() {
-  def("linear_scoring", linearScoring1, linearScoring1_overloads(args("models", "ubm_mean", "ubm_variance", "test_stats", "test_channelOffset", "frame_length_normalisation"),
-    "Compute a matrix of scores using linear scoring.\n"
-    "Return a 2D matrix of scores, scores[m, s] is the score for model m against statistics s\n"
-    "\n"
-    "Warning Each GMM must have the same size.\n"
-    "\n"
-    "models       -- list of mean supervectors for the client models\n"
-    "ubm_mean     -- mean supervector for the world model\n"
-    "ubm_variance -- variance supervector for the world model\n"
-    "test_stats   -- list of accumulate statistics for each test trial\n"
-    "test_channelOffset -- \n"
-    "frame_length_normlisation -- perform a normalisation by the number of feature vectors\n"
-    ));
-  def("linear_scoring", linearScoring2, linearScoring2_overloads(args("models", "ubm", "test_stats", "test_channel_offset", "frame_length_normalisation"),
-    "Compute a matrix of scores using linear scoring.\n"
-    "Return a 2D matrix of scores, scores[m, s] is the score for model m against statistics s\n"
-    "\n"
-    "Warning Each GMM must have the same size.\n"
-    "\n"
-    "models      -- list of client models\n"
-    "ubm         -- world model\n"
-    "test_stats  -- list of accumulate statistics for each test trial\n"
-    "test_channel_offset -- \n"
-    "frame_length_normlisation -- perform a normalisation by the number of feature vectors\n"
-    ));
-  def("linear_scoring", linearScoring3, linearScoring3_overloads(args("model", "ubm_mean", "ubm_variance", "test_stats", "test_channelOffset", "frame_length_normalisation"),
-    "Compute a score using linear scoring.\n"
-    "\n"
-    "model        -- mean supervectors for the client model\n"
-    "ubm_mean     -- mean supervector for the world model\n"
-    "ubm_variance -- variance supervector for the world model\n"
-    "test_stats   -- accumulate statistics for each test trial\n"
-    "test_channelOffset -- \n"
-    "frame_length_normlisation -- perform a normalisation by the number of feature vectors\n"
-    ));
-}
diff --git a/bob/learn/misc/old/machine.cc b/bob/learn/misc/old/machine.cc
deleted file mode 100644
index 31deb426b226881db55b11f16fa4e8ac8a07fd7e..0000000000000000000000000000000000000000
--- a/bob/learn/misc/old/machine.cc
+++ /dev/null
@@ -1,35 +0,0 @@
-/**
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @date Tue Jul 26 15:11:33 2011 +0200
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "ndarray.h"
-#include <bob.learn.misc/Machine.h>
-
-using namespace boost::python;
-
-static double forward(const bob::learn::misc::Machine<blitz::Array<double,1>, double>& m,
-    bob::python::const_ndarray input) {
-  double output;
-  m.forward(input.bz<double,1>(), output);
-  return output;
-}
-
-static double forward_(const bob::learn::misc::Machine<blitz::Array<double,1>, double>& m,
-    bob::python::const_ndarray input) {
-  double output;
-  m.forward_(input.bz<double,1>(), output);
-  return output;
-}
-
-void bind_machine_base()
-{
-  class_<bob::learn::misc::Machine<blitz::Array<double,1>, double>, boost::noncopyable>("MachineDoubleBase",
-      "Root class for all Machine<blitz::Array<double,1>, double>", no_init)
-    .def("__call__", &forward_, (arg("self"), arg("input")), "Executes the machine on the given 1D numpy array of float64, and returns the output. NO CHECK is performed.")
-    .def("forward", &forward, (arg("self"), arg("input")), "Executes the machine on the given 1D numpy array of float64, and returns the output.")
-    .def("forward_", &forward_, (arg("self"), arg("input")), "Executes the machine on the given 1D numpy array of float64, and returns the output. NO CHECK is performed.")
-  ;
-}
diff --git a/bob/learn/misc/old/main.cc b/bob/learn/misc/old/main.cc
deleted file mode 100644
index f73f4fe6c4c9e814fa6d46db7e74e367fbd2d07c..0000000000000000000000000000000000000000
--- a/bob/learn/misc/old/main.cc
+++ /dev/null
@@ -1,93 +0,0 @@
-/**
- * @author André Anjos <andre.anjos@idiap.ch>
- * @date Tue Jan 18 17:07:26 2011 +0100
- *
- * @brief Combines all modules to make up the complete bindings
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#ifdef NO_IMPORT_ARRAY
-#undef NO_IMPORT_ARRAY
-#endif
-
-#include <bob.blitz/capi.h>
-#include <bob.blitz/cleanup.h>
-#include <bob.io.base/api.h>
-#include <bob.core/random_api.h>
-
-#include "ndarray.h"
-
-/** extra bindings required for compatibility **/
-void bind_core_tinyvector();
-void bind_core_ndarray_numpy();
-void bind_core_bz_numpy();
-
-/** machine bindings **/
-void bind_machine_base();
-void bind_machine_gaussian();
-void bind_machine_gmm();
-void bind_machine_kmeans();
-void bind_machine_linear_scoring();
-void bind_machine_ztnorm();
-void bind_machine_jfa();
-void bind_machine_ivector();
-void bind_machine_plda();
-
-/** trainer bindings **/
-void bind_trainer_gmm();
-void bind_trainer_kmeans();
-void bind_trainer_jfa();
-void bind_trainer_ivector();
-void bind_trainer_plda();
-void bind_trainer_empca();
-
-BOOST_PYTHON_MODULE(_old_library) {
-
-  if (import_bob_blitz() < 0) {
-    PyErr_Print();
-    PyErr_Format(PyExc_ImportError, "cannot import `bob.blitz'");
-    return;
-  }
-
-  if (import_bob_core_random() < 0) {
-    PyErr_Print();
-    PyErr_Format(PyExc_ImportError, "cannot import `bob.core.random'");
-    return;
-  }
-
-  if (import_bob_io_base() < 0) {
-    PyErr_Print();
-    PyErr_Format(PyExc_ImportError, "cannot import `bob.io.base'");
-    return;
-  }
-
-  boost::python::docstring_options docopt(true, true, false);
-
-  bob::python::setup_python("miscelaneous machines and trainers not yet ported into the new framework");
-
-  /** extra bindings required for compatibility **/
-  bind_core_tinyvector();
-  bind_core_ndarray_numpy();
-  bind_core_bz_numpy();
-
-  /** machine bindings **/
-  bind_machine_base();
-  bind_machine_gaussian();
-  bind_machine_gmm();
-  bind_machine_kmeans();
-  bind_machine_linear_scoring();
-  bind_machine_ztnorm();
-  bind_machine_jfa();
-  bind_machine_ivector();
-  bind_machine_plda();
-
-  /** trainer bindings **/
-  bind_trainer_gmm();
-  bind_trainer_kmeans();
-  bind_trainer_jfa();
-  bind_trainer_ivector();
-  bind_trainer_plda();
-  bind_trainer_empca();
-
-}
diff --git a/bob/learn/misc/old/ndarray.cc b/bob/learn/misc/old/ndarray.cc
deleted file mode 100644
index d5f728f523343ed93292c8c6fbcd6ffde127f677..0000000000000000000000000000000000000000
--- a/bob/learn/misc/old/ndarray.cc
+++ /dev/null
@@ -1,1015 +0,0 @@
-/**
- * @author André Anjos <andre.anjos@idiap.ch>
- * @date Tue Jan 18 17:07:26 2011 +0100
- *
- * @brief Implementation of the ndarray class
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <boost/python/numeric.hpp>
-#include <boost/format.hpp>
-#include <stdexcept>
-#include <dlfcn.h>
-
-//#ifdef NO_IMPORT_ARRAY
-//#undef NO_IMPORT_ARRAY
-//#endif
-#include "ndarray.h"
-
-#define TP_ARRAY(x) ((PyArrayObject*)x.ptr())
-#define TP_OBJECT(x) (x.ptr())
-
-#define NUMPY17_API 0x00000007
-#define NUMPY16_API 0x00000006
-#define NUMPY14_API 0x00000004
-
-#include <bob.core/logging.h>
-
-#if PY_VERSION_HEX >= 0x03000000
-static void* wrap_import_array() {
-//  import_array();
-  return 0;
-}
-#else
-static void wrap_import_array() {
-//  import_array();
-  return;
-}
-#endif
-
-void bob::python::setup_python(const char* module_docstring) {
-
-  // Documentation options
-  if (module_docstring) boost::python::scope().attr("__doc__") = module_docstring;
-
-  // Required for logging C++ <-> Python interaction
-  if (!PyEval_ThreadsInitialized()) PyEval_InitThreads();
-
-  // Gets the current dlopenflags and save it
-  PyThreadState* tstate = PyThreadState_Get();
-  int old_value = tstate->interp->dlopenflags;
-
-  // Unsets the RTLD_GLOBAL flag
-  tstate->interp->dlopenflags = old_value & (~RTLD_GLOBAL);
-
-  // Loads numpy with the RTLD_GLOBAL flag unset
-  wrap_import_array();
-
-  // Resets the RTLD_GLOBAL flag
-  tstate->interp->dlopenflags = old_value;
-
-  //Sets the boost::python::numeric::array interface to use numpy.ndarray
-  //as basis. This is not strictly required, but good to set as a baseline.
-  boost::python::numeric::array::set_module_and_type("numpy", "ndarray");
-
-  // Make sure we are not running against the wrong version of NumPy
-  if (NPY_VERSION != PyArray_GetNDArrayCVersion()) {
-    PYTHON_ERROR(ImportError, "module compiled against ABI version 0x%08x but this version of numpy is 0x%08x - make sure you compile and execute against the same or compatible versions", (int) NPY_VERSION, (int) PyArray_GetNDArrayCVersion());
-  }
-
-#if NPY_FEATURE_VERSION >= NUMPY14_API /* NumPy C-API version >= 1.4 */
-  if (NPY_FEATURE_VERSION > PyArray_GetNDArrayCFeatureVersion()) {
-    PYTHON_ERROR(ImportError, "module compiled against API version 0x%08x but this version of numpy is 0x%08x - make sure you compile and execute against the same or compatible versions", (int) NPY_FEATURE_VERSION, (int) PyArray_GetNDArrayCFeatureVersion());
-  }
-#endif
-
-}
-
-/***************************************************************************
- * Dtype (PyArray_Descr) manipulations                                     *
- ***************************************************************************/
-
-int bob::python::type_to_num(bob::io::base::array::ElementType type) {
-
-  switch(type) {
-    case bob::io::base::array::t_bool:
-      return NPY_BOOL;
-    case bob::io::base::array::t_int8:
-      return NPY_INT8;
-    case bob::io::base::array::t_int16:
-      return NPY_INT16;
-    case bob::io::base::array::t_int32:
-      return NPY_INT32;
-    case bob::io::base::array::t_int64:
-      return NPY_INT64;
-    case bob::io::base::array::t_uint8:
-      return NPY_UINT8;
-    case bob::io::base::array::t_uint16:
-      return NPY_UINT16;
-    case bob::io::base::array::t_uint32:
-      return NPY_UINT32;
-    case bob::io::base::array::t_uint64:
-      return NPY_UINT64;
-    case bob::io::base::array::t_float32:
-      return NPY_FLOAT32;
-    case bob::io::base::array::t_float64:
-      return NPY_FLOAT64;
-#ifdef NPY_FLOAT128
-    case bob::io::base::array::t_float128:
-      return NPY_FLOAT128;
-#endif
-    case bob::io::base::array::t_complex64:
-      return NPY_COMPLEX64;
-    case bob::io::base::array::t_complex128:
-      return NPY_COMPLEX128;
-#ifdef NPY_COMPLEX256
-    case bob::io::base::array::t_complex256:
-      return NPY_COMPLEX256;
-#endif
-    default:
-      PYTHON_ERROR(TypeError, "unsupported C++ element type (%s)", bob::io::base::array::stringize(type));
-  }
-
-}
-
-static bob::io::base::array::ElementType signed_integer_type(int bits) {
-  switch(bits) {
-    case 8:
-      return bob::io::base::array::t_int8;
-    case 16:
-      return bob::io::base::array::t_int16;
-    case 32:
-      return bob::io::base::array::t_int32;
-    case 64:
-      return bob::io::base::array::t_int64;
-    default:
-      PYTHON_ERROR(TypeError, "unsupported signed integer element type with %d bits", bits);
-  }
-}
-
-static bob::io::base::array::ElementType unsigned_integer_type(int bits) {
-  switch(bits) {
-    case 8:
-      return bob::io::base::array::t_uint8;
-    case 16:
-      return bob::io::base::array::t_uint16;
-    case 32:
-      return bob::io::base::array::t_uint32;
-    case 64:
-      return bob::io::base::array::t_uint64;
-    default:
-      PYTHON_ERROR(TypeError, "unsupported unsigned integer element type with %d bits", bits);
-  }
-}
-
-bob::io::base::array::ElementType bob::python::num_to_type(int num) {
-  switch(num) {
-    case NPY_BOOL:
-      return bob::io::base::array::t_bool;
-
-    //signed integers
-    case NPY_BYTE:
-      return signed_integer_type(NPY_BITSOF_CHAR);
-    case NPY_SHORT:
-      return signed_integer_type(NPY_BITSOF_SHORT);
-    case NPY_INT:
-      return signed_integer_type(NPY_BITSOF_INT);
-    case NPY_LONG:
-      return signed_integer_type(NPY_BITSOF_LONG);
-    case NPY_LONGLONG:
-      return signed_integer_type(NPY_BITSOF_LONGLONG);
-
-    //unsigned integers
-    case NPY_UBYTE:
-      return unsigned_integer_type(NPY_BITSOF_CHAR);
-    case NPY_USHORT:
-      return unsigned_integer_type(NPY_BITSOF_SHORT);
-    case NPY_UINT:
-      return unsigned_integer_type(NPY_BITSOF_INT);
-    case NPY_ULONG:
-      return unsigned_integer_type(NPY_BITSOF_LONG);
-    case NPY_ULONGLONG:
-      return unsigned_integer_type(NPY_BITSOF_LONGLONG);
-
-    //floats
-    case NPY_FLOAT32:
-      return bob::io::base::array::t_float32;
-    case NPY_FLOAT64:
-      return bob::io::base::array::t_float64;
-#ifdef NPY_FLOAT128
-    case NPY_FLOAT128:
-      return bob::io::base::array::t_float128;
-#endif
-
-    //complex
-    case NPY_COMPLEX64:
-      return bob::io::base::array::t_complex64;
-    case NPY_COMPLEX128:
-      return bob::io::base::array::t_complex128;
-#ifdef NPY_COMPLEX256
-    case NPY_COMPLEX256:
-      return bob::io::base::array::t_complex256;
-#endif
-
-    default:
-      PYTHON_ERROR(TypeError, "unsupported NumPy element type (%d)", num);
-  }
-
-}
-
-template <> int bob::python::ctype_to_num<bool>(void)
-{ return NPY_BOOL; }
-
-//! @cond SKIP_DOXYGEN_WARNINGS
-template <> int bob::python::ctype_to_num<int8_t>(void)
-{ return NPY_INT8; }
-template <> int bob::python::ctype_to_num<uint8_t>(void)
-{ return NPY_UINT8; }
-template <> int bob::python::ctype_to_num<int16_t>(void)
-{ return NPY_INT16; }
-template <> int bob::python::ctype_to_num<uint16_t>(void)
-{ return NPY_UINT16; }
-template <> int bob::python::ctype_to_num<int32_t>(void)
-{ return NPY_INT32; }
-template <> int bob::python::ctype_to_num<uint32_t>(void)
-{ return NPY_UINT32; }
-template <> int bob::python::ctype_to_num<int64_t>(void)
-{ return NPY_INT64; }
-template <> int bob::python::ctype_to_num<uint64_t>(void)
-{ return NPY_UINT64; }
-template <> int bob::python::ctype_to_num<float>(void)
-{ return NPY_FLOAT32; }
-template <> int bob::python::ctype_to_num<double>(void)
-{ return NPY_FLOAT64; }
-#ifdef NPY_FLOAT128
-template <> int bob::python::ctype_to_num<long double>(void)
-{ return NPY_FLOAT128; }
-#endif
-template <> int bob::python::ctype_to_num<std::complex<float> >(void)
-{ return NPY_COMPLEX64; }
-template <> int bob::python::ctype_to_num<std::complex<double> >(void)
-{ return NPY_COMPLEX128; }
-#ifdef NPY_COMPLEX256
-template <> int bob::python::ctype_to_num<std::complex<long double> >(void)
-{ return NPY_COMPLEX256; }
-#endif
-//! @endcond SKIP_DOXYGEN_WARNINGS
-
-bob::io::base::array::ElementType bob::python::array_to_type(const boost::python::numeric::array& a) {
-  return bob::python::num_to_type(PyArray_DESCR(TP_ARRAY(a))->type_num);
-}
-
-size_t bob::python::array_to_ndim(const boost::python::numeric::array& a) {
-  return PyArray_NDIM(TP_ARRAY(a));
-}
-
-#define TP_DESCR(x) ((PyArray_Descr*)x.ptr())
-
-bob::python::dtype::dtype (boost::python::object dtype_like) {
-  PyArray_Descr* tmp = 0;
-  if (!PyArray_DescrConverter2(dtype_like.ptr(), &tmp)) {
-    std::string dtype_str = boost::python::extract<std::string>(boost::python::str(dtype_like));
-    PYTHON_ERROR(TypeError, "cannot convert input dtype-like object (%s) to proper dtype", dtype_str.c_str());
-  }
-  boost::python::handle<> hdl(boost::python::borrowed((PyObject*)tmp));
-  m_self = boost::python::object(hdl);
-}
-
-bob::python::dtype::dtype (PyArray_Descr* descr) {
-  if (descr) {
-    boost::python::handle<> hdl((PyObject*)descr); //< raises if NULL
-    m_self = boost::python::object(hdl);
-  }
-}
-
-bob::python::dtype::dtype(int typenum) {
-  PyArray_Descr* tmp = PyArray_DescrFromType(typenum);
-  boost::python::handle<> hdl(boost::python::borrowed((PyObject*)tmp));
-  m_self = boost::python::object(hdl);
-}
-
-bob::python::dtype::dtype(bob::io::base::array::ElementType eltype) {
-  if (eltype != bob::io::base::array::t_unknown) {
-    PyArray_Descr* tmp = PyArray_DescrFromType(bob::python::type_to_num(eltype));
-    boost::python::handle<> hdl(boost::python::borrowed((PyObject*)tmp));
-    m_self = boost::python::object(hdl);
-  }
-}
-
-bob::python::dtype::dtype(const bob::python::dtype& other): m_self(other.m_self)
-{
-}
-
-bob::python::dtype::dtype() {
-}
-
-bob::python::dtype::~dtype() { }
-
-bob::python::dtype& bob::python::dtype::operator= (const bob::python::dtype& other) {
-  m_self = other.m_self;
-  return *this;
-}
-
-bool bob::python::dtype::has_native_byteorder() const {
-  return TPY_ISNONE(m_self)? false : (PyArray_EquivByteorders(TP_DESCR(m_self)->byteorder, NPY_NATIVE) || TP_DESCR(m_self)->elsize == 1);
-}
-
-bool bob::python::dtype::has_type(bob::io::base::array::ElementType _eltype) const {
-  return eltype() == _eltype;
-}
-
-bob::io::base::array::ElementType bob::python::dtype::eltype() const {
-  return TPY_ISNONE(m_self)? bob::io::base::array::t_unknown :
-    bob::python::num_to_type(TP_DESCR(m_self)->type_num);
-}
-
-int bob::python::dtype::type_num() const {
-  return TPY_ISNONE(m_self)? -1 : TP_DESCR(m_self)->type_num;
-}
-
-boost::python::str bob::python::dtype::str() const {
-  return boost::python::str(m_self);
-}
-
-std::string bob::python::dtype::cxx_str() const {
-  return boost::python::extract<std::string>(this->str());
-}
-
-PyArray_Descr* bob::python::dtype::descr() {
-  return (PyArray_Descr*)m_self.ptr();
-}
-
-/****************************************************************************
- * Free methods                                                             *
- ****************************************************************************/
-
-void bob::python::typeinfo_ndarray_ (const boost::python::object& o, bob::io::base::array::typeinfo& i) {
-  PyArrayObject* npy = TP_ARRAY(o);
-  npy_intp strides[NPY_MAXDIMS];
-  for (int k=0; k<PyArray_NDIM(npy); ++k) strides[k] = PyArray_STRIDES(npy)[k]/PyArray_DESCR(npy)->elsize;
-  i.set<npy_intp>(bob::python::num_to_type(PyArray_DESCR(npy)->type_num), PyArray_NDIM(npy), PyArray_DIMS(npy), strides);
-}
-
-void bob::python::typeinfo_ndarray (const boost::python::object& o, bob::io::base::array::typeinfo& i) {
-  if (!PyArray_Check(o.ptr())) {
-    throw std::runtime_error("invalid input: cannot extract typeinfo object from anything else than ndarray");
-  }
-  bob::python::typeinfo_ndarray_(o, i);
-}
-
-#if NPY_FEATURE_VERSION < NUMPY16_API /* NumPy C-API version >= 1.6 */
-/**
- * This method emulates the behavior of PyArray_GetArrayParamsFromObject from
- * NumPy >= 1.6 and is used when compiling and liking against older versions of
- * NumPy.
- */
-static int _GetArrayParamsFromObject(PyObject* op,
-    PyArray_Descr* requested_dtype,
-    npy_bool writeable,
-    PyArray_Descr** out_dtype,
-    int* out_ndim,
-    npy_intp* out_dims,
-    PyArrayObject** out_arr,
-    PyObject*) {
-
-  if (PyArray_Check(op)) { //it is already an array, easy
-
-    PyArrayObject* arr = reinterpret_cast<PyArrayObject*>(op);
-
-    if (requested_dtype && !PyArray_EquivTypes(PyArray_DESCR(arr), requested_dtype)) {
-
-#if NPY_FEATURE_VERSION >= NUMPY16_API /* NumPy C-API version >= 1.6 */
-      if (PyArray_CanCastTypeTo(PyArray_DESCR(arr), requested_dtype, NPY_SAFE_CASTING))
-#else
-      if (PyArray_CanCastTo(PyArray_DESCR(arr), requested_dtype))
-#endif
-      {
-        (*out_arr) = 0;
-        (*out_dtype) = PyArray_DESCR(arr);
-        (*out_ndim) = PyArray_NDIM(arr);
-        for (int i=0; i<PyArray_NDIM(arr); ++i)
-          out_dims[i] = PyArray_DIM(arr,i);
-        // we need to cast the array, write-ability will not hold...
-        return writeable? 1 : 0;
-      }
-
-      else {
-        return 1;
-      }
-
-    }
-
-    //if you get to this point, the types are equivalent or there was no type
-    (*out_arr) = (PyArrayObject*)PyArray_FromArray(arr, 0, 0);
-    (*out_dtype) = 0;
-    (*out_ndim) = 0;
-    return writeable? (!PyArray_ISWRITEABLE(arr)) : 0;
-
-  }
-
-  else { //it is not an array -- try a brute-force conversion
-
-    TDEBUG1("[non-optimal] using NumPy version < 1.6 requires we convert input data for convertibility check - compile against NumPy >= 1.6 to improve performance");
-    boost::python::handle<> hdl(boost::python::allow_null(PyArray_FromAny(op, requested_dtype, 0, 0, 0, 0)));
-    boost::python::object array(hdl);
-
-    if (TPY_ISNONE(array)) return 1;
-
-    //if the conversion worked, you can now fill in the parameters
-    (*out_arr) = 0;
-    (*out_dtype) = PyArray_DESCR(TP_ARRAY(array));
-    (*out_ndim) = PyArray_NDIM(TP_ARRAY(array));
-    for (int i=0; i<PyArray_NDIM(TP_ARRAY(array)); ++i)
-      out_dims[i] = PyArray_DIM(TP_ARRAY(array),i);
-
-    //in this mode, the resulting object will never be write-able.
-    return writeable? 1 : 0;
-
-  }
-
-  return 0; //turn-off c compiler warnings...
-
-}
-#endif
-
-bob::python::convert_t bob::python::convertible(boost::python::object array_like, bob::io::base::array::typeinfo& info,
-    bool writeable, bool behaved) {
-
-  int ndim = 0;
-  npy_intp dims[NPY_MAXDIMS];
-  PyArrayObject* arr = 0;
-  PyArray_Descr* dtype = 0;
-
-  int not_convertible =
-#if NPY_FEATURE_VERSION >= NUMPY16_API /* NumPy C-API version >= 1.6 */
-    PyArray_GetArrayParamsFromObject
-#else
-    _GetArrayParamsFromObject
-#endif
-    (array_like.ptr(), //input object pointer
-     0,                //requested dtype (if need to enforce)
-     writeable,        //writeable?
-     &dtype,           //dtype assessment - borrowed
-     &ndim,            //assessed number of dimensions
-     dims,             //assessed shape
-     &arr,             //if obj_ptr is ndarray, return it here
-     0)                //context?
-    ;
-
-  if (not_convertible) return bob::python::IMPOSSIBLE;
-
-  convert_t retval = bob::python::BYREFERENCE;
-
-  if (arr) { //the passed object is an array
-
-    //checks behavior.
-    if (behaved && !PyArray_ISCARRAY_RO(arr)) retval = bob::python::WITHARRAYCOPY;
-
-    info.set<npy_intp>(bob::python::num_to_type(PyArray_DESCR(arr)->type_num),
-        PyArray_NDIM(arr), PyArray_DIMS(arr));
-
-    Py_XDECREF(arr);
-  }
-
-  else { //the passed object is not an array
-    info.set<npy_intp>(bob::python::num_to_type(dtype->type_num), ndim, dims);
-    retval = bob::python::WITHCOPY;
-  }
-
-  return retval;
-}
-
-bob::python::convert_t bob::python::convertible_to (boost::python::object array_like,
-    const bob::io::base::array::typeinfo& info, bool writeable, bool behaved) {
-
-  bob::python::dtype req_dtype(info.dtype);
-
-  int ndim = 0;
-  npy_intp dims[NPY_MAXDIMS];
-  PyArrayObject* arr = 0;
-  PyArray_Descr* dtype = 0;
-
-  int not_convertible =
-#if NPY_FEATURE_VERSION >= NUMPY16_API /* NumPy C-API version >= 1.6 */
-    PyArray_GetArrayParamsFromObject
-#else
-    _GetArrayParamsFromObject
-#endif
-    (array_like.ptr(),  //input object pointer
-     req_dtype.descr(), //requested dtype (if need to enforce)
-     writeable,         //writeable?
-     &dtype,            //dtype assessment
-     &ndim,             //assessed number of dimensions
-     dims,              //assessed shape
-     &arr,              //if obj_ptr is ndarray, return it here
-     0)                 //context?
-    ;
-
-  if (not_convertible) return bob::python::IMPOSSIBLE;
-
-  convert_t retval = bob::python::BYREFERENCE;
-
-  if (arr) { //the passed object is an array -- check compatibility
-
-    if (info.nd) { //check number of dimensions and shape, if needs to
-      if (PyArray_NDIM(arr) != (int)info.nd) {
-        Py_XDECREF(arr);
-        return bob::python::IMPOSSIBLE;
-      }
-      if (info.has_valid_shape()) {
-        for (size_t i=0; i<info.nd; ++i)
-          if ((int)info.shape[i] != PyArray_DIM(arr,i)) {
-            Py_XDECREF(arr);
-            return bob::python::IMPOSSIBLE;
-          }
-      }
-    }
-
-    //checks dtype
-    if (PyArray_DESCR(arr)->type_num != req_dtype.descr()->type_num) {
-      //well... we would need to cast, but maybe possible - check with NumPy
-#if NPY_FEATURE_VERSION >= NUMPY16_API /* NumPy C-API version >= 1.6 */
-      if (PyArray_CanCastTypeTo(PyArray_DESCR(arr), req_dtype.descr(), NPY_SAFE_CASTING)) {
-#else
-      if (PyArray_CanCastTo(PyArray_DESCR(arr), req_dtype.descr())) {
-#endif
-        retval = bob::python::WITHARRAYCOPY;
-      }
-      else {
-        //we cannot cast from current to desired type, sorry...
-        Py_XDECREF(arr);
-        return bob::python::IMPOSSIBLE;
-      }
-    }
-
-    //checks behavior.
-    if (behaved) {
-      if (!PyArray_ISCARRAY_RO(arr)) retval = bob::python::WITHARRAYCOPY;
-    }
-
-    Py_XDECREF(arr);
-
-  }
-
-  else { //the passed object is not an array
-
-    retval = bob::python::WITHCOPY;
-
-    if (info.nd) { //check number of dimensions and shape
-      if (ndim != (int)info.nd) return bob::python::IMPOSSIBLE;
-      for (size_t i=0; i<info.nd; ++i)
-        if (info.shape[i] &&
-            (int)info.shape[i] != dims[i]) return bob::python::IMPOSSIBLE;
-    }
-
-    //checks dtype
-    if (dtype->type_num != req_dtype.descr()->type_num) {
-      //well... we would need to cast, but maybe possible - check with NumPy
-      if (
-#if NPY_FEATURE_VERSION >= NUMPY16_API /* NumPy C-API version >= 1.6 */
-          !PyArray_CanCastTypeTo(dtype, req_dtype.descr(), NPY_SAFE_CASTING)
-#else
-          !PyArray_CanCastTo(dtype, req_dtype.descr())
-#endif
-         )
-      {
-        return bob::python::IMPOSSIBLE;
-      }
-
-    }
-
-  }
-
-  return retval;
-}
-
-bob::python::convert_t bob::python::convertible_to(boost::python::object array_like, boost::python::object dtype_like,
-    bool writeable, bool behaved) {
-
-  bob::python::dtype req_dtype(dtype_like);
-
-  int ndim = 0;
-  npy_intp dims[NPY_MAXDIMS];
-  PyArrayObject* arr = 0;
-  PyArray_Descr* dtype = 0;
-
-  int not_convertible =
-#if NPY_FEATURE_VERSION >= NUMPY16_API /* NumPy C-API version >= 1.6 */
-    PyArray_GetArrayParamsFromObject
-#else
-    _GetArrayParamsFromObject
-#endif
-    (array_like.ptr(),  //input object pointer
-     req_dtype.descr(), //requested dtype (if need to enforce)
-     writeable,         //writeable?
-     &dtype,            //dtype assessment
-     &ndim,             //assessed number of dimensions
-     dims,              //assessed shape
-     &arr,              //if obj_ptr is ndarray, return it here
-     0)                 //context?
-    ;
-
-  if (not_convertible) return bob::python::IMPOSSIBLE;
-
-  convert_t retval = bob::python::BYREFERENCE;
-
-  if (arr) { //the passed object is an array -- check compatibility
-
-    //checks dtype
-    if (PyArray_DESCR(arr)->type_num != req_dtype.descr()->type_num) {
-      //well... we would need to cast, but maybe possible - check with NumPy
-
-      if (
-#if NPY_FEATURE_VERSION >= NUMPY16_API /* NumPy C-API version >= 1.6 */
-          PyArray_CanCastTypeTo(PyArray_DESCR(arr), req_dtype.descr(), NPY_SAFE_CASTING)
-#else
-          PyArray_CanCastTo(PyArray_DESCR(arr), req_dtype.descr())
-#endif
-         )
-      {
-        retval = bob::python::WITHARRAYCOPY;
-      }
-      else {
-        //we cannot cast from current to desired type, sorry...
-        Py_XDECREF(arr);
-        return bob::python::IMPOSSIBLE;
-      }
-    }
-
-    //checks behavior.
-    if (behaved) {
-      if (!PyArray_ISCARRAY_RO(arr)) retval = bob::python::WITHARRAYCOPY;
-    }
-
-    Py_XDECREF(arr);
-
-  }
-
-  else { //the passed object is not an array
-
-    retval = bob::python::WITHCOPY;
-
-    //checks dtype
-    if (dtype->type_num != req_dtype.descr()->type_num) {
-      //well... we would need to cast, but maybe possible - check with NumPy
-      if (
-#if NPY_FEATURE_VERSION >= NUMPY16_API /* NumPy C-API version >= 1.6 */
-          !PyArray_CanCastTypeTo(dtype, req_dtype.descr(), NPY_SAFE_CASTING)
-#else
-          !PyArray_CanCastTo(dtype, req_dtype.descr())
-#endif
-         )
-      {
-        return bob::python::IMPOSSIBLE;
-      }
-
-    }
-
-  }
-
-  return retval;
-}
-
-bob::python::convert_t bob::python::convertible_to(boost::python::object array_like, bool writeable,
-    bool behaved) {
-
-  int ndim = 0;
-  npy_intp dims[NPY_MAXDIMS];
-  PyArrayObject* arr = 0;
-  PyArray_Descr* dtype = 0;
-
-  int not_convertible =
-#if NPY_FEATURE_VERSION >= NUMPY16_API /* NumPy C-API version >= 1.6 */
-    PyArray_GetArrayParamsFromObject
-#else
-    _GetArrayParamsFromObject
-#endif
-    (array_like.ptr(), //input object pointer
-     0,                //requested dtype (if need to enforce)
-     writeable,        //writeable?
-     &dtype,           //dtype assessment
-     &ndim,            //assessed number of dimensions
-     dims,             //assessed shape
-     &arr,             //if obj_ptr is ndarray, return it here
-     0)                //context?
-    ;
-
-  if (not_convertible) return bob::python::IMPOSSIBLE;
-
-  convert_t retval = bob::python::BYREFERENCE;
-
-  if (arr) { //the passed object is an array -- check compatibility
-
-    //checks behavior.
-    if (behaved) {
-      if (!PyArray_ISCARRAY_RO(arr)) retval = bob::python::WITHARRAYCOPY;
-    }
-
-    Py_XDECREF(arr);
-
-  }
-
-  else { //the passed object is not an array
-
-     retval = bob::python::WITHCOPY;
-
-  }
-
-  return retval;
-}
-
-/***************************************************************************
- * Ndarray (PyArrayObject) manipulations                                   *
- ***************************************************************************/
-
-/**
- * Returns either a reference or a copy of the given array_like object,
- * depending on the following requirements for referral:
- *
- * 0. The pointed object is a numpy.ndarray
- * 1. The array type description type_num matches
- * 2. The array is C-style, contiguous and aligned
- */
-static boost::python::object try_refer_ndarray (boost::python::object array_like,
-    boost::python::object dtype_like) {
-
-  PyArrayObject* candidate = TP_ARRAY(array_like);
-  PyArray_Descr* req_dtype = 0;
-  PyArray_DescrConverter2(dtype_like.ptr(), &req_dtype); //new ref!
-
-  bool can_refer = true; //< flags a copy of the data
-
-  if (!PyArray_Check((PyObject*)candidate)) can_refer = false;
-
-  if (can_refer && !PyArray_ISCARRAY_RO(candidate)) can_refer = false;
-
-  if (can_refer) {
-    PyObject* tmp = PyArray_FromArray(candidate, 0, 0);
-    boost::python::handle<> hdl(tmp); //< raises if NULL
-    boost::python::object retval(hdl);
-    return retval;
-  }
-
-  //copy
-  TDEBUG1("[non-optimal] copying array-like object - cannot refer");
-  PyObject* _ptr = (PyObject*)candidate;
-#if NPY_FEATURE_VERSION > NUMPY16_API /* NumPy C-API version > 1.6 */
-  int flags = NPY_ARRAY_C_CONTIGUOUS|NPY_ARRAY_ENSURECOPY|NPY_ARRAY_ENSUREARRAY;
-#else
-  int flags = NPY_C_CONTIGUOUS|NPY_ENSURECOPY|NPY_ENSUREARRAY;
-#endif
-  PyObject* tmp = PyArray_FromAny(_ptr, req_dtype, 0, 0, flags, 0);
-  boost::python::handle<> hdl(tmp); //< raises if NULL
-  boost::python::object retval(hdl);
-  return retval;
-
-}
-
-static void derefer_ndarray (PyArrayObject* array) {
-  Py_XDECREF(array);
-}
-
-static boost::shared_ptr<void> shared_from_ndarray (boost::python::object& o) {
-  boost::shared_ptr<PyArrayObject> cache(TP_ARRAY(o),
-      std::ptr_fun(derefer_ndarray));
-  Py_XINCREF(TP_OBJECT(o)); ///< makes sure it outlives this scope!
-  return cache; //casts to b::shared_ptr<void>
-}
-
-bob::python::py_array::py_array(boost::python::object o, boost::python::object _dtype):
-  m_is_numpy(true)
-{
-  if (TPY_ISNONE(o)) PYTHON_ERROR(TypeError, "You cannot pass 'None' as input parameter to C++-bound bob methods that expect NumPy ndarrays (or blitz::Array<T,N>'s). Double-check your input!");
-  boost::python::object mine = try_refer_ndarray(o, _dtype);
-
-  //captures data from a numeric::array
-  typeinfo_ndarray_(mine, m_type);
-
-  //transforms the from boost::python ref counting to boost::shared_ptr<void>
-  m_data = shared_from_ndarray(mine);
-
-  //set-up the C-style pointer to this data
-  m_ptr = static_cast<void*>(PyArray_DATA(TP_ARRAY(mine)));
-}
-
-bob::python::py_array::py_array(const bob::io::base::array::interface& other) {
-  set(other);
-}
-
-bob::python::py_array::py_array(boost::shared_ptr<bob::io::base::array::interface> other) {
-  set(other);
-}
-
-bob::python::py_array::py_array(const bob::io::base::array::typeinfo& info) {
-  set(info);
-}
-
-bob::python::py_array::~py_array() {
-}
-
-/**
- * Wrap a C-style pointer with a PyArrayObject
- */
-static boost::python::object wrap_data (void* data, const bob::io::base::array::typeinfo& ti,
-    bool writeable=true) {
-
-  npy_intp shape[NPY_MAXDIMS];
-  npy_intp stride[NPY_MAXDIMS];
-  for (size_t k=0; k<ti.nd; ++k) {
-    shape[k] = ti.shape[k];
-    stride[k] = ti.item_size()*ti.stride[k];
-  }
-  PyObject* tmp = PyArray_New(&PyArray_Type, ti.nd,
-        &shape[0], bob::python::type_to_num(ti.dtype), &stride[0], data, 0,
-#if NPY_FEATURE_VERSION > NUMPY16_API /* NumPy C-API version > 1.6 */
-        writeable? NPY_ARRAY_CARRAY : NPY_ARRAY_CARRAY_RO
-#else
-        writeable? NPY_CARRAY : NPY_CARRAY_RO
-#endif
-        ,0);
-
-  boost::python::handle<> hdl(tmp);
-  boost::python::object retval(hdl);
-  return retval;
-}
-
-/**
- * Creates a new array from specifications
- */
-static boost::python::object make_ndarray(int nd, npy_intp* dims, int type) {
-  PyObject* tmp = PyArray_SimpleNew(nd, dims, type);
-  boost::python::handle<> hdl(tmp); //< raises if NULL
-  boost::python::object retval(hdl);
-  return retval;
-}
-
-/**
- * New copy of the array from another array
- */
-static boost::python::object copy_array (const boost::python::object& array) {
-  PyArrayObject* _p = TP_ARRAY(array);
-  boost::python::object retval = make_ndarray(PyArray_NDIM(_p), PyArray_DIMS(_p), PyArray_DESCR(_p)->type_num);
-  PyArray_CopyInto(TP_ARRAY(retval), TP_ARRAY(array));
-  return retval;
-}
-
-/**
- * Copies a data pointer and type into a new numpy array.
- */
-static boost::python::object copy_data (const void* data, const bob::io::base::array::typeinfo& ti) {
-  boost::python::object wrapped = wrap_data(const_cast<void*>(data), ti);
-  boost::python::object retval = copy_array (wrapped);
-  return retval;
-}
-
-void bob::python::py_array::set(const bob::io::base::array::interface& other) {
-  TDEBUG1("[non-optimal] buffer copying operation being performed for "
-      << other.type().str());
-
-  //performs a copy of the data into a numpy array
-  boost::python::object mine = copy_data(other.ptr(), m_type);
-
-  //captures data from a numeric::array
-  typeinfo_ndarray_(mine, m_type);
-
-  //transforms the from boost::python ref counting to boost::shared_ptr<void>
-  m_data = shared_from_ndarray(mine);
-
-  //set-up the C-style pointer to this data
-  m_ptr = static_cast<void*>(PyArray_DATA(TP_ARRAY(mine)));
-
-  m_is_numpy = true;
-}
-
-void bob::python::py_array::set(boost::shared_ptr<bob::io::base::array::interface> other) {
-  m_type = other->type();
-  m_is_numpy = false;
-  m_ptr = other->ptr();
-  m_data = other->owner();
-}
-
-/**
- * Creates a new numpy array from a bob::io::typeinfo object.
- */
-static boost::python::object new_from_type (const bob::io::base::array::typeinfo& ti) {
-  npy_intp shape[NPY_MAXDIMS];
-  npy_intp stride[NPY_MAXDIMS];
-  for (size_t k=0; k<ti.nd; ++k) {
-    shape[k] = ti.shape[k];
-    stride[k] = ti.item_size()*ti.stride[k];
-  }
-  PyObject* tmp = PyArray_New(&PyArray_Type, ti.nd, &shape[0],
-      bob::python::type_to_num(ti.dtype), &stride[0], 0, 0, 0, 0);
-  boost::python::handle<> hdl(tmp); //< raises if NULL
-  boost::python::object retval(hdl);
-  return retval;
-}
-
-void bob::python::py_array::set (const bob::io::base::array::typeinfo& req) {
-  if (m_type.is_compatible(req)) return; ///< nothing to do!
-
-  TDEBUG1("[non-optimal?] buffer re-size being performed from " << m_type.str()
-      << " to " << req.str());
-
-  boost::python::object mine = new_from_type(req);
-
-  //captures data from a numeric::array
-  typeinfo_ndarray_(mine, m_type);
-
-  //transforms the from boost::python ref counting to boost::shared_ptr<void>
-  m_data = shared_from_ndarray(mine);
-
-  //set-up the C-style pointer to this data
-  m_ptr = static_cast<void*>(PyArray_DATA(TP_ARRAY(mine)));
-
-  m_is_numpy = true;
-}
-
-boost::python::object bob::python::py_array::copy(const boost::python::object& dtype) {
-  return copy_data(m_ptr, m_type);
-}
-
-/**
- * Gets a read-only reference to a certain data. This recipe was originally
- * posted here:
- * http://blog.enthought.com/python/numpy-arrays-with-pre-allocated-memory/
- *
- * But a better allocation strategy (that actually works) is posted here:
- * http://stackoverflow.com/questions/2924827/numpy-array-c-api
- */
-// PyCapsule is only available on Python2.7 or Python3.1 and up
-#if ((PY_VERSION_HEX <  0x02070000) \
-    || ((PY_VERSION_HEX >= 0x03000000) \
-      && (PY_VERSION_HEX <  0x03010000)) )
-static void DeleteSharedPointer (void* ptr) {
-  typedef boost::shared_ptr<const void> type;
-  delete static_cast<type*>(ptr);
-}
-#else
-static void DeleteSharedPointer (PyObject* ptr) {
-  typedef boost::shared_ptr<const void> type;
-  delete static_cast<type*>(PyCapsule_GetPointer(ptr, NULL));
-}
-#endif
-
-static boost::python::object make_readonly (const void* data, const bob::io::base::array::typeinfo& ti,
-    boost::shared_ptr<const void> owner) {
-
-  boost::python::object retval = wrap_data(const_cast<void*>(data), ti, false);
-
-  //creates the shared pointer deallocator
-  boost::shared_ptr<const void>* ptr = new boost::shared_ptr<const void>(owner);
-#if ((PY_VERSION_HEX <  0x02070000) \
-    || ((PY_VERSION_HEX >= 0x03000000) \
-      && (PY_VERSION_HEX <  0x03010000)) )
-  PyObject* py_sharedptr = PyCObject_FromVoidPtr(ptr, DeleteSharedPointer);
-#else
-  PyObject* py_sharedptr = PyCapsule_New((void*)ptr, NULL, DeleteSharedPointer);
-#endif
-
-  if (!py_sharedptr) {
-    PYTHON_ERROR(RuntimeError, "could not allocate space for deallocation object in read-only array::interface wrapping");
-  }
-
-# if NPY_FEATURE_VERSION >= NUMPY17_API /* NumPy C-API version >= 1.7 */
-  PyArray_SetBaseObject(TP_ARRAY(retval), py_sharedptr);
-# else
-  TP_ARRAY(retval)->base = py_sharedptr;
-# endif
-
-  return retval;
-}
-
-boost::python::object bob::python::py_array::pyobject() {
-  if (m_is_numpy) {
-    boost::python::handle<> hdl(boost::python::borrowed(boost::static_pointer_cast<PyObject>(m_data).get()));
-    boost::python::object mine(hdl);
-    return mine;
-  }
-
-  //if you really want, I can wrap it for you, but in this case I'll make it
-  //read-only and will associate the object deletion to my own data pointer.
-  return make_readonly(m_ptr, m_type, m_data);
-}
-
-bool bob::python::py_array::is_writeable() const {
-  return (!m_is_numpy || PyArray_ISWRITEABLE(boost::static_pointer_cast<PyArrayObject>(m_data).get()));
-}
-
-bob::python::ndarray::ndarray(boost::python::object array_like, boost::python::object dtype_like)
-  : px(new bob::python::py_array(array_like, dtype_like)) {
-}
-
-bob::python::ndarray::ndarray(boost::python::object array_like)
-  : px(new bob::python::py_array(array_like, boost::python::object())) {
-  }
-
-bob::python::ndarray::ndarray(const bob::io::base::array::typeinfo& info)
-  : px(new bob::python::py_array(info)) {
-  }
-
-bob::python::ndarray::~ndarray() { }
-
-const bob::io::base::array::typeinfo& bob::python::ndarray::type() const {
-  return px->type();
-}
-
-boost::python::object bob::python::ndarray::self() { return px->pyobject(); }
-
-bob::python::const_ndarray::const_ndarray(boost::python::object array_like)
-  : bob::python::ndarray(array_like) {
-  }
-
-bob::python::const_ndarray::~const_ndarray() { }
-
diff --git a/bob/learn/misc/old/ndarray.h b/bob/learn/misc/old/ndarray.h
deleted file mode 100644
index 4090a0f5807083dcd5326b6a5085d5d52536b9ea..0000000000000000000000000000000000000000
--- a/bob/learn/misc/old/ndarray.h
+++ /dev/null
@@ -1,640 +0,0 @@
-/**
- * @file bob/python/ndarray.h
- * @date Tue Jan 18 17:07:26 2011 +0100
- * @author André Anjos <andre.anjos@idiap.ch>
- *
- * @brief A boost::python extension object that plays the role of a NumPy
- * ndarray (PyArrayObject*) and bob::io::base::array::interface at the same time.
- *
- * Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland
- */
-
-#ifndef BOB_PYTHON_NDARRAY_H
-#define BOB_PYTHON_NDARRAY_H
-
-#include <boost/python.hpp> //this has to come before the next declaration!
-#include <boost/format.hpp>
-
-// Define the numpy C-API we are compatible with
-#define NPY_NO_DEPRECATED_API NPY_1_7_API_VERSION
-#include <numpy/arrayobject.h>
-
-#include "exception.h"
-#include <bob.io.base/array.h>
-#include <bob.core/cast.h>
-
-#include <blitz/array.h>
-#include <stdint.h>
-
-/**
- * The method object::is_none() was only introduced in boost v1.43.
- */
-#if defined(BOOST_VERSION)
-#  undef BOOST_VERSION
-#  include <boost/version.hpp>
-#  if BOOST_VERSION >= 104300
-#  define TPY_ISNONE(x) x.is_none()
-#  else
-#  define TPY_ISNONE(x) (x.ptr() == Py_None)
-#  endif
-#endif
-
-/**
- * A macro that is replaced by the proper format definition for size_t
- */
-#ifdef __LP64__
-#  define SIZE_T_FMT "%lu"
-#else
-#  define SIZE_T_FMT "%u"
-#endif
-
-namespace bob { namespace python {
-
-  /**
-   * @brief Initializes numpy and boost bindings. Should be called once per
-   * module.
-   *
-   * Pass to it the module doc string and it will also update the module
-   * documentation string.
-   */
-  void setup_python(const char* module_docstring);
-
-  /**
-   * @brief A generic method to convert from ndarray type_num to bob's
-   * ElementType
-   */
-  bob::io::base::array::ElementType num_to_type(int num);
-
-  /**
-   * @brief A method to retrieve the type of element of an array
-   */
-  bob::io::base::array::ElementType array_to_type(const boost::python::numeric::array& a);
-
-  /**
-   * @brief Retrieves the number of dimensions in an array
-   */
-  size_t array_to_ndim(const boost::python::numeric::array& a);
-
-  /**
-   * @brief Converts from C/C++ type to ndarray type_num.
-   */
-  template <typename T> int ctype_to_num(void) {
-    PYTHON_ERROR(TypeError, "unsupported C/C++ type (%s)", bob::io::base::array::stringize<T>());
-  }
-
-  // The C/C++ types we support should be declared here.
-  template <> int ctype_to_num<bool>(void);
-  template <> int ctype_to_num<signed char>(void);
-  template <> int ctype_to_num<unsigned char>(void);
-  template <> int ctype_to_num<short>(void);
-  template <> int ctype_to_num<unsigned short>(void);
-  template <> int ctype_to_num<int>(void);
-  template <> int ctype_to_num<unsigned int>(void);
-  template <> int ctype_to_num<long>(void);
-  template <> int ctype_to_num<unsigned long>(void);
-  template <> int ctype_to_num<long long>(void);
-  template <> int ctype_to_num<unsigned long long>(void);
-  template <> int ctype_to_num<float>(void);
-  template <> int ctype_to_num<double>(void);
-#ifdef NPY_FLOAT128
-  template <> int ctype_to_num<long double>(void);
-#endif
-  template <> int ctype_to_num<std::complex<float> >(void);
-  template <> int ctype_to_num<std::complex<double> >(void);
-#ifdef NPY_COMPLEX256
-  template <> int ctype_to_num<std::complex<long double> >(void);
-#endif
-
-  /**
-   * @brief Converts from bob's Element type to ndarray type_num
-   */
-  int type_to_num(bob::io::base::array::ElementType type);
-
-  /**
-   * @brief Handles conversion checking possibilities
-   */
-  typedef enum {
-    IMPOSSIBLE = 0,    ///< not possible to get array from object
-    BYREFERENCE = 1,   ///< possible, by only referencing the array
-    WITHARRAYCOPY = 2, ///< possible, object is an array, but has to copy
-    WITHCOPY = 3       ///< possible, object is not an array, has to copy
-  } convert_t;
-
-  /**
-   * @brief Extracts the typeinfo object from a numeric::array (passed as
-   * boost::python::object). We check the input object to assure it is a valid
-   * ndarray. An exception may be thrown otherwise.
-   */
-  void typeinfo_ndarray (const boost::python::object& o,
-      bob::io::base::array::typeinfo& i);
-
-  /**
-   * @brief This is the same as above, but does not run any check on the input
-   * object "o".
-   */
-  void typeinfo_ndarray_ (const boost::python::object& o,
-      bob::io::base::array::typeinfo& i);
-
-  /**
-   * @brief Checks if an array-like object is convertible to become a NumPy
-   * ndarray (boost::python::numeric::array). If so, write the typeinfo
-   * information that such array would have upon automatic conversion to "info".
-   *
-   * Optionally, you can specify you do *not* want writeable or behavior to be
-   * checked. Write-ability means that an array area can be extracted from the
-   * "array_like" object and changes done to the converted ndarray will be
-   * reflected upon the original object.
-   *
-   * Behavior refers to two settings: first, the data type byte-order should be
-   * native (i.e., little-endian on little-endian machines and big-endian on
-   * big-endian machines). Secondly, the array must be C-Style, have its memory
-   * aligned and on a contiguous block.
-   *
-   * This method is more efficient than actually performing the conversion,
-   * unless you compile the project against NumPy < 1.6 in which case the
-   * built-in checks are not available and you we will emulate them with
-   * brute-force conversion if required. A level-1 DEBUG message will be output
-   * if a brute-force copy is required so you can debug for that.
-   *
-   * This method returns the convertibility status for the array-like object,
-   * which is one of:
-   *
-   * * IMPOSSIBLE: The object cannot, possibly, be converted into an ndarray
-   * * BYREFERENCE: The object will successfuly be converted to a ndarray, i.e.
-   *                in the most optimal way - by referring to it.
-   * * WITHARRAYCOPY: The object will successfuly be converted to a ndarray,
-   *                  but that will require an array copy. That means the
-   *                  object is already an array, but not of the type you
-   *                  requested.
-   * * WITHCOPY: The object will successfuly be converted to a ndarray, but
-   *             we will need to convert the object from its current format
-   *             (non-ndarray) to a ndarray format. In this case, we will not
-   *             be able to implement write-back.
-   */
-  convert_t convertible(boost::python::object array_like,
-      bob::io::base::array::typeinfo& info, bool writeable=true,
-      bool behaved=true);
-
-  /**
-   * @brief This method does the same as convertible(), but specifies a type
-   * information to which the destination array needs to have. Same rules
-   * apply.
-   *
-   * The typeinfo input is honoured like this:
-   *
-   * 1. The "dtype" component is enforced on the array object
-   * 2. If "nd" != 0, the number of dimensions is checked.
-   * 3. If 2. holds, shape values are checked if has_valid_shape() is 'true'
-   */
-  convert_t convertible_to (boost::python::object array_like,
-      const bob::io::base::array::typeinfo& info, bool writeable=true,
-      bool behaved=true);
-
-  /**
-   * @brief Same as above, but only requires dtype convertibility.
-   */
-  convert_t convertible_to (boost::python::object array_like,
-      boost::python::object dtype_like, bool writeable=true,
-      bool behaved=true);
-
-  /**
-   * @brief Same as above, but requires nothing, just simple convertibility.
-   */
-  convert_t convertible_to (boost::python::object array_like,
-      bool writeable=true, bool behaved=true);
-
-  class dtype {
-
-    public: //api
-
-      /**
-       * @brief Builds a new dtype object from another object.
-       */
-      dtype (boost::python::object dtype_like);
-
-      /**
-       * @brief Builds a new dtype object from a PyArray_Descr object that
-       * will have its own reference counting increased internally. So, the
-       * object is *not* stolen and you can Py_(X)DECREF() it when done if
-       * you so wish.
-       */
-      dtype (PyArray_Descr* descr);
-
-      /**
-       * @brief Builds a new dtype object from a numpy type_num integer
-       */
-      dtype(int npy_typenum);
-
-      /**
-       * @brief Builds a new dtype object from a bob element type
-       */
-      dtype(bob::io::base::array::ElementType eltype);
-
-      /**
-       * @brief Copy constructor
-       */
-      dtype(const dtype& other);
-
-      /**
-       * @brief Default constructor -- use default dtype from NumPy
-       */
-      dtype();
-
-      /**
-       * @brief D'tor virtualization
-       */
-      virtual ~dtype();
-
-      /**
-       * @brief Assignment
-       */
-      dtype& operator= (const dtype& other);
-
-      /**
-       * @brief Some checks
-       */
-      bool has_native_byteorder() const; ///< byte order is native
-      bool has_type(bob::io::base::array::ElementType eltype) const; ///< matches
-
-      /**
-       * @brief Returns the current element type
-       */
-      bob::io::base::array::ElementType eltype() const;
-
-      /**
-       * @brief Returns the current type num or -1, if I'm None
-       */
-      int type_num() const;
-
-      /**
-       * @brief Returns a boost::python representation of this object - maybe
-       * None.
-       */
-      inline boost::python::object self() const { return m_self; }
-
-      /**
-       * @brief Returns a borrowed reference to my PyArray_Descr* object.
-       */
-      inline PyArray_Descr* descr();
-
-      /**
-       * @brief Returns the bp::str() object for myself
-       */
-      boost::python::str str() const;
-
-      /**
-       * @brief Returns str(*this) as a std::string
-       */
-      std::string cxx_str() const;
-
-    private: //representation
-
-      boost::python::object m_self;
-
-  };
-
-  class py_array: public bob::io::base::array::interface {
-
-    public: //api
-
-      /**
-       * @brief Builds a new array from an array-like object but coerces to a
-       * certain type.
-       *
-       * @param array_like An ndarray object, inherited type or any object that
-       * can be cast into an array. Note that, in case of casting, we will need
-       * to copy the data. Otherwise, we just refer.
-       *
-       * @param dtype_like Anything that can be cast to a description type.
-       */
-      py_array(boost::python::object array_like,
-              boost::python::object dtype_like);
-
-      /**
-       * @brief Builds a new array copying the data of an existing buffer.
-       */
-      py_array(const bob::io::base::array::interface& buffer);
-
-      /**
-       * @brief Builds a new array by referring to the data of an existing
-       * buffer.
-       */
-      py_array(boost::shared_ptr<bob::io::base::array::interface> buffer);
-
-      /**
-       * @brief Builds a new array from scratch using the typeinfo. This array
-       * will be a NumPy ndarray internally.
-       */
-      py_array(const bob::io::base::array::typeinfo& info);
-
-      template <typename T>
-      py_array(bob::io::base::array::ElementType t, T d0) {
-        set(bob::io::base::array::typeinfo(t, (T)1, &d0));
-      }
-      template <typename T>
-      py_array(bob::io::base::array::ElementType t, T d0, T d1) {
-        T shape[2] = {d0, d1};
-        set(bob::io::base::array::typeinfo(t, (T)2, &shape[0]));
-      }
-      template <typename T>
-      py_array(bob::io::base::array::ElementType t, T d0, T d1, T d2) {
-        T shape[3] = {d0, d1, d2};
-        set(bob::io::base::array::typeinfo(t, (T)3, &shape[0]));
-      }
-      template <typename T>
-      py_array(bob::io::base::array::ElementType t, T d0, T d1, T d2, T d3) {
-        T shape[4] = {d0, d1, d2, d3};
-        set(bob::io::base::array::typeinfo(t, (T)4, &shape[0]));
-      }
-      template <typename T>
-      py_array(bob::io::base::array::ElementType t, T d0, T d1, T d2, T d3, T d4)
-      {
-        T shape[5] = {d0, d1, d2, d3, d4};
-        set(bob::io::base::array::typeinfo(t, (T)5, &shape[0]));
-      }
-
-      /**
-       * @brief D'tor virtualization
-       */
-      virtual ~py_array();
-
-      /**
-       * @brief Copies the data from another buffer.
-       */
-      virtual void set(const bob::io::base::array::interface& buffer);
-
-      /**
-       * @brief Refers to the data of another buffer.
-       */
-      virtual void set(boost::shared_ptr<bob::io::base::array::interface> buffer);
-
-      /**
-       * @brief Re-allocates this buffer taking into consideration new
-       * requirements. The internal memory should be considered uninitialized.
-       */
-      virtual void set (const bob::io::base::array::typeinfo& req);
-
-      /**
-       * @brief Type information for this buffer.
-       */
-      virtual const bob::io::base::array::typeinfo& type() const { return m_type; }
-
-      /**
-       * @brief Borrows a reference from the underlying memory. This means
-       * this object continues to be responsible for deleting the memory and
-       * you should make sure that it outlives the usage of the returned
-       * pointer.
-       */
-      virtual void* ptr() { return m_ptr; }
-      virtual const void* ptr() const { return m_ptr; }
-
-      /**
-       * @brief Gets a handle to the owner of this buffer.
-       */
-      virtual boost::shared_ptr<void> owner() { return m_data; }
-      virtual boost::shared_ptr<const void> owner() const { return m_data; }
-
-      /**
-       * @brief Cast the array to a different type by copying. If the type is
-       * omitted, we just make a plain copy of this array.
-       */
-      virtual boost::python::object copy
-        (const boost::python::object& dtype = boost::python::object());
-
-      /**
-       * @brief Gets a shallow copy of this array, if internally it is a NumPy
-       * array. Otherwise, returns a wrapper around the internal buffer memory
-       * and correctly reference counts it so the given object becomes
-       * responsible for the internal buffer as well.
-       *
-       * For this technique to always succeed, we use the recommendation for
-       * generating the numpy arrays with a special de-allocator as found here:
-       * http://blog.enthought.com/python/numpy-arrays-with-pre-allocated-memory
-       */
-      virtual boost::python::object pyobject();
-
-      /**
-       * @brief type cast operator converting this object into a boost::python::object
-       */
-      operator boost::python::object(){return pyobject();}
-
-      /**
-       * @brief Tells if the buffer is writeable
-       */
-      virtual bool is_writeable() const; ///< PyArray_ISWRITEABLE
-
-    private: //representation
-
-      bob::io::base::array::typeinfo m_type; ///< type information
-      void* m_ptr; ///< pointer to the data
-      bool m_is_numpy; ///< true if initiated with a NumPy array
-      boost::shared_ptr<void> m_data; ///< Pointer to the data owner
-
-  };
-
-  /**
-   * @brief The ndarray class is just a smart pointer wrapper over the
-   * concrete implementation of py_array.
-   */
-  class ndarray {
-
-    public: //api
-
-      /**
-       * @brief Builds a new array from an array-like object but coerces to a
-       * certain type.
-       *
-       * @param array_like An ndarray object, inherited type or any object that
-       * can be cast into an array. Note that, in case of casting, we will need
-       * to copy the data. Otherwise, we just refer.
-       *
-       * @param dtype_like Anything that can be cast to a description type.
-       */
-      ndarray(boost::python::object array_like,
-          boost::python::object dtype_like);
-
-      /**
-       * @brief Builds a new array from an array-like object but coerces to a
-       * certain type.
-       *
-       * @param array_like An ndarray object, inherited type or any object that
-       * can be cast into an array. Note that, in case of casting, we will need
-       * to copy the data. Otherwise, we just refer.
-       */
-      ndarray(boost::python::object array_like);
-
-      /**
-       * @brief Builds a new array from scratch using a type and shape
-       */
-      ndarray(const bob::io::base::array::typeinfo& info);
-
-      template <typename T>
-      ndarray(bob::io::base::array::ElementType t, T d0)
-        : px(new py_array(t, d0)) { }
-      template <typename T>
-      ndarray(bob::io::base::array::ElementType t, T d0, T d1)
-        : px(new py_array(t, d0, d1)) { }
-      template <typename T>
-      ndarray(bob::io::base::array::ElementType t, T d0, T d1, T d2)
-        : px(new py_array(t, d0, d1, d2)) { }
-      template <typename T>
-      ndarray(bob::io::base::array::ElementType t, T d0, T d1, T d2, T d3)
-        : px(new py_array(t, d0, d1, d2, d3)) { }
-      template <typename T>
-      ndarray(bob::io::base::array::ElementType t, T d0, T d1, T d2, T d3, T d4)
-        : px(new py_array(t, d0, d1, d2, d3, d4)) { }
-
-      /**
-       * @brief D'tor virtualization
-       */
-      virtual ~ndarray();
-
-      /**
-       * @brief Returns the type information
-       */
-      virtual const bob::io::base::array::typeinfo& type() const;
-
-      /**
-       * @brief Returns the underlying python representation.
-       */
-      virtual boost::python::object self();
-
-      /**
-       * @brief type cast operator converting this object into a boost::python::object
-       */
-      operator boost::python::object(){return self();}
-
-      /**
-       * @brief Returns a temporary blitz::Array<> skin over this ndarray.
-       *
-       * Attention: If you use this method, you have to make sure that this
-       * ndarray outlives the blitz::Array<> and that such blitz::Array<> will
-       * not be re-allocated or have any other changes made to it, except for
-       * the data contents.
-       */
-      template <typename T, int N> blitz::Array<T,N> bz () {
-
-        typedef blitz::Array<T,N> array_type;
-        typedef blitz::TinyVector<int,N> shape_type;
-
-        const bob::io::base::array::typeinfo& info = px->type();
-
-        if (info.nd != N) {
-          boost::format mesg("cannot wrap numpy.ndarray(%s,%d) as blitz::Array<%s,%s> - dimensions do not match");
-          mesg % bob::io::base::array::stringize(info.dtype) % info.nd;
-          mesg % bob::io::base::array::stringize<T>() % N;
-          throw std::runtime_error(mesg.str().c_str());
-        }
-
-        if (info.dtype != bob::io::base::array::getElementType<T>()) {
-          boost::format mesg("cannot wrap numpy.ndarray(%s,%d) as blitz::Array<%s,%s> - data type does not match");
-          mesg % bob::io::base::array::stringize(info.dtype) % info.nd;
-          mesg % bob::io::base::array::stringize<T>() % N;
-          throw std::runtime_error(mesg.str().c_str());
-        }
-
-        shape_type shape;
-        shape_type stride;
-        for (size_t k=0; k<info.nd; ++k) {
-          shape[k] = info.shape[k];
-          stride[k] = info.stride[k];
-        }
-
-        //finally, we return the wrapper.
-        return array_type((T*)px->ptr(), shape, stride, blitz::neverDeleteData);
-      }
-
-    protected: //representation
-
-      boost::shared_ptr<py_array> px;
-
-  };
-
-  /**
-   * @brief A specialization of ndarray that is used to cast types from python
-   * that will **not** be modified in C++.
-   *
-   * Conversion requirements for this type can be made less restrictive since
-   * we consider the user just wants to pass a value to the method or function
-   * using this type. This opposes to the plain ndarray, in which the user may
-   * want to modify its contents by skinning it with a blitz::Array<> layer.
-   */
-  class const_ndarray: public ndarray {
-
-    public: //api
-
-      /**
-       * @brief Builds a new array from an array-like object but coerces to a
-       * certain type.
-       *
-       * @param array_like An ndarray object, inherited type or any object that
-       * can be cast into an array. Note that, in case of casting, we will need
-       * to copy the data. Otherwise, we just refer.
-       */
-      const_ndarray(boost::python::object array_like);
-
-      /**
-       * @brief D'tor virtualization
-       */
-      virtual ~const_ndarray();
-
-      /**
-       * @brief Returns a temporary blitz::Array<> skin over this const_ndarray,
-       * if possible, otherwise it will COPY the array to the requested type
-       * and returns the copy.
-       *
-       * Attention: If you use this method, you have to make sure that this
-       * ndarray outlives the blitz::Array<>, in case the data is not copied.
-       */
-      template <typename T, int N> const blitz::Array<T,N> cast() {
-        const bob::io::base::array::typeinfo& info = px->type();
-
-        if (info.nd != N) {
-          boost::format mesg("cannot wrap numpy.ndarray(%s,%d) as blitz::Array<%s,%s> - dimensions do not match");
-          mesg % bob::io::base::array::stringize(info.dtype) % info.nd;
-          mesg % bob::io::base::array::stringize<T>() % N;
-          throw std::runtime_error(mesg.str().c_str());
-        }
-
-        if (info.dtype == bob::io::base::array::getElementType<T>()) {
-          // Type and shape matches, return the shallow copy of the array.
-          return bz<T,N>();
-        }
-
-        // if we got here, we have to copy-cast
-        // call the correct version of the cast function
-        switch(info.dtype){
-          // boolean types
-          case bob::io::base::array::t_bool: return bob::core::array::cast<T>(bz<bool,N>());
-
-          // integral types
-          case bob::io::base::array::t_int8: return bob::core::array::cast<T>(bz<int8_t,N>());
-          case bob::io::base::array::t_int16: return bob::core::array::cast<T>(bz<int16_t,N>());
-          case bob::io::base::array::t_int32: return bob::core::array::cast<T>(bz<int32_t,N>());
-          case bob::io::base::array::t_int64: return bob::core::array::cast<T>(bz<int64_t,N>());
-
-          // unsigned integral types
-          case bob::io::base::array::t_uint8: return bob::core::array::cast<T>(bz<uint8_t,N>());
-          case bob::io::base::array::t_uint16: return bob::core::array::cast<T>(bz<uint16_t,N>());
-          case bob::io::base::array::t_uint32: return bob::core::array::cast<T>(bz<uint32_t,N>());
-          case bob::io::base::array::t_uint64: return bob::core::array::cast<T>(bz<uint64_t,N>());
-
-          // floating point types
-          case bob::io::base::array::t_float32: return bob::core::array::cast<T>(bz<float,N>());
-          case bob::io::base::array::t_float64: return bob::core::array::cast<T>(bz<double,N>());
-          case bob::io::base::array::t_float128: return bob::core::array::cast<T>(bz<long double,N>());
-
-          // complex types
-          case bob::io::base::array::t_complex64: return bob::core::array::cast<T>(bz<std::complex<float>,N>());
-          case bob::io::base::array::t_complex128: return bob::core::array::cast<T>(bz<std::complex<double>,N>());
-          case bob::io::base::array::t_complex256: return bob::core::array::cast<T>(bz<std::complex<long double>,N>());
-
-          default: throw std::runtime_error("cast to the given (unknown) data type is not possible yet");
-        }
-      }
-
-  };
-
-}}
-
-#endif /* BOB_PYTHON_NDARRAY_H */
diff --git a/bob/learn/misc/old/ndarray_numpy.cc b/bob/learn/misc/old/ndarray_numpy.cc
deleted file mode 100644
index 3b69a0a73757dae2581d22be40a3b213e91b6a34..0000000000000000000000000000000000000000
--- a/bob/learn/misc/old/ndarray_numpy.cc
+++ /dev/null
@@ -1,162 +0,0 @@
-/**
- * @author Andre Anjos <andre.anjos@idiap.ch>
- * @date Thu Nov 17 14:33:20 2011 +0100
- *
- * @brief Automatic converters to-from python for bob::python::ndarray
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "ndarray.h"
-
-/**
- * Objects of this type create a binding between bob::python::ndarray and
- * NumPy arrays. You can specify a NumPy array as a parameter to a
- * bound method that would normally receive a blitz::Array<T,N> or a const
- * blitz::Array<T,N>& and the conversion will just magically happen, as
- * efficiently as possible.
- *
- * Please note that passing by value should be avoided as much as possible. In
- * this mode, the underlying method will still be able to alter the underlying
- * array storage area w/o being able to modify the array itself, causing a
- * gigantic mess. If you want to make something close to pass-by-value, just
- * pass by non-const reference instead.
- */
-struct ndarray_from_npy {
-
-  /**
-   * Registers converter from numpy array into a bob::python::ndarray
-   */
-  ndarray_from_npy() {
-    boost::python::converter::registry::push_back(&convertible, &construct,
-        boost::python::type_id<bob::python::ndarray>());
-  }
-
-  /**
-   * This method will determine if the input python object is convertible into
-   * an ndarray. To do that, the object has to be of type PyArrayObject
-   */
-  static void* convertible(PyObject* obj_ptr) {
-    if (PyArray_Check(obj_ptr)) return obj_ptr;
-    return 0;
-  }
-
-  /**
-   * This method will finally construct the C++ element out of the python
-   * object that was input. Please note that when boost::python reaches this
-   * method, the object has already been checked for convertibility.
-   */
-  static void construct(PyObject* obj_ptr,
-      boost::python::converter::rvalue_from_python_stage1_data* data) {
-
-    //black-magic required to setup the bob::python::ndarray storage area
-    void* storage = ((boost::python::converter::rvalue_from_python_storage<bob::python::ndarray>*)data)->storage.bytes;
-
-    boost::python::handle<> hdl(boost::python::borrowed(obj_ptr));
-    boost::python::object tmp(hdl);
-    new (storage) bob::python::ndarray(tmp);
-    data->convertible = storage;
-
-  }
-
-};
-
-/**
- * Objects of this type bind bob::python::ndarray's to numpy arrays. Your method
- * generates as output an object of this type and the object will be
- * automatically converted into a Numpy array.
- */
-struct ndarray_to_npy {
-
-  static PyObject* convert(const bob::python::ndarray& tv) {
-    return boost::python::incref(const_cast<bob::python::ndarray*>(&tv)->self().ptr());
-  }
-
-  static const PyTypeObject* get_pytype() { return &PyArray_Type; }
-
-};
-
-void register_ndarray_to_npy() {
-  boost::python::to_python_converter<bob::python::ndarray, ndarray_to_npy
-#if defined BOOST_PYTHON_SUPPORTS_PY_SIGNATURES
-                          ,true
-#endif
-              >();
-}
-
-/**
- * The same as for ndarray_from_npy, but bindings the const specialization. The
- * difference is that we don't require that the object given as input to be,
- * strictly, a NumPy ndarray, but are more relaxed.
- */
-struct const_ndarray_from_npy {
-
-  /**
-   * Registers converter from numpy array into a bob::python::ndarray
-   */
-  const_ndarray_from_npy() {
-    boost::python::converter::registry::push_back(&convertible, &construct,
-        boost::python::type_id<bob::python::const_ndarray>());
-  }
-
-  /**
-   * This method will determine if the input python object is convertible into
-   * an ndarray. To do that, the object has to convertible to a NumPy ndarray.
-   */
-  static void* convertible(PyObject* obj_ptr) {
-    boost::python::handle<> hdl(boost::python::borrowed(obj_ptr));
-    boost::python::object obj(hdl);
-    if (bob::python::convertible_to(obj, false, true)) //writeable=false, behaved=true
-      return obj_ptr;
-    return 0;
-  }
-
-  /**
-   * This method will finally construct the C++ element out of the python
-   * object that was input. Please note that when boost::python reaches this
-   * method, the object has already been checked for convertibility.
-   */
-  static void construct(PyObject* obj_ptr,
-      boost::python::converter::rvalue_from_python_stage1_data* data) {
-
-    //black-magic required to setup the bob::python::ndarray storage area
-    void* storage = ((boost::python::converter::rvalue_from_python_storage<bob::python::const_ndarray>*)data)->storage.bytes;
-
-    boost::python::handle<> hdl(boost::python::borrowed(obj_ptr));
-    boost::python::object tmp(hdl);
-    new (storage) bob::python::const_ndarray(tmp);
-    data->convertible = storage;
-
-  }
-
-};
-
-/**
- * Objects of this type bind bob::python::ndarray's to numpy arrays. Your method
- * generates as output an object of this type and the object will be
- * automatically converted into a Numpy array.
- */
-struct const_ndarray_to_npy {
-
-  static PyObject* convert(const bob::python::const_ndarray& tv) {
-    return boost::python::incref(const_cast<bob::python::const_ndarray*>(&tv)->self().ptr());
-  }
-
-  static const PyTypeObject* get_pytype() { return &PyArray_Type; }
-
-};
-
-void register_const_ndarray_to_npy() {
-  boost::python::to_python_converter<bob::python::const_ndarray, const_ndarray_to_npy
-#if defined BOOST_PYTHON_SUPPORTS_PY_SIGNATURES
-                          ,true
-#endif
-              >();
-}
-
-void bind_core_ndarray_numpy () {
-   ndarray_from_npy();
-   register_ndarray_to_npy();
-   const_ndarray_from_npy();
-   register_const_ndarray_to_npy();
-}
diff --git a/bob/learn/misc/old/plda.cc b/bob/learn/misc/old/plda.cc
deleted file mode 100644
index 64d94ed74547b4703cf89bd05737f82d8320b848..0000000000000000000000000000000000000000
--- a/bob/learn/misc/old/plda.cc
+++ /dev/null
@@ -1,216 +0,0 @@
-/**
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @date Fri Oct 14 18:07:56 2011 +0200
- *
- * @brief Python bindings for the PLDABase/PLDAMachine
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <bob.blitz/capi.h>
-#include <bob.blitz/cleanup.h>
-#include <bob.io.base/api.h>
-
-#include "ndarray.h"
-#include <bob.learn.misc/PLDAMachine.h>
-
-using namespace boost::python;
-
-static void py_set_dim_d(bob::learn::misc::PLDABase& machine, const size_t dim_d)
-{
-  machine.resize(dim_d, machine.getDimF(), machine.getDimG());
-}
-static void py_set_dim_f(bob::learn::misc::PLDABase& machine, const size_t dim_f)
-{
-  machine.resize(machine.getDimD(), dim_f, machine.getDimG());
-}
-static void py_set_dim_g(bob::learn::misc::PLDABase& machine, const size_t dim_g)
-{
-  machine.resize(machine.getDimD(), machine.getDimF(), dim_g);
-}
-
-// Set methods that uses blitz::Arrays
-static void py_set_mu(bob::learn::misc::PLDABase& machine,
-  bob::python::const_ndarray mu)
-{
-  machine.setMu(mu.bz<double,1>());
-}
-
-static void py_set_f(bob::learn::misc::PLDABase& machine,
-  bob::python::const_ndarray f)
-{
-  machine.setF(f.bz<double,2>());
-}
-
-static void py_set_g(bob::learn::misc::PLDABase& machine,
-  bob::python::const_ndarray g)
-{
-  machine.setG(g.bz<double,2>());
-}
-
-static void py_set_sigma(bob::learn::misc::PLDABase& machine,
-  bob::python::const_ndarray sigma)
-{
-  machine.setSigma(sigma.bz<double,1>());
-}
-
-
-static double computeLogLikelihood(bob::learn::misc::PLDAMachine& plda,
-  bob::python::const_ndarray samples, bool with_enrolled_samples=true)
-{
-  const bob::io::base::array::typeinfo& info = samples.type();
-  switch (info.nd) {
-    case 1:
-      return plda.computeLogLikelihood(samples.bz<double,1>(), with_enrolled_samples);
-    case 2:
-      return plda.computeLogLikelihood(samples.bz<double,2>(), with_enrolled_samples);
-    default:
-      PYTHON_ERROR(TypeError, "PLDA log-likelihood computation does not accept input array with '" SIZE_T_FMT "' dimensions (only 1D or 2D arrays)", info.nd);
-  }
-}
-
-static double plda_forward_sample(bob::learn::misc::PLDAMachine& m,
-  bob::python::const_ndarray samples)
-{
-  const bob::io::base::array::typeinfo& info = samples.type();
-  switch (info.nd) {
-    case 1:
-      {
-        double score;
-        // Calls the forward function
-        m.forward(samples.bz<double,1>(), score);
-        return score;
-      }
-    case 2:
-      {
-        double score;
-        // Calls the forward function
-        m.forward(samples.bz<double,2>(), score);
-        return score;
-      }
-    default:
-      PYTHON_ERROR(TypeError, "PLDA forwarding does not accept input array with '" SIZE_T_FMT "' dimensions (only 1D or 2D arrays)", info.nd);
-  }
-}
-
-static double py_log_likelihood_point_estimate(bob::learn::misc::PLDABase& plda,
-  bob::python::const_ndarray xij, bob::python::const_ndarray hi,
-  bob::python::const_ndarray wij)
-{
-  return plda.computeLogLikelihoodPointEstimate(xij.bz<double,1>(),
-           hi.bz<double,1>(), wij.bz<double,1>());
-}
-
-BOOST_PYTHON_FUNCTION_OVERLOADS(computeLogLikelihood_overloads, computeLogLikelihood, 2, 3)
-
-
-static boost::shared_ptr<bob::learn::misc::PLDABase> b_init(boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  return boost::shared_ptr<bob::learn::misc::PLDABase>(new bob::learn::misc::PLDABase(*hdf5->f));
-}
-
-static void b_load(bob::learn::misc::PLDABase& self, boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  self.load(*hdf5->f);
-}
-
-static void b_save(const bob::learn::misc::PLDABase& self, boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  self.save(*hdf5->f);
-}
-
-
-static boost::shared_ptr<bob::learn::misc::PLDAMachine> m_init(boost::python::object file, boost::shared_ptr<bob::learn::misc::PLDABase> b){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  return boost::shared_ptr<bob::learn::misc::PLDAMachine>(new bob::learn::misc::PLDAMachine(*hdf5->f, b));
-}
-
-static void m_load(bob::learn::misc::PLDAMachine& self, boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  self.load(*hdf5->f);
-}
-
-static void m_save(const bob::learn::misc::PLDAMachine& self, boost::python::object file){
-  if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
-  PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  self.save(*hdf5->f);
-}
-
-void bind_machine_plda()
-{
-  class_<bob::learn::misc::PLDABase, boost::shared_ptr<bob::learn::misc::PLDABase> >("PLDABase", "A PLDABase can be seen as a container for the subspaces F, G, the diagonal covariance matrix sigma (stored as a 1D array) and the mean vector mu when performing Probabilistic Linear Discriminant Analysis (PLDA). PLDA is a probabilistic model that incorporates components describing both between-class and within-class variations. A PLDABase can be shared between several PLDAMachine that contains class-specific information (information about the enrolment samples).\n\nReferences:\n1. 'A Scalable Formulation of Probabilistic Linear Discriminant Analysis: Applied to Face Recognition', Laurent El Shafey, Chris McCool, Roy Wallace, Sebastien Marcel, TPAMI'2013\n2. 'Probabilistic Linear Discriminant Analysis for Inference About Identity', Prince and Elder, ICCV'2007.\n3. 'Probabilistic Models for Inference about Identity', Li, Fu, Mohammed, Elder and Prince, TPAMI'2012.", init<const size_t, const size_t, const size_t, optional<const double> >((arg("self"), arg("dim_d"), arg("dim_f"), arg("dim_g"), arg("variance_flooring")=0.), "Builds a new PLDABase. dim_d is the dimensionality of the input features, dim_f is the dimensionality of the F subspace and dim_g the dimensionality of the G subspace. The variance flooring threshold is the minimum value that the variance sigma can reach, as this diagonal matrix is inverted."))
-    .def(init<>((arg("self")), "Constructs a new empty PLDABase."))
-    .def("__init__", boost::python::make_constructor(&b_init), "Constructs a new PLDABase from a configuration file.")
-    .def(init<const bob::learn::misc::PLDABase&>((arg("self"), arg("machine")), "Copy constructs a PLDABase"))
-    .def(self == self)
-    .def(self != self)
-    .def("is_similar_to", &bob::learn::misc::PLDABase::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this PLDABase with the 'other' one to be approximately the same.")
-    .def("load", &b_load, (arg("self"), arg("config")), "Loads the configuration parameters from a configuration file.")
-    .def("save", &b_save, (arg("self"), arg("config")), "Saves the configuration parameters to a configuration file.")
-    .add_property("dim_d", &bob::learn::misc::PLDABase::getDimD, &py_set_dim_d, "Dimensionality of the input feature vectors")
-    .add_property("dim_f", &bob::learn::misc::PLDABase::getDimF, &py_set_dim_f, "Dimensionality of the F subspace/matrix of the PLDA model")
-    .add_property("dim_g", &bob::learn::misc::PLDABase::getDimG, &py_set_dim_g, "Dimensionality of the G subspace/matrix of the PLDA model")
-    .add_property("mu", make_function(&bob::learn::misc::PLDABase::getMu, return_value_policy<copy_const_reference>()), &py_set_mu, "The mean vector mu of the PLDA model")
-    .add_property("f", make_function(&bob::learn::misc::PLDABase::getF, return_value_policy<copy_const_reference>()), &py_set_f, "The subspace/matrix F of the PLDA model")
-    .add_property("g", make_function(&bob::learn::misc::PLDABase::getG, return_value_policy<copy_const_reference>()), &py_set_g, "The subspace/matrix G of the PLDA model")
-    .add_property("sigma", make_function(&bob::learn::misc::PLDABase::getSigma, return_value_policy<copy_const_reference>()), &py_set_sigma, "The diagonal covariance matrix (represented by a 1D numpy array) sigma of the PLDA model")
-    .add_property("variance_threshold", &bob::learn::misc::PLDABase::getVarianceThreshold, &bob::learn::misc::PLDABase::setVarianceThreshold,
-      "The variance flooring threshold, i.e. the minimum allowed value of variance (sigma) in each dimension. "
-      "The variance sigma will be set to this value if an attempt is made to set it to a smaller value.")
-    .def("resize", &bob::learn::misc::PLDABase::resize, (arg("self"), arg("dim_d"), arg("dim_f"), arg("dim_g")), "Resizes the dimensionality of the PLDA model. Paramaters mu, F, G and sigma are reinitialized.")
-    .def("has_gamma", &bob::learn::misc::PLDABase::hasGamma, (arg("self"), arg("a")), "Tells if the gamma matrix for the given number of samples has already been computed. (gamma = inverse(I+a.F^T.beta.F), please check the documentation/source code for more details.")
-    .def("compute_gamma", &bob::learn::misc::PLDABase::computeGamma, (arg("self"), arg("a"), arg("gamma")), "Computes the gamma matrix for the given number of samples. (gamma = inverse(I+a.F^T.beta.F), please check the documentation/source code for more details.")
-    .def("get_add_gamma", make_function(&bob::learn::misc::PLDABase::getAddGamma, return_value_policy<copy_const_reference>(), (arg("self"), arg("a"))), "Computes the gamma matrix for the given number of samples. (gamma = inverse(I+a.F^T.beta.F), please check the documentation/source code for more details.")
-    .def("get_gamma", make_function(&bob::learn::misc::PLDABase::getGamma, return_value_policy<copy_const_reference>(), (arg("self"), arg("a"))), "Returns the gamma matrix for the given number of samples if it has already been put in cache. Throws an exception otherwise. (gamma = inverse(I+a.F^T.beta.F), please check the documentation/source code for more details.")
-    .def("has_log_like_const_term", &bob::learn::misc::PLDABase::hasLogLikeConstTerm, (arg("self"), arg("a")), "Tells if the log likelihood constant term for the given number of samples has already been computed.")
-    .def("compute_log_like_const_term", (double (bob::learn::misc::PLDABase::*)(const size_t, const blitz::Array<double,2>&) const)&bob::learn::misc::PLDABase::computeLogLikeConstTerm, (arg("self"), arg("a"), arg("gamma")), "Computes the log likelihood constant term for the given number of samples.")
-    .def("get_add_log_like_const_term", &bob::learn::misc::PLDABase::getAddLogLikeConstTerm, (arg("self"), arg("a")), "Computes the log likelihood constant term for the given number of samples, and adds it to the machine (as well as gamma), if it does not already exist.")
-    .def("get_log_like_const_term", &bob::learn::misc::PLDABase::getLogLikeConstTerm, (arg("self"), arg("a")), "Returns the log likelihood constant term for the given number of samples if it has already been put in cache. Throws an exception otherwise.")
-    .def("clear_maps", &bob::learn::misc::PLDABase::clearMaps, (arg("self")), "Clear the maps containing the gamma's as well as the log likelihood constant term for few number of samples. These maps are used to make likelihood computations faster.")
-    .def("compute_log_likelihood_point_estimate", &py_log_likelihood_point_estimate, (arg("self"), arg("xij"), arg("hi"), arg("wij")), "Computes the log-likelihood of a sample given the latent variables hi and wij (point estimate rather than Bayesian-like full integration).")
-    .def(self_ns::str(self_ns::self))
-    .add_property("__isigma__", make_function(&bob::learn::misc::PLDABase::getISigma, return_value_policy<copy_const_reference>()), "sigma^{-1} matrix stored in cache")
-    .add_property("__alpha__", make_function(&bob::learn::misc::PLDABase::getAlpha, return_value_policy<copy_const_reference>()), "alpha matrix stored in cache")
-    .add_property("__beta__", make_function(&bob::learn::misc::PLDABase::getBeta, return_value_policy<copy_const_reference>()), "beta matrix stored in cache")
-    .add_property("__ft_beta__", make_function(&bob::learn::misc::PLDABase::getFtBeta, return_value_policy<copy_const_reference>()), "F^T.beta matrix stored in cache")
-    .add_property("__gt_i_sigma__", make_function(&bob::learn::misc::PLDABase::getGtISigma, return_value_policy<copy_const_reference>()), "G^T.sigma^{-1} matrix stored in cache")
-    .add_property("__logdet_alpha__", &bob::learn::misc::PLDABase::getLogDetAlpha, "Logarithm of the determinant of the alpha matrix stored in cache.")
-    .add_property("__logdet_sigma__", &bob::learn::misc::PLDABase::getLogDetSigma, "Logarithm of the determinant of the sigma matrix stored in cache.")
-    .def("__precompute__", &bob::learn::misc::PLDABase::precompute, (arg("self")), "Precomputes useful values such as alpha and beta.")
-    .def("__precompute_log_like__", &bob::learn::misc::PLDABase::precomputeLogLike, (arg("self")), "Precomputes useful values for log-likelihood computations.")
-  ;
-
-  class_<bob::learn::misc::PLDAMachine, boost::shared_ptr<bob::learn::misc::PLDAMachine> >("PLDAMachine", "A PLDAMachine contains class-specific information (from the enrolment samples) when performing Probabilistic Linear Discriminant Analysis (PLDA). It should be attached to a PLDABase that contains information such as the subspaces F and G.\n\nReferences:\n1. 'A Scalable Formulation of Probabilistic Linear Discriminant Analysis: Applied to Face Recognition', Laurent El Shafey, Chris McCool, Roy Wallace, Sebastien Marcel, TPAMI'2013\n2. 'Probabilistic Linear Discriminant Analysis for Inference About Identity', Prince and Elder, ICCV'2007.\n3. 'Probabilistic Models for Inference about Identity', Li, Fu, Mohammed, Elder and Prince, TPAMI'2012.", init<boost::shared_ptr<bob::learn::misc::PLDABase> >((arg("self"), arg("plda_base")), "Builds a new PLDAMachine. An attached PLDABase should be provided, that can be shared by several PLDAMachine."))
-    .def(init<>((arg("self")), "Constructs a new empty (invalid) PLDAMachine. A PLDABase should then be set using the 'plda_base' attribute of this object."))
-    .def("__init__", make_constructor(&m_init), "Constructs a new PLDAMachine from a configuration file (and a PLDABase object).")
-    .def(init<const bob::learn::misc::PLDAMachine&>((arg("self"), arg("machine")), "Copy constructs a PLDAMachine"))
-    .def(self == self)
-    .def(self != self)
-    .def("is_similar_to", &bob::learn::misc::PLDAMachine::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this PLDAMachine with the 'other' one to be approximately the same.")
-    .def("load", &m_load, (arg("self"), arg("config")), "Loads the configuration parameters from a configuration file. The PLDABase will not be loaded, and has to be set manually using the 'plda_base' attribute.")
-    .def("save", &m_save, (arg("self"), arg("config")), "Saves the configuration parameters to a configuration file. The PLDABase will not be saved, and has to be saved separately, as it can be shared by several PLDAMachines.")
-    .add_property("plda_base", &bob::learn::misc::PLDAMachine::getPLDABase, &bob::learn::misc::PLDAMachine::setPLDABase)
-    .add_property("dim_d", &bob::learn::misc::PLDAMachine::getDimD, "Dimensionality of the input feature vectors")
-    .add_property("dim_f", &bob::learn::misc::PLDAMachine::getDimF, "Dimensionality of the F subspace/matrix of the PLDA model")
-    .add_property("dim_g", &bob::learn::misc::PLDAMachine::getDimG, "Dimensionality of the G subspace/matrix of the PLDA model")
-    .add_property("n_samples", &bob::learn::misc::PLDAMachine::getNSamples, &bob::learn::misc::PLDAMachine::setNSamples, "Number of enrolled samples")
-    .add_property("w_sum_xit_beta_xi", &bob::learn::misc::PLDAMachine::getWSumXitBetaXi, &bob::learn::misc::PLDAMachine::setWSumXitBetaXi)
-    .add_property("weighted_sum", make_function(&bob::learn::misc::PLDAMachine::getWeightedSum, return_value_policy<copy_const_reference>()), &bob::learn::misc::PLDAMachine::setWeightedSum)
-    .add_property("log_likelihood", &bob::learn::misc::PLDAMachine::getLogLikelihood, &bob::learn::misc::PLDAMachine::setLogLikelihood)
-    .def("has_gamma", &bob::learn::misc::PLDAMachine::hasGamma, (arg("self"), arg("a")), "Tells if the gamma matrix for the given number of samples has already been computed. (gamma = inverse(I+a.F^T.beta.F), please check the documentation/source code for more details.")
-    .def("get_add_gamma", make_function(&bob::learn::misc::PLDAMachine::getAddGamma, return_value_policy<copy_const_reference>(), (arg("self"), arg("a"))), "Computes the gamma matrix for the given number of samples. (gamma = inverse(I+a.F^T.beta.F), please check the documentation/source code for more details.")
-    .def("get_gamma", make_function(&bob::learn::misc::PLDAMachine::getGamma, return_value_policy<copy_const_reference>(), (arg("self"), arg("a"))), "Returns the gamma matrix for the given number of samples if it has already been put in cache. Throws an exception otherwise. (gamma = inverse(I+a.F^T.beta.F), please check the documentation/source code for more details.")
-    .def("has_log_like_const_term", &bob::learn::misc::PLDAMachine::hasLogLikeConstTerm, (arg("self"), arg("a")), "Tells if the log likelihood constant term for the given number of samples has already been computed.")
-    .def("get_add_log_like_const_term", &bob::learn::misc::PLDAMachine::getAddLogLikeConstTerm, (arg("self"), arg("a")), "Computes the log likelihood constant term for the given number of samples, and adds it to the machine (as well as gamma), if it does not already exist.")
-    .def("get_log_like_const_term", &bob::learn::misc::PLDAMachine::getLogLikeConstTerm, (arg("self"), arg("a")), "Returns the log likelihood constant term for the given number of samples if it has already been put in cache. Throws an exception otherwise.")
-    .def("clear_maps", &bob::learn::misc::PLDAMachine::clearMaps, (arg("self")), "Clears the maps containing the gamma's as well as the log likelihood constant term for few number of samples. These maps are used to make likelihood computations faster.")
-    .def("compute_log_likelihood", &computeLogLikelihood, computeLogLikelihood_overloads((arg("self"), arg("sample"), arg("use_enrolled_samples")=true), "Computes the log-likelihood considering only the probe sample(s) or jointly the probe sample(s) and the enrolled samples."))
-    .def("__call__", &plda_forward_sample, (arg("self"), arg("sample")), "Processes a sample and returns a log-likelihood ratio score.")
-    .def("forward", &plda_forward_sample, (arg("self"), arg("sample")), "Processes a sample and returns a log-likelihood ratio score.")
-  ;
-}
diff --git a/bob/learn/misc/old/plda_trainer.cc b/bob/learn/misc/old/plda_trainer.cc
deleted file mode 100644
index c68444496c8940d640d7844e1967c9c0dca72f5f..0000000000000000000000000000000000000000
--- a/bob/learn/misc/old/plda_trainer.cc
+++ /dev/null
@@ -1,171 +0,0 @@
-/**
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @date Fri Oct 14 18:07:56 2011 +0200
- *
- * @brief Python bindings to Probabilistic Linear Discriminant Analysis
- * trainers.
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "ndarray.h"
-#include <boost/python/stl_iterator.hpp>
-#include <bob.learn.misc/PLDAMachine.h>
-#include <bob.learn.misc/PLDATrainer.h>
-
-using namespace boost::python;
-
-typedef bob::learn::misc::EMTrainer<bob::learn::misc::PLDABase, std::vector<blitz::Array<double,2> > > EMTrainerPLDA;
-
-static void plda_train(EMTrainerPLDA& t, bob::learn::misc::PLDABase& m, object data)
-{
-  stl_input_iterator<bob::python::const_ndarray> dbegin(data), dend;
-  std::vector<bob::python::const_ndarray> vdata(dbegin, dend);
-  std::vector<blitz::Array<double,2> > vdata_ref;
-  for(std::vector<bob::python::const_ndarray>::iterator it=vdata.begin();
-      it!=vdata.end(); ++it)
-    vdata_ref.push_back(it->bz<double,2>());
-  // Calls the train function
-  t.train(m, vdata_ref);
-}
-
-static void plda_initialize(EMTrainerPLDA& t, bob::learn::misc::PLDABase& m, object data)
-{
-  stl_input_iterator<bob::python::const_ndarray> dbegin(data), dend;
-  std::vector<bob::python::const_ndarray> vdata(dbegin, dend);
-  std::vector<blitz::Array<double,2> > vdata_ref;
-  for(std::vector<bob::python::const_ndarray>::iterator it=vdata.begin();
-      it!=vdata.end(); ++it)
-    vdata_ref.push_back(it->bz<double,2>());
-  // Calls the initialization function
-  t.initialize(m, vdata_ref);
-}
-
-static void plda_eStep(EMTrainerPLDA& t, bob::learn::misc::PLDABase& m, object data)
-{
-  stl_input_iterator<bob::python::const_ndarray> dbegin(data), dend;
-  std::vector<bob::python::const_ndarray> vdata(dbegin, dend);
-  std::vector<blitz::Array<double,2> > vdata_ref;
-  for(std::vector<bob::python::const_ndarray>::iterator it=vdata.begin();
-      it!=vdata.end(); ++it)
-    vdata_ref.push_back(it->bz<double,2>());
-  // Calls the eStep function
-  t.eStep(m, vdata_ref);
-}
-
-static void plda_mStep(EMTrainerPLDA& t, bob::learn::misc::PLDABase& m, object data)
-{
-  stl_input_iterator<bob::python::const_ndarray> dbegin(data), dend;
-  std::vector<bob::python::const_ndarray> vdata(dbegin, dend);
-  std::vector<blitz::Array<double,2> > vdata_ref;
-  for(std::vector<bob::python::const_ndarray>::iterator it=vdata.begin();
-      it!=vdata.end(); ++it)
-    vdata_ref.push_back(it->bz<double,2>());
-  // Calls the mStep function
-  t.mStep(m, vdata_ref);
-}
-
-static void plda_finalize(EMTrainerPLDA& t, bob::learn::misc::PLDABase& m, object data)
-{
-  stl_input_iterator<bob::python::const_ndarray> dbegin(data), dend;
-  std::vector<bob::python::const_ndarray> vdata(dbegin, dend);
-  std::vector<blitz::Array<double,2> > vdata_ref;
-  for(std::vector<bob::python::const_ndarray>::iterator it=vdata.begin();
-      it!=vdata.end(); ++it)
-    vdata_ref.push_back(it->bz<double,2>());
-  // Calls the finalization function
-  t.finalize(m, vdata_ref);
-}
-
-static object get_z_first_order(bob::learn::misc::PLDATrainer& m) {
-  const std::vector<blitz::Array<double,2> >& v = m.getZFirstOrder();
-  list retval;
-  for (size_t k=0; k<v.size(); ++k) retval.append(v[k]); //copy
-  return tuple(retval);
-}
-
-static object get_z_second_order(bob::learn::misc::PLDATrainer& m) {
-  const std::vector<blitz::Array<double,3> >& v = m.getZSecondOrder();
-  list retval;
-  for (size_t k=0; k<v.size(); ++k) retval.append(v[k]); //copy
-  return tuple(retval);
-}
-
-
-// include the random API of bob.core
-#include <bob.core/random_api.h>
-static boost::python::object TB_getRng(EMTrainerPLDA& self){
-  // create new object
-  PyObject* o = PyBoostMt19937_Type.tp_alloc(&PyBoostMt19937_Type,0);
-  reinterpret_cast<PyBoostMt19937Object*>(o)->rng = self.getRng().get();
-  return boost::python::object(boost::python::handle<>(boost::python::borrowed(o)));
-}
-
-#include <boost/make_shared.hpp>
-static void TB_setRng(EMTrainerPLDA& self, boost::python::object rng){
-  if (!PyBoostMt19937_Check(rng.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.core.random.mt19937 object");
-  PyBoostMt19937Object* o = reinterpret_cast<PyBoostMt19937Object*>(rng.ptr());
-  self.setRng(boost::make_shared<boost::mt19937>(*o->rng));
-}
-
-
-void bind_trainer_plda()
-{
-  class_<EMTrainerPLDA, boost::noncopyable>("EMTrainerPLDA", "The base python class for all EM/PLDA-based trainers.", no_init)
-    .add_property("max_iterations", &EMTrainerPLDA::getMaxIterations, &EMTrainerPLDA::setMaxIterations, "Max iterations")
-    .add_property("rng", &TB_getRng, &TB_setRng, "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.")
-    .def("train", &plda_train, (arg("self"), arg("machine"), arg("data")), "Trains a PLDABase using data (mu, F, G and sigma are learnt).")
-    .def("initialize", &plda_initialize, (arg("self"), arg("machine"), arg("data")), "This method is called before the EM algorithm")
-    .def("finalize", &plda_finalize, (arg("self"), arg("machine"), arg("data")), "This method is called at the end of the EM algorithm")
-    .def("e_step", &plda_eStep, (arg("self"), arg("machine"), arg("data")),
-       "Updates the hidden variable distribution (or the sufficient statistics) given the Machine parameters. ")
-    .def("m_step", &plda_mStep, (arg("self"), arg("machine"), arg("data")), "Updates the Machine parameters given the hidden variable distribution (or the sufficient statistics)")
-  ;
-
-  class_<bob::learn::misc::PLDATrainer, boost::noncopyable, bases<EMTrainerPLDA> > PLDAT("PLDATrainer", "A trainer for Probabilistic Linear Discriminant Analysis (PLDA). The train() method will learn the mu, F, G and Sigma of the model, whereas the enrol() method, will store model information about the enrolment samples for a specific class.\n\nReferences:\n1. 'A Scalable Formulation of Probabilistic Linear Discriminant Analysis: Applied to Face Recognition', Laurent El Shafey, Chris McCool, Roy Wallace, Sebastien Marcel, TPAMI'2013\n2. 'Probabilistic Linear Discriminant Analysis for Inference About Identity', Prince and Elder, ICCV'2007.\n3. 'Probabilistic Models for Inference about Identity', Li, Fu, Mohammed, Elder and Prince, TPAMI'2012.", no_init);
-
-  PLDAT.def(init<optional<const size_t, const bool> >((arg("self"), arg("max_iterations")=100, arg("use_sum_second_order")=true),"Initializes a new PLDATrainer."))
-    .def(init<const bob::learn::misc::PLDATrainer&>((arg("self"), arg("trainer")), "Copy constructs a PLDATrainer"))
-    .def(self == self)
-    .def(self != self)
-    .def("is_similar_to", &bob::learn::misc::PLDATrainer::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this PLDATrainer with the 'other' one to be approximately the same.")
-    .def("enrol", &bob::learn::misc::PLDATrainer::enrol, (arg("self"), arg("plda_machine"), arg("data")), "Enrol a class-specific model (PLDAMachine) given a set of enrolment samples.")
-    .add_property("use_sum_second_order", &bob::learn::misc::PLDATrainer::getUseSumSecondOrder, &bob::learn::misc::PLDATrainer::setUseSumSecondOrder, "Tells whether the second order statistics are stored during the training procedure, or only their sum.")
-    .add_property("z_first_order", &get_z_first_order)
-    .add_property("z_second_order", &get_z_second_order)
-    .add_property("z_second_order_sum", make_function(&bob::learn::misc::PLDATrainer::getZSecondOrderSum, return_value_policy<copy_const_reference>()))
-  ;
-
-  // Sets the scope to the one of the PLDATrainer
-  scope s(PLDAT);
-
-  // Adds enums in the previously defined current scope
-  enum_<bob::learn::misc::PLDATrainer::InitFMethod>("init_f_method")
-    .value("RANDOM_F", bob::learn::misc::PLDATrainer::RANDOM_F)
-    .value("BETWEEN_SCATTER", bob::learn::misc::PLDATrainer::BETWEEN_SCATTER)
-    .export_values()
-  ;
-
-  enum_<bob::learn::misc::PLDATrainer::InitGMethod>("init_g_method")
-    .value("RANDOM_G", bob::learn::misc::PLDATrainer::RANDOM_G)
-    .value("WITHIN_SCATTER", bob::learn::misc::PLDATrainer::WITHIN_SCATTER)
-    .export_values()
-  ;
-
-  enum_<bob::learn::misc::PLDATrainer::InitSigmaMethod>("init_sigma_method")
-    .value("RANDOM_SIGMA", bob::learn::misc::PLDATrainer::RANDOM_SIGMA)
-    .value("VARIANCE_G", bob::learn::misc::PLDATrainer::VARIANCE_G)
-    .value("CONSTANT", bob::learn::misc::PLDATrainer::CONSTANT)
-    .value("VARIANCE_DATA", bob::learn::misc::PLDATrainer::VARIANCE_DATA)
-    .export_values()
-  ;
-
-  // Binds randomization/enumration-related methods
-  PLDAT.add_property("init_f_method", &bob::learn::misc::PLDATrainer::getInitFMethod, &bob::learn::misc::PLDATrainer::setInitFMethod, "The method used for the initialization of F.")
-    .add_property("init_f_ratio", &bob::learn::misc::PLDATrainer::getInitFRatio, &bob::learn::misc::PLDATrainer::setInitFRatio, "The ratio used for the initialization of F.")
-    .add_property("init_g_method", &bob::learn::misc::PLDATrainer::getInitGMethod, &bob::learn::misc::PLDATrainer::setInitGMethod, "The method used for the initialization of G.")
-    .add_property("init_g_ratio", &bob::learn::misc::PLDATrainer::getInitGRatio, &bob::learn::misc::PLDATrainer::setInitGRatio, "The ratio used for the initialization of G.")
-    .add_property("init_sigma_method", &bob::learn::misc::PLDATrainer::getInitSigmaMethod, &bob::learn::misc::PLDATrainer::setInitSigmaMethod, "The method used for the initialization of sigma.")
-    .add_property("init_sigma_ratio", &bob::learn::misc::PLDATrainer::getInitSigmaRatio, &bob::learn::misc::PLDATrainer::setInitSigmaRatio, "The ratio used for the initialization of sigma.")
-  ;
-}
diff --git a/bob/learn/misc/old/tinyvector.cc b/bob/learn/misc/old/tinyvector.cc
deleted file mode 100644
index f992e105c0daad9a8f21d8ef5086e6a95a07d07d..0000000000000000000000000000000000000000
--- a/bob/learn/misc/old/tinyvector.cc
+++ /dev/null
@@ -1,290 +0,0 @@
-/**
- * @author André Anjos <andre.anjos@idiap.ch>
- * @date Tue Jan 18 17:07:26 2011 +0100
- *
- * @brief Automatic converters to-from python for blitz::TinyVectors
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include <boost/python.hpp>
-#include <boost/format.hpp>
-#include <blitz/tinyvec2.h>
-
-/**
- * Objects of this type create a binding between blitz::TinyVector<T,N> and
- * python iterables. You can specify a python iterable as a parameter to a
- * bound method that would normally receive a TinyVector<T,N> or a const
- * TinyVector<T,N>& and the conversion will just magically happen.
- */
-template <typename T, int N>
-struct tinyvec_from_sequence {
-  typedef typename blitz::TinyVector<T,N> container_type;
-
-  /**
-   * Registers converter from any python sequence into a blitz::TinyVector<T,N>
-   */
-  tinyvec_from_sequence() {
-    boost::python::converter::registry::push_back(&convertible, &construct,
-        boost::python::type_id<container_type>());
-  }
-
-  /**
-   * This method will determine if the input python object is convertible into
-   * a TinyVector<T,N>
-   *
-   * Conditions:
-   * - The input object has to have N elements.
-   * - The input object has to be iterable.
-   * - All elements in the input object have to be convertible to T objects
-   */
-  static void* convertible(PyObject* obj_ptr) {
-
-    /**
-     * this bit will check if the input obj is one of the expected input types
-     * It will return 0 if the element in question is neither:
-     * - a list
-     * - a tuple
-     * - an iterable
-     * - a range
-     * - is not a string _and_ is not an unicode string _and_
-     *   (is a valid object pointer _or_ (too long to continue... ;-)
-     */
-    if (!(PyList_Check(obj_ptr)
-          || PyTuple_Check(obj_ptr)
-          || PyIter_Check(obj_ptr)
-          || PyRange_Check(obj_ptr)
-          || (
-#if PY_VERSION_HEX < 0x03000000
-               !PyString_Check(obj_ptr)
-#else
-               !PyBytes_Check(obj_ptr)
-#endif
-            && !PyUnicode_Check(obj_ptr)
-            && ( Py_TYPE(obj_ptr) == 0
-              || Py_TYPE(Py_TYPE(obj_ptr)) == 0
-              || Py_TYPE(Py_TYPE(obj_ptr))->tp_name == 0
-              || std::strcmp(
-                 Py_TYPE(Py_TYPE(obj_ptr))->tp_name, "Boost.Python.class") != 0)
-            && PyObject_HasAttrString(obj_ptr, "__len__")
-            && PyObject_HasAttrString(obj_ptr, "__getitem__")))) return 0;
-
-    //this bit will check if we have exactly N
-    if(PyObject_Length(obj_ptr) != N) {
-      PyErr_Clear();
-      return 0;
-    }
-
-    //this bit will make sure we can extract an interator from the object
-    boost::python::handle<> obj_iter(
-        boost::python::allow_null(PyObject_GetIter(obj_ptr)));
-    if (!obj_iter.get()) { // must be convertible to an iterator
-      PyErr_Clear();
-      return 0;
-    }
-
-    //this bit will check every element for convertibility into "T"
-    bool is_range = PyRange_Check(obj_ptr);
-    std::size_t i=0;
-    for(;;++i) { //if everything ok, should leave for loop with i == N
-      boost::python::handle<> py_elem_hdl(
-          boost::python::allow_null(PyIter_Next(obj_iter.get())));
-      if (PyErr_Occurred()) {
-        PyErr_Clear();
-        return 0;
-      }
-      if (!py_elem_hdl.get()) break; // end of iteration
-      boost::python::object py_elem_obj(py_elem_hdl);
-      boost::python::extract<T> elem_proxy(py_elem_obj);
-      if (!elem_proxy.check()) return 0;
-      if (is_range) break; // in a range all elements are of the same type
-    }
-    if (!is_range) assert(i == N);
-
-    return obj_ptr;
-  }
-
-  /**
-   * This method will finally construct the C++ element out of the python
-   * object that was input. Please note that when boost::python reaches this
-   * method, the object has already been checked for convertibility.
-   */
-  static void construct(PyObject* obj_ptr,
-      boost::python::converter::rvalue_from_python_stage1_data* data) {
-    boost::python::handle<> obj_iter(PyObject_GetIter(obj_ptr));
-    void* storage = ((boost::python::converter::rvalue_from_python_storage<container_type>*)data)->storage.bytes;
-    new (storage) container_type();
-    data->convertible = storage;
-    container_type& result = *((container_type*)storage);
-    std::size_t i=0;
-    for(;;++i) {
-      boost::python::handle<> py_elem_hdl(
-          boost::python::allow_null(PyIter_Next(obj_iter.get())));
-      if (PyErr_Occurred()) boost::python::throw_error_already_set();
-      if (!py_elem_hdl.get()) break; // end of iteration
-      boost::python::object py_elem_obj(py_elem_hdl);
-      typename boost::python::extract<T> elem_proxy(py_elem_obj);
-      result[i] = elem_proxy();
-    }
-    if (i != N) {
-      boost::format s("expected %d elements for TinyVector<T,%d>, got %d");
-      s % N % N % i;
-      PyErr_SetString(PyExc_RuntimeError, s.str().c_str());
-      boost::python::throw_error_already_set();
-    }
-  }
-
-};
-
-/**
- * Objects of this type bind TinyVector<T,N> to python tuples. Your method
- * generates as output an object of this type and the object will be
- * automatically converted into a python tuple.
- */
-template <typename T, int N>
-struct tinyvec_to_tuple {
-  typedef typename blitz::TinyVector<T,N> container_type;
-
-  static PyObject* convert(const container_type& tv) {
-    boost::python::list result;
-    typedef typename container_type::const_iterator const_iter;
-    for(const_iter p=tv.begin();p!=tv.end();++p) {
-      result.append(boost::python::object(*p));
-    }
-    return boost::python::incref(boost::python::tuple(result).ptr());
-  }
-
-  static const PyTypeObject* get_pytype() { return &PyTuple_Type; }
-
-};
-
-template <typename T, int N>
-void register_tinyvec_to_tuple() {
-  boost::python::to_python_converter<typename blitz::TinyVector<T,N>,
-                          tinyvec_to_tuple<T,N>
-#if defined BOOST_PYTHON_SUPPORTS_PY_SIGNATURES
-                          ,true
-#endif
-              >();
-}
-
-void bind_core_tinyvector () {
-
-  /**
-   * The following struct constructors will make sure we can input
-   * blitz::TinyVector<T,N> in our bound C++ routines w/o needing to specify
-   * special converters each time. The rvalue converters allow boost::python to
-   * automatically map the following inputs:
-   *
-   * a) const blitz::TinyVector<T,N>& (pass by const reference)
-   * b) blitz::TinyVector<T,N> (pass by value)
-   *
-   * Please note that the last case:
-   *
-   * c) blitz::TinyVector<T,N>& (pass by non-const reference)
-   *
-   * is NOT covered by these converters. The reason being that because the
-   * object may be changed, there is no way for boost::python to update the
-   * original python object, in a sensible manner, at the return of the method.
-   *
-   * Avoid passing by non-const reference in your methods.
-   */
-  tinyvec_from_sequence<int,1>();
-  tinyvec_from_sequence<int,2>();
-  tinyvec_from_sequence<int,3>();
-  tinyvec_from_sequence<int,4>();
-  tinyvec_from_sequence<int,5>();
-  tinyvec_from_sequence<int,6>();
-  tinyvec_from_sequence<int,7>();
-  tinyvec_from_sequence<int,8>();
-  tinyvec_from_sequence<int,9>();
-  tinyvec_from_sequence<int,10>();
-  tinyvec_from_sequence<int,11>();
-  tinyvec_from_sequence<uint64_t,1>();
-  tinyvec_from_sequence<uint64_t,2>();
-  tinyvec_from_sequence<uint64_t,3>();
-  tinyvec_from_sequence<uint64_t,4>();
-  tinyvec_from_sequence<uint64_t,5>();
-  tinyvec_from_sequence<uint64_t,6>();
-  tinyvec_from_sequence<uint64_t,7>();
-  tinyvec_from_sequence<uint64_t,8>();
-  tinyvec_from_sequence<uint64_t,9>();
-  tinyvec_from_sequence<uint64_t,10>();
-  tinyvec_from_sequence<uint64_t,11>();
-  tinyvec_from_sequence<double,1>();
-  tinyvec_from_sequence<double,2>();
-  tinyvec_from_sequence<double,3>();
-  tinyvec_from_sequence<double,4>();
-  tinyvec_from_sequence<double,5>();
-  tinyvec_from_sequence<double,6>();
-  tinyvec_from_sequence<double,7>();
-  tinyvec_from_sequence<double,8>();
-  tinyvec_from_sequence<double,9>();
-  tinyvec_from_sequence<double,10>();
-  tinyvec_from_sequence<double,11>();
-  if (typeid(int) != typeid(blitz::diffType)) {
-    tinyvec_from_sequence<blitz::diffType,1>();
-    tinyvec_from_sequence<blitz::diffType,2>();
-    tinyvec_from_sequence<blitz::diffType,3>();
-    tinyvec_from_sequence<blitz::diffType,4>();
-    tinyvec_from_sequence<blitz::diffType,5>();
-    tinyvec_from_sequence<blitz::diffType,6>();
-    tinyvec_from_sequence<blitz::diffType,7>();
-    tinyvec_from_sequence<blitz::diffType,8>();
-    tinyvec_from_sequence<blitz::diffType,9>();
-    tinyvec_from_sequence<blitz::diffType,10>();
-    tinyvec_from_sequence<blitz::diffType,11>();
-  }
-
-  /**
-   * The following struct constructors will make C++ return values of type
-   * blitz::TinyVector<T,N> to show up in the python side as tuples.
-   */
-  register_tinyvec_to_tuple<int,1>();
-  register_tinyvec_to_tuple<int,2>();
-  register_tinyvec_to_tuple<int,3>();
-  register_tinyvec_to_tuple<int,4>();
-  register_tinyvec_to_tuple<int,5>();
-  register_tinyvec_to_tuple<int,6>();
-  register_tinyvec_to_tuple<int,7>();
-  register_tinyvec_to_tuple<int,8>();
-  register_tinyvec_to_tuple<int,9>();
-  register_tinyvec_to_tuple<int,10>();
-  register_tinyvec_to_tuple<int,11>();
-  register_tinyvec_to_tuple<uint64_t,1>();
-  register_tinyvec_to_tuple<uint64_t,2>();
-  register_tinyvec_to_tuple<uint64_t,3>();
-  register_tinyvec_to_tuple<uint64_t,4>();
-  register_tinyvec_to_tuple<uint64_t,5>();
-  register_tinyvec_to_tuple<uint64_t,6>();
-  register_tinyvec_to_tuple<uint64_t,7>();
-  register_tinyvec_to_tuple<uint64_t,8>();
-  register_tinyvec_to_tuple<uint64_t,9>();
-  register_tinyvec_to_tuple<uint64_t,10>();
-  register_tinyvec_to_tuple<uint64_t,11>();
-  register_tinyvec_to_tuple<double,1>();
-  register_tinyvec_to_tuple<double,2>();
-  register_tinyvec_to_tuple<double,3>();
-  register_tinyvec_to_tuple<double,4>();
-  register_tinyvec_to_tuple<double,5>();
-  register_tinyvec_to_tuple<double,6>();
-  register_tinyvec_to_tuple<double,7>();
-  register_tinyvec_to_tuple<double,8>();
-  register_tinyvec_to_tuple<double,9>();
-  register_tinyvec_to_tuple<double,10>();
-  register_tinyvec_to_tuple<double,11>();
-  if (typeid(int) != typeid(blitz::diffType)) {
-    register_tinyvec_to_tuple<blitz::diffType,1>();
-    register_tinyvec_to_tuple<blitz::diffType,2>();
-    register_tinyvec_to_tuple<blitz::diffType,3>();
-    register_tinyvec_to_tuple<blitz::diffType,4>();
-    register_tinyvec_to_tuple<blitz::diffType,5>();
-    register_tinyvec_to_tuple<blitz::diffType,6>();
-    register_tinyvec_to_tuple<blitz::diffType,7>();
-    register_tinyvec_to_tuple<blitz::diffType,8>();
-    register_tinyvec_to_tuple<blitz::diffType,9>();
-    register_tinyvec_to_tuple<blitz::diffType,10>();
-    register_tinyvec_to_tuple<blitz::diffType,11>();
-  }
-
-}
diff --git a/bob/learn/misc/old/ztnorm.cc b/bob/learn/misc/old/ztnorm.cc
deleted file mode 100644
index 9affb8fe817d93832efa98df95ceb59d5f992450..0000000000000000000000000000000000000000
--- a/bob/learn/misc/old/ztnorm.cc
+++ /dev/null
@@ -1,152 +0,0 @@
-/**
- * @author Francois Moulin <Francois.Moulin@idiap.ch>
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @date Tue Jul 19 15:33:20 2011 +0200
- *
- * @brief Binds ZT-normalization to python
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "ndarray.h"
-
-#include <bob.learn.misc/ZTNorm.h>
-
-using namespace boost::python;
-
-static object ztnorm1(
-  bob::python::const_ndarray rawscores_probes_vs_models,
-  bob::python::const_ndarray rawscores_zprobes_vs_models,
-  bob::python::const_ndarray rawscores_probes_vs_tmodels,
-  bob::python::const_ndarray rawscores_zprobes_vs_tmodels,
-  bob::python::const_ndarray mask_zprobes_vs_tmodels_istruetrial)
-{
-  const blitz::Array<double,2> rawscores_probes_vs_models_ =
-    rawscores_probes_vs_models.bz<double,2>();
-  const blitz::Array<double,2> rawscores_zprobes_vs_models_ =
-    rawscores_zprobes_vs_models.bz<double,2>();
-  const blitz::Array<double,2> rawscores_probes_vs_tmodels_ =
-    rawscores_probes_vs_tmodels.bz<double,2>();
-  const blitz::Array<double,2> rawscores_zprobes_vs_tmodels_ =
-    rawscores_zprobes_vs_tmodels.bz<double,2>();
-  const blitz::Array<bool,2> mask_zprobes_vs_tmodels_istruetrial_ =
-    mask_zprobes_vs_tmodels_istruetrial.bz<bool,2>();
-
-  // allocate output
-  bob::python::ndarray ret(bob::io::base::array::t_float64, rawscores_probes_vs_models_.extent(0), rawscores_probes_vs_models_.extent(1));
-  blitz::Array<double, 2> ret_ = ret.bz<double,2>();
-
-  bob::learn::misc::ztNorm(rawscores_probes_vs_models_,
-                       rawscores_zprobes_vs_models_,
-                       rawscores_probes_vs_tmodels_,
-                       rawscores_zprobes_vs_tmodels_,
-                       mask_zprobes_vs_tmodels_istruetrial_,
-                       ret_);
-
-  return ret.self();
-}
-
-static object ztnorm2(
-  bob::python::const_ndarray rawscores_probes_vs_models,
-  bob::python::const_ndarray rawscores_zprobes_vs_models,
-  bob::python::const_ndarray rawscores_probes_vs_tmodels,
-  bob::python::const_ndarray rawscores_zprobes_vs_tmodels)
-{
-  const blitz::Array<double,2> rawscores_probes_vs_models_ =
-    rawscores_probes_vs_models.bz<double,2>();
-  const blitz::Array<double,2> rawscores_zprobes_vs_models_ =
-    rawscores_zprobes_vs_models.bz<double,2>();
-  const blitz::Array<double,2> rawscores_probes_vs_tmodels_ =
-    rawscores_probes_vs_tmodels.bz<double,2>();
-  const blitz::Array<double,2> rawscores_zprobes_vs_tmodels_ =
-    rawscores_zprobes_vs_tmodels.bz<double,2>();
-
-  // allocate output
-  bob::python::ndarray ret(bob::io::base::array::t_float64, rawscores_probes_vs_models_.extent(0), rawscores_probes_vs_models_.extent(1));
-  blitz::Array<double, 2> ret_ = ret.bz<double,2>();
-
-  bob::learn::misc::ztNorm(rawscores_probes_vs_models_,
-                       rawscores_zprobes_vs_models_,
-                       rawscores_probes_vs_tmodels_,
-                       rawscores_zprobes_vs_tmodels_,
-                       ret_);
-
-  return ret.self();
-}
-
-static object tnorm(
-  bob::python::const_ndarray rawscores_probes_vs_models,
-  bob::python::const_ndarray rawscores_probes_vs_tmodels)
-{
-  const blitz::Array<double,2> rawscores_probes_vs_models_ =
-    rawscores_probes_vs_models.bz<double,2>();
-  const blitz::Array<double,2> rawscores_probes_vs_tmodels_ =
-    rawscores_probes_vs_tmodels.bz<double,2>();
-
-  // allocate output
-  bob::python::ndarray ret(bob::io::base::array::t_float64, rawscores_probes_vs_models_.extent(0), rawscores_probes_vs_models_.extent(1));
-  blitz::Array<double, 2> ret_ = ret.bz<double,2>();
-
-  bob::learn::misc::tNorm(rawscores_probes_vs_models_,
-                       rawscores_probes_vs_tmodels_,
-                       ret_);
-
-  return ret.self();
-}
-
-static object znorm(
-  bob::python::const_ndarray rawscores_probes_vs_models,
-  bob::python::const_ndarray rawscores_zprobes_vs_models)
-{
-  const blitz::Array<double,2> rawscores_probes_vs_models_ =
-    rawscores_probes_vs_models.bz<double,2>();
-  const blitz::Array<double,2> rawscores_zprobes_vs_models_ =
-    rawscores_zprobes_vs_models.bz<double,2>();
-
-  // allocate output
-  bob::python::ndarray ret(bob::io::base::array::t_float64, rawscores_probes_vs_models_.extent(0), rawscores_probes_vs_models_.extent(1));
-  blitz::Array<double, 2> ret_ = ret.bz<double,2>();
-
-  bob::learn::misc::zNorm(rawscores_probes_vs_models_,
-                       rawscores_zprobes_vs_models_,
-                       ret_);
-
-  return ret.self();
-}
-
-void bind_machine_ztnorm()
-{
-  def("ztnorm",
-      ztnorm1,
-      args("rawscores_probes_vs_models",
-           "rawscores_zprobes_vs_models",
-           "rawscores_probes_vs_tmodels",
-           "rawscores_zprobes_vs_tmodels",
-           "mask_zprobes_vs_tmodels_istruetrial"),
-      "Normalise raw scores with ZT-Norm"
-     );
-
-  def("ztnorm",
-      ztnorm2,
-      args("rawscores_probes_vs_models",
-           "rawscores_zprobes_vs_models",
-           "rawscores_probes_vs_tmodels",
-           "rawscores_zprobes_vs_tmodels"),
-      "Normalise raw scores with ZT-Norm. Assume that znorm and tnorm have no common subject id."
-     );
-
-  def("tnorm",
-      tnorm,
-      args("rawscores_probes_vs_models",
-           "rawscores_probes_vs_tmodels"),
-      "Normalise raw scores with T-Norm."
-     );
-
-  def("znorm",
-      znorm,
-      args("rawscores_probes_vs_models",
-           "rawscores_zprobes_vs_models"),
-      "Normalise raw scores with Z-Norm."
-     );
-
-}
diff --git a/bob/learn/misc/plda_base.cpp b/bob/learn/misc/plda_base.cpp
deleted file mode 100644
index 2e3d97279e5f526c75023e29a39ac5041723a903..0000000000000000000000000000000000000000
--- a/bob/learn/misc/plda_base.cpp
+++ /dev/null
@@ -1,1097 +0,0 @@
-/**
- * @date Thu Jan 29 15:44:15 2015 +0200
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- *
- * @brief Python API for bob::learn::em
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "main.h"
-
-/******************************************************************/
-/************ Constructor Section *********************************/
-/******************************************************************/
-
-static auto PLDABase_doc = bob::extension::ClassDoc(
-  BOB_EXT_MODULE_PREFIX ".PLDABase",
-
-  "This class is a container for the :math:`F` (between class variantion matrix), :math:`G` (within class variantion matrix) and :math:`\\Sigma` "
-  "matrices and the mean vector :math:`\\mu` of a PLDA model. This also"
-  "precomputes useful matrices to make the model scalable."
-  "References: [ElShafey2014,PrinceElder2007,LiFu2012]",
-  ""
-).add_constructor(
-  bob::extension::FunctionDoc(
-    "__init__",
-
-     "Constructor, builds a new PLDABase. :math:`F`, :math:`G` "
-     "and :math:`\\Sigma` are initialized to the 'eye' matrix (matrix with 1's "
-     "on the diagonal and 0 outside), and :math:`\\mu` is initialized to 0.",
-
-    "",
-    true
-  )
-  .add_prototype("dim_d,dim_f,dim_g,variance_threshold","")
-  .add_prototype("other","")
-  .add_prototype("hdf5","")
-
-  .add_parameter("dim_D", "int", "Dimensionality of the feature vector.")
-  .add_parameter("dim_F", "int", "Size of :math:`F`(between class variantion matrix).")
-  .add_parameter("dim_G", "int", "Size of :math:`G`(within class variantion matrix).")
-  .add_parameter("variance_threshold", "double", "The smallest possible value of the variance (Ignored if set to 0.)")
-  
-  .add_parameter("other", ":py:class:`bob.learn.misc.PLDABase`", "A PLDABase object to be copied.")
-  .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
-
-);
-
-
-static int PyBobLearnMiscPLDABase_init_copy(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = PLDABase_doc.kwlist(1);
-  PyBobLearnMiscPLDABaseObject* o;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscPLDABase_Type, &o)){
-    PLDABase_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::PLDABase(*o->cxx));
-  return 0;
-}
-
-
-static int PyBobLearnMiscPLDABase_init_hdf5(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = PLDABase_doc.kwlist(2);
-
-  PyBobIoHDF5FileObject* config = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBobIoHDF5File_Converter, &config)){
-    PLDABase_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::PLDABase(*(config->f)));
-
-  return 0;
-}
-
-
-static int PyBobLearnMiscPLDABase_init_dim(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = PLDABase_doc.kwlist(0);
-  
-  int dim_D, dim_F, dim_G = 1;
-  double variance_threshold = 0.0;
-
-  //Here we have to select which keyword argument to read  
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "iii|d", kwlist, &dim_D, &dim_F, &dim_G, &variance_threshold)){
-    PLDABase_doc.print_usage();
-    return -1;
-  }
-  
-  if(dim_D <= 0){
-    PyErr_Format(PyExc_TypeError, "dim_D argument must be greater than or equal to one");
-    return -1;
-  }
-  
-  if(dim_F <= 0){
-    PyErr_Format(PyExc_TypeError, "dim_F argument must be greater than or equal to one");
-    return -1;
-  }
-
-  if(dim_G <= 0){
-    PyErr_Format(PyExc_TypeError, "dim_G argument must be greater than or equal to one");
-    return -1;
-  }
-
-  if(variance_threshold < 0){
-    PyErr_Format(PyExc_TypeError, "variance_threshold argument must be greater than or equal to zero");
-    return -1;
-  }
-
-  
-  self->cxx.reset(new bob::learn::misc::PLDABase(dim_D, dim_F, dim_G, variance_threshold));
-  return 0;
-}
-
-static int PyBobLearnMiscPLDABase_init(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // get the number of command line arguments
-  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-
-  if(nargs==1){
-    //Reading the input argument
-    PyObject* arg = 0;
-    if (PyTuple_Size(args))
-      arg = PyTuple_GET_ITEM(args, 0);
-    else {
-      PyObject* tmp = PyDict_Values(kwargs);
-      auto tmp_ = make_safe(tmp);
-      arg = PyList_GET_ITEM(tmp, 0);
-    }
-
-    // If the constructor input is Gaussian object
-    if (PyBobLearnMiscPLDABase_Check(arg))
-      return PyBobLearnMiscPLDABase_init_copy(self, args, kwargs);
-    // If the constructor input is a HDF5
-    else if (PyBobIoHDF5File_Check(arg))
-      return PyBobLearnMiscPLDABase_init_hdf5(self, args, kwargs);
-  }
-  else if((nargs==3)||(nargs==4))
-    return PyBobLearnMiscPLDABase_init_dim(self, args, kwargs);
-  else{
-    PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires 1, 3 or 4 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
-    PLDABase_doc.print_usage();
-    return -1;
-  }
-  BOB_CATCH_MEMBER("cannot create PLDABase", 0)
-  return 0;
-}
-
-
-
-static void PyBobLearnMiscPLDABase_delete(PyBobLearnMiscPLDABaseObject* self) {
-  self->cxx.reset();
-  Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-static PyObject* PyBobLearnMiscPLDABase_RichCompare(PyBobLearnMiscPLDABaseObject* self, PyObject* other, int op) {
-  BOB_TRY
-
-  if (!PyBobLearnMiscPLDABase_Check(other)) {
-    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
-    return 0;
-  }
-  auto other_ = reinterpret_cast<PyBobLearnMiscPLDABaseObject*>(other);
-  switch (op) {
-    case Py_EQ:
-      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
-    case Py_NE:
-      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
-    default:
-      Py_INCREF(Py_NotImplemented);
-      return Py_NotImplemented;
-  }
-  BOB_CATCH_MEMBER("cannot compare PLDABase objects", 0)
-}
-
-int PyBobLearnMiscPLDABase_Check(PyObject* o) {
-  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnMiscPLDABase_Type));
-}
-
-
-/******************************************************************/
-/************ Variables Section ***********************************/
-/******************************************************************/
-
-/***** shape *****/
-static auto shape = bob::extension::VariableDoc(
-  "shape",
-  "(int,int, int)",
-  "A tuple that represents the dimensionality of the feature vector :math:`dim_d`, the :math:`F` matrix and the :math:`G` matrix.",
-  ""
-);
-PyObject* PyBobLearnMiscPLDABase_getShape(PyBobLearnMiscPLDABaseObject* self, void*) {
-  BOB_TRY
-  return Py_BuildValue("(i,i,i)", self->cxx->getDimD(), self->cxx->getDimF(), self->cxx->getDimG());
-  BOB_CATCH_MEMBER("shape could not be read", 0)
-}
-
-
-/***** F *****/
-static auto F = bob::extension::VariableDoc(
-  "f",
-  "array_like <float, 2D>",
-  "Returns the :math:`F` matrix (between class variantion matrix)",
-  ""
-);
-PyObject* PyBobLearnMiscPLDABase_getF(PyBobLearnMiscPLDABaseObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getF());
-  BOB_CATCH_MEMBER("`f` could not be read", 0)
-}
-int PyBobLearnMiscPLDABase_setF(PyBobLearnMiscPLDABaseObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, F.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "f");
-  if (!b) return -1;
-  self->cxx->setF(*b);
-  return 0;
-  BOB_CATCH_MEMBER("`f` vector could not be set", -1)
-}
-
-/***** G *****/
-static auto G = bob::extension::VariableDoc(
-  "g",
-  "array_like <float, 2D>",
-  "Returns the :math:`G` matrix (between class variantion matrix)",
-  ""
-);
-PyObject* PyBobLearnMiscPLDABase_getG(PyBobLearnMiscPLDABaseObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getG());
-  BOB_CATCH_MEMBER("`g` could not be read", 0)
-}
-int PyBobLearnMiscPLDABase_setG(PyBobLearnMiscPLDABaseObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, G.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "g");
-  if (!b) return -1;
-  self->cxx->setG(*b);
-  return 0;
-  BOB_CATCH_MEMBER("`g` vector could not be set", -1)
-}
-
-
-/***** mu *****/
-static auto mu = bob::extension::VariableDoc(
-  "mu",
-  "array_like <float, 1D>",
-  "Gets the :math:`mu` mean vector of the PLDA model",
-  ""
-);
-PyObject* PyBobLearnMiscPLDABase_getMu(PyBobLearnMiscPLDABaseObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getMu());
-  BOB_CATCH_MEMBER("`mu` could not be read", 0)
-}
-int PyBobLearnMiscPLDABase_setMu(PyBobLearnMiscPLDABaseObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, mu.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "mu");
-  if (!b) return -1;
-  self->cxx->setMu(*b);
-  return 0;
-  BOB_CATCH_MEMBER("`mu` vector could not be set", -1)
-}
-
-
-/***** __isigma__ *****/
-static auto __isigma__ = bob::extension::VariableDoc(
-  "__isigma__",
-  "array_like <float, 1D>",
-  "Gets the inverse vector/diagonal matrix of :math:`\\Sigma^{-1}`",
-  ""
-);
-static PyObject* PyBobLearnMiscPLDABase_getISigma(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getISigma());
-  BOB_CATCH_MEMBER("__isigma__ could not be read", 0)
-}
-
-
-/***** __alpha__ *****/
-static auto __alpha__ = bob::extension::VariableDoc(
-  "__alpha__",
-  "array_like <float, 2D>",
-  "Gets the \f$\alpha\f$ matrix."
-  ":math:`\\alpha = (Id + G^T \\Sigma^{-1} G)^{-1} = \\mathcal{G}`",
-  ""
-);
-static PyObject* PyBobLearnMiscPLDABase_getAlpha(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAlpha());
-  BOB_CATCH_MEMBER("__alpha__ could not be read", 0)
-}
-
-
-/***** __beta__ *****/
-static auto __beta__ = bob::extension::VariableDoc(
-  "__beta__",
-  "array_like <float, 2D>",
-  "Gets the :math:`\\beta` matrix "
-  ":math:`\\beta = (\\Sigma + G G^T)^{-1} = \\mathcal{S} = \\Sigma^{-1} - \\Sigma^{-1} G \\mathcal{G} G^{T} \\Sigma^{-1}`",
-  ""
-);
-static PyObject* PyBobLearnMiscPLDABase_getBeta(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getBeta());
-  BOB_CATCH_MEMBER("__beta__ could not be read", 0)
-}
-
-
-/***** __ft_beta__ *****/
-static auto __ft_beta__ = bob::extension::VariableDoc(
-  "__ft_beta__",
-  "array_like <float, 2D>",
-  "Gets the :math:`F^T \\beta' matrix",
-  ""
-);
-static PyObject* PyBobLearnMiscPLDABase_getFtBeta(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getFtBeta());
-  BOB_CATCH_MEMBER("__ft_beta__ could not be read", 0)
-}
-
-
-/***** __gt_i_sigma__ *****/
-static auto __gt_i_sigma__ = bob::extension::VariableDoc(
-  "__gt_i_sigma__",
-  "array_like <float, 2D>",
-  "Gets the :math:`G^T \\Sigma^{-1}` matrix",
-  ""
-);
-static PyObject* PyBobLearnMiscPLDABase_getGtISigma(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getGtISigma());
-  BOB_CATCH_MEMBER("__gt_i_sigma__ could not be read", 0)
-}
-
-
-/***** __logdet_alpha__ *****/
-static auto __logdet_alpha__ = bob::extension::VariableDoc(
-  "__logdet_alpha__",
-  "double",
-  "Gets :math:`\\log(\\det(\\alpha))`",
-  ""
-);
-static PyObject* PyBobLearnMiscPLDABase_getLogDetAlpha(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  return Py_BuildValue("d",self->cxx->getLogDetAlpha());
-  BOB_CATCH_MEMBER("__logdet_alpha__ could not be read", 0)
-}
-
-/***** __logdet_sigma__ *****/
-static auto __logdet_sigma__ = bob::extension::VariableDoc(
-  "__logdet_sigma__",
-  "double",
-  "Gets :math:`\\log(\\det(\\Sigma))`",
-  ""
-);
-static PyObject* PyBobLearnMiscPLDABase_getLogDetSigma(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  return Py_BuildValue("d",self->cxx->getLogDetSigma());
-  BOB_CATCH_MEMBER("__logdet_sigma__ could not be read", 0)
-}
-
-
-/***** variance_threshold *****/
-static auto variance_threshold = bob::extension::VariableDoc(
-  "variance_threshold",
-  "double",
-  "",
-  ""
-);
-static PyObject* PyBobLearnMiscPLDABase_getVarianceThreshold(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  return Py_BuildValue("d",self->cxx->getVarianceThreshold());
-  BOB_CATCH_MEMBER("variance_threshold could not be read", 0)
-}
-int PyBobLearnMiscPLDABase_setVarianceThreshold(PyBobLearnMiscPLDABaseObject* self, PyObject* value, void*){
-  BOB_TRY
-
-  if (!PyNumber_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects an float", Py_TYPE(self)->tp_name, variance_threshold.name());
-    return -1;
-  }
-
-  self->cxx->setVarianceThreshold(PyFloat_AS_DOUBLE(value));
-  BOB_CATCH_MEMBER("variance_threshold could not be set", -1)
-  return 0;
-}
-
-
-
-
-/***** sigma *****/
-static auto sigma = bob::extension::VariableDoc(
-  "sigma",
-  "array_like <float, 1D>",
-  "Gets the :math:`\\sigma` (diagonal) covariance matrix of the PLDA model",
-  ""
-);
-static PyObject* PyBobLearnMiscPLDABase_getSigma(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getSigma());
-  BOB_CATCH_MEMBER("sigma could not be read", 0)
-}
-int PyBobLearnMiscPLDABase_setSigma(PyBobLearnMiscPLDABaseObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, sigma.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "sigma");
-  if (!b) return -1;
-  self->cxx->setSigma(*b);
-  return 0;
-  BOB_CATCH_MEMBER("`sigma` vector could not be set", -1)
-}
-
-
-static PyGetSetDef PyBobLearnMiscPLDABase_getseters[] = { 
-  {
-   shape.name(),
-   (getter)PyBobLearnMiscPLDABase_getShape,
-   0,
-   shape.doc(),
-   0
-  },  
-  {
-   F.name(),
-   (getter)PyBobLearnMiscPLDABase_getF,
-   (setter)PyBobLearnMiscPLDABase_setF,
-   F.doc(),
-   0
-  },
-  {
-   G.name(),
-   (getter)PyBobLearnMiscPLDABase_getG,
-   (setter)PyBobLearnMiscPLDABase_setG,
-   G.doc(),
-   0
-  },
-  {
-   mu.name(),
-   (getter)PyBobLearnMiscPLDABase_getMu,
-   (setter)PyBobLearnMiscPLDABase_setMu,
-   mu.doc(),
-   0
-  },
-  {
-   __isigma__.name(),
-   (getter)PyBobLearnMiscPLDABase_getISigma,
-   0,
-   __isigma__.doc(),
-   0
-  },
-  {
-   __alpha__.name(),
-   (getter)PyBobLearnMiscPLDABase_getAlpha,
-   0,
-   __alpha__.doc(),
-   0
-  },
-  {
-   __beta__.name(),
-   (getter)PyBobLearnMiscPLDABase_getBeta,
-   0,
-   __beta__.doc(),
-   0
-  },
-  {
-  __ft_beta__.name(),
-   (getter)PyBobLearnMiscPLDABase_getFtBeta,
-   0,
-   __ft_beta__.doc(),
-   0
-  },
-  {
-  __gt_i_sigma__.name(),
-   (getter)PyBobLearnMiscPLDABase_getGtISigma,
-   0,
-   __gt_i_sigma__.doc(),
-   0
-  },
-  {
-  __logdet_alpha__.name(),
-   (getter)PyBobLearnMiscPLDABase_getLogDetAlpha,
-   0,
-   __logdet_alpha__.doc(),
-   0
-  },
-  {
-  __logdet_sigma__.name(),
-   (getter)PyBobLearnMiscPLDABase_getLogDetSigma,
-   0,
-   __logdet_sigma__.doc(),
-   0
-  },
-  {
-   sigma.name(),
-   (getter)PyBobLearnMiscPLDABase_getSigma,
-   (setter)PyBobLearnMiscPLDABase_setSigma,
-   sigma.doc(),
-   0
-  },
-  {
-   variance_threshold.name(),
-   (getter)PyBobLearnMiscPLDABase_getVarianceThreshold,
-   (setter)PyBobLearnMiscPLDABase_setVarianceThreshold,
-   variance_threshold.doc(),
-   0
-  },
-  {0}  // Sentinel
-};
-
-
-/******************************************************************/
-/************ Functions Section ***********************************/
-/******************************************************************/
-
-
-/*** save ***/
-static auto save = bob::extension::FunctionDoc(
-  "save",
-  "Save the configuration of the PLDABase to a given HDF5 file"
-)
-.add_prototype("hdf5")
-.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing");
-static PyObject* PyBobLearnMiscPLDABase_Save(PyBobLearnMiscPLDABaseObject* self,  PyObject* args, PyObject* kwargs) {
-
-  BOB_TRY
-  
-  // get list of arguments
-  char** kwlist = save.kwlist(0);  
-  PyBobIoHDF5FileObject* hdf5;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-
-  auto hdf5_ = make_safe(hdf5);
-  self->cxx->save(*hdf5->f);
-
-  BOB_CATCH_MEMBER("cannot save the data", 0)
-  Py_RETURN_NONE;
-}
-
-/*** load ***/
-static auto load = bob::extension::FunctionDoc(
-  "load",
-  "Load the configuration of the PLDABase to a given HDF5 file"
-)
-.add_prototype("hdf5")
-.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
-static PyObject* PyBobLearnMiscPLDABase_Load(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = load.kwlist(0);  
-  PyBobIoHDF5FileObject* hdf5;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-  
-  auto hdf5_ = make_safe(hdf5);  
-  self->cxx->load(*hdf5->f);
-
-  BOB_CATCH_MEMBER("cannot load the data", 0)
-  Py_RETURN_NONE;
-}
-
-
-/*** is_similar_to ***/
-static auto is_similar_to = bob::extension::FunctionDoc(
-  "is_similar_to",
-  
-  "Compares this PLDABase with the ``other`` one to be approximately the same.",
-  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
-  "relative and absolute precision for the ``weights``, ``biases`` "
-  "and any other values internal to this machine."
-)
-.add_prototype("other, [r_epsilon], [a_epsilon]","output")
-.add_parameter("other", ":py:class:`bob.learn.misc.PLDABase`", "A PLDABase object to be compared.")
-.add_parameter("r_epsilon", "float", "Relative precision.")
-.add_parameter("a_epsilon", "float", "Absolute precision.")
-.add_return("output","bool","True if it is similar, otherwise false.");
-static PyObject* PyBobLearnMiscPLDABase_IsSimilarTo(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwds) {
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = is_similar_to.kwlist(0);
-
-  //PyObject* other = 0;
-  PyBobLearnMiscPLDABaseObject* other = 0;
-  double r_epsilon = 1.e-5;
-  double a_epsilon = 1.e-8;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
-        &PyBobLearnMiscPLDABase_Type, &other,
-        &r_epsilon, &a_epsilon)){
-
-        is_similar_to.print_usage(); 
-        return 0;        
-  }
-
-  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
-    Py_RETURN_TRUE;
-  else
-    Py_RETURN_FALSE;
-}
-
-
-/*** resize ***/
-static auto resize = bob::extension::FunctionDoc(
-  "resize",
-  "Resizes the dimensionality of the PLDA model. Paramaters :math:`\\mu`, :math:`\\F`, :math:`\\G` and :math:`\\Sigma` are reinitialized.",
-  0,
-  true
-)
-.add_prototype("dim_D,dim_F,dim_G")
-.add_parameter("dim_D", "int", "Dimensionality of the feature vector.")
-.add_parameter("dim_F", "int", "Size of :math:`F`(between class variantion matrix).")
-.add_parameter("dim_G", "int", "Size of :math:`F`(within class variantion matrix).");
-static PyObject* PyBobLearnMiscPLDABase_resize(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = resize.kwlist(0);
-
-  int dim_D, dim_F, dim_G = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "iii", kwlist, &dim_D, &dim_F, &dim_G)) Py_RETURN_NONE;
-
-  if(dim_D <= 0){
-    PyErr_Format(PyExc_TypeError, "dim_D argument must be greater than or equal to one");
-    Py_RETURN_NONE;
-  }
-  
-  if(dim_F <= 0){
-    PyErr_Format(PyExc_TypeError, "dim_F argument must be greater than or equal to one");
-    Py_RETURN_NONE;
-  }
-
-  if(dim_G <= 0){
-    PyErr_Format(PyExc_TypeError, "dim_G argument must be greater than or equal to one");
-    Py_RETURN_NONE;
-  }
-
-  self->cxx->resize(dim_D, dim_F, dim_G);
-
-  BOB_CATCH_MEMBER("cannot perform the resize method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/***** get_gamma *****/
-static auto get_gamma = bob::extension::FunctionDoc(
-  "get_gamma",
-  "Gets the :math:`\\gamma_a` matrix for a given :math:`a` (number of samples). "
-  ":math:`gamma_{a} = (Id + a F^T \beta F)^{-1} = \\mathcal{F}_{a}`",
-  0,
-  true
-)
-.add_prototype("a","output")
-.add_parameter("a", "int", "Index")
-.add_return("output","array_like <float, 2D>","Get the :math:`\\gamma` matrix");
-static PyObject* PyBobLearnMiscPLDABase_getGamma(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = get_gamma.kwlist(0);
-
-  int i = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) Py_RETURN_NONE;
-
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getGamma(i));
-  BOB_CATCH_MEMBER("`get_gamma` could not be read", 0)
-}
-
-
-/***** has_gamma *****/
-static auto has_gamma = bob::extension::FunctionDoc(
-  "has_gamma",
-  "Tells if the :math:`gamma_a` matrix for a given a (number of samples) exists. "
-  ":math:`gamma_a=(Id + a F^T \\beta F)^{-1}`",
-  0,
-  true
-)
-.add_prototype("a","output")
-.add_parameter("a", "int", "Index")
-.add_return("output","bool","");
-static PyObject* PyBobLearnMiscPLDABase_hasGamma(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = has_gamma.kwlist(0);
-  int i = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) Py_RETURN_NONE;
-
-  if(self->cxx->hasGamma(i))
-    Py_RETURN_TRUE;
-  else
-    Py_RETURN_FALSE;
- BOB_CATCH_MEMBER("`has_gamma` could not be read", 0)    
-}
-
-
-/***** compute_gamma *****/
-static auto compute_gamma = bob::extension::FunctionDoc(
-  "compute_gamma",
-  "Tells if the :math:`gamma_a` matrix for a given a (number of samples) exists."
-  ":math:`gamma_a = (Id + a F^T \\beta F)^{-1}`",
-  0,
-  true
-)
-.add_prototype("a,res","")
-.add_parameter("a", "int", "Index")
-.add_parameter("res", "array_like <float, 2D>", "Input data");
-static PyObject* PyBobLearnMiscPLDABase_computeGamma(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = compute_gamma.kwlist(0);
-  int i = 0;
-  PyBlitzArrayObject* res = 0;  
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "iO&", kwlist, &i, &PyBlitzArray_Converter, &res)) Py_RETURN_NONE;
-
-  auto res_ = make_safe(res);  
-
-  self->cxx->computeGamma(i,*PyBlitzArrayCxx_AsBlitz<double,2>(res));
-  Py_RETURN_NONE;
-  BOB_CATCH_MEMBER("`compute_gamma` could not be read", 0)    
-}
-
-/***** get_add_gamma *****/
-static auto get_add_gamma = bob::extension::FunctionDoc(
-  "get_add_gamma",
-   "Gets the :math:`gamma_a` matrix for a given :math:`f_a` (number of samples)."
-   ":math:`gamma_a = (Id + a F^T \\beta F)^{-1} = \\mathcal{F}_{a}`."
-   "Tries to find it from the base machine and then from this machine.",
-  0,
-  true
-)
-.add_prototype("a","output")
-.add_parameter("a", "int", "Index")
-.add_return("output","array_like <float, 2D>","");
-static PyObject* PyBobLearnMiscPLDABase_getAddGamma(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = get_add_gamma.kwlist(0);
-
-  int i = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) Py_RETURN_NONE;
-
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAddGamma(i));
-  BOB_CATCH_MEMBER("`get_add_gamma` could not be read", 0)
-}
-
-
-/***** has_log_like_const_term *****/
-static auto has_log_like_const_term = bob::extension::FunctionDoc(
-  "has_log_like_const_term",
-   "Tells if the log likelihood constant term for a given :math:`a` (number of samples) exists in this machine (does not check the base machine). "
-   ":math:`l_{a}=\\frac{a}{2} ( -D log(2\\pi) -log|\\Sigma| +log|\\alpha| +log|\\gamma_a|)`",
-  0,
-  true
-)
-.add_prototype("a","output")
-.add_parameter("a", "int", "Index")
-.add_return("output","bool","");
-static PyObject* PyBobLearnMiscPLDABase_hasLogLikeConstTerm(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = has_log_like_const_term.kwlist(0);
-  int i = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) Py_RETURN_NONE;
-
-  if(self->cxx->hasLogLikeConstTerm(i))
-    Py_RETURN_TRUE;
-  else
-    Py_RETURN_FALSE;
- BOB_CATCH_MEMBER("`has_log_like_const_term` could not be read", 0)    
-}
-
-
-/***** compute_log_like_const_term" *****/
-static auto compute_log_like_const_term = bob::extension::FunctionDoc(
-  "compute_log_like_const_term",
-  "Computes the log likelihood constant term for a given :math:`a` (number of samples), given the provided :math:`gamma_a` matrix. "
-  ":math:`l_{a} = \\frac{a}{2} ( -D log(2\\pi) -log|\\Sigma| +log|\\alpha| +log|\\gamma_a|)`",
-
-  0,
-  true
-)
-.add_prototype("a,res","")
-.add_parameter("a", "int", "Index")
-.add_parameter("res", "array_like <float, 2D>", "Input data");
-static PyObject* PyBobLearnMiscPLDABase_computeLogLikeConstTerm(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = compute_log_like_const_term.kwlist(0);
-  int i = 0;
-  PyBlitzArrayObject* res = 0;  
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "iO&", kwlist, &i, &PyBlitzArray_Converter, &res)) Py_RETURN_NONE;
-
-  auto res_ = make_safe(res);  
-
-  self->cxx->computeLogLikeConstTerm(i,*PyBlitzArrayCxx_AsBlitz<double,2>(res));
-  Py_RETURN_NONE;
-  BOB_CATCH_MEMBER("`compute_gamma` could not be read", 0)    
-}
-
-
-/***** get_add_log_like_const_term *****/
-static auto get_add_log_like_const_term = bob::extension::FunctionDoc(
-  "get_add_log_like_const_term",
-
-   "Gets the log likelihood constant term for a given :math:`a` (number of samples). "
-   ":math:`l_{a} = \\frac{a}{2} ( -D log(2\\pi) -log|\\Sigma| +log|\\alpha| +log|\\gamma_a|)`",
-  0,
-  true
-)
-.add_prototype("a","output")
-.add_parameter("a", "int", "Index")
-.add_return("output","double","");
-static PyObject* PyBobLearnMiscPLDABase_getAddLogLikeConstTerm(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = get_add_log_like_const_term.kwlist(0);
-  int i = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) Py_RETURN_NONE;
-
-  return Py_BuildValue("d",self->cxx->getAddLogLikeConstTerm(i));
-
-  BOB_CATCH_MEMBER("`get_add_log_like_const_term` could not be read", 0)    
-}
-
-
-/***** get_log_like_const_term *****/
-static auto get_log_like_const_term = bob::extension::FunctionDoc(
-  "get_log_like_const_term",
-   "Gets the log likelihood constant term for a given :math:`a` (number of samples). "
-    ":math:`l_{a} = \\frac{a}{2} ( -D log(2\\pi) -log|\\Sigma| +log|\\alpha| +log|\\gamma_a|)",
-  0,
-  true
-)
-.add_prototype("a","output")
-.add_parameter("a", "int", "Index")
-.add_return("output","double","");
-static PyObject* PyBobLearnMiscPLDABase_getLogLikeConstTerm(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = get_log_like_const_term.kwlist(0);
-  int i = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) Py_RETURN_NONE;
-
-  return Py_BuildValue("d",self->cxx->getLogLikeConstTerm(i));
-
-  BOB_CATCH_MEMBER("`get_log_like_const_term` could not be read", 0)    
-}
-
-/***** clear_maps *****/
-static auto clear_maps = bob::extension::FunctionDoc(
-  "clear_maps",
-  "Clears the maps (:math:`gamma_a` and loglike_constterm_a).",
-  0,
-  true
-);
-static PyObject* PyBobLearnMiscPLDABase_clearMaps(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  self->cxx->clearMaps();
-  Py_RETURN_NONE;
-
-  BOB_CATCH_MEMBER("`clear_maps` could not be read", 0)    
-}
-
-
-/***** compute_log_likelihood_point_estimate *****/
-static auto compute_log_likelihood_point_estimate = bob::extension::FunctionDoc(
-  "compute_log_likelihood_point_estimate",
-   "Gets the log-likelihood of an observation, given the current model and the latent variables (point estimate)."
-   "This will basically compute :math:`p(x_{ij} | h_{i}, w_{ij}, \\Theta)`, given by "
-   ":math:`\\mathcal{N}(x_{ij}|[\\mu + F h_{i} + G w_{ij} + \\epsilon_{ij}, \\Sigma])`, which is in logarithm, "
-   ":math:`\\frac{D}{2} log(2\\pi) -\\frac{1}{2} log(det(\\Sigma)) -\\frac{1}{2} {(x_{ij}-(\\mu+F h_{i}+G w_{ij}))^{T}\\Sigma^{-1}(x_{ij}-(\\mu+F h_{i}+G w_{ij}))}`",
-  0,
-  true
-)
-.add_prototype("xij,hi,wij","output")
-.add_parameter("xij", "array_like <float, 1D>", "")
-.add_parameter("hi", "array_like <float, 1D>", "")
-.add_parameter("wij", "array_like <float, 1D>", "")
-.add_return("output", "double", "");
-static PyObject* PyBobLearnMiscPLDABase_computeLogLikelihoodPointEstimate(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = compute_log_likelihood_point_estimate.kwlist(0);
-  PyBlitzArrayObject* xij, *hi, *wij;  
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&O&", kwlist, &PyBlitzArray_Converter, &xij,
-                                                               &PyBlitzArray_Converter, &hi,
-                                                               &PyBlitzArray_Converter, &wij)) return 0;
-
-  auto xij_ = make_safe(xij);
-  auto hi_ = make_safe(hi);
-  auto wij_ = make_safe(wij);  
-
-  return Py_BuildValue("d", self->cxx->computeLogLikelihoodPointEstimate(*PyBlitzArrayCxx_AsBlitz<double,1>(xij), *PyBlitzArrayCxx_AsBlitz<double,1>(hi), *PyBlitzArrayCxx_AsBlitz<double,1>(wij)));
-  
-  BOB_CATCH_MEMBER("`compute_log_likelihood_point_estimate` could not be read", 0)    
-}
-
-/***** __precompute__ *****/
-static auto __precompute__ = bob::extension::FunctionDoc(
-  "__precompute__",
-  "Precomputes useful values for the log likelihood "
-  ":math:`\\log(\\det(\\alpha))` and :math:`\\log(\\det(\\Sigma))`.",
-  0,
-  true
-);
-static PyObject* PyBobLearnMiscPLDABase_precompute(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  self->cxx->precompute();
-  Py_RETURN_NONE;
-
-  BOB_CATCH_MEMBER("`precompute` could not be read", 0)    
-}
-
-
-/***** __precompute_log_like__ *****/
-static auto __precompute_log_like__ = bob::extension::FunctionDoc(
-  "__precompute_log_like__",
-
-  "Precomputes useful values for the log likelihood "
-  ":math:`\\log(\\det(\\alpha))` and :math:`\\log(\\det(\\Sigma))`.",
-
-  0,
-  true
-);
-static PyObject* PyBobLearnMiscPLDABase_precomputeLogLike(PyBobLearnMiscPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  self->cxx->precomputeLogLike();
-  Py_RETURN_NONE;
-
-  BOB_CATCH_MEMBER("`__precompute_log_like__` could not be read", 0)    
-}
-
-
-static PyMethodDef PyBobLearnMiscPLDABase_methods[] = {
-  {
-    save.name(),
-    (PyCFunction)PyBobLearnMiscPLDABase_Save,
-    METH_VARARGS|METH_KEYWORDS,
-    save.doc()
-  },
-  {
-    load.name(),
-    (PyCFunction)PyBobLearnMiscPLDABase_Load,
-    METH_VARARGS|METH_KEYWORDS,
-    load.doc()
-  },
-  {
-    is_similar_to.name(),
-    (PyCFunction)PyBobLearnMiscPLDABase_IsSimilarTo,
-    METH_VARARGS|METH_KEYWORDS,
-    is_similar_to.doc()
-  },
-  {
-    resize.name(),
-    (PyCFunction)PyBobLearnMiscPLDABase_resize,
-    METH_VARARGS|METH_KEYWORDS,
-    resize.doc()
-  },
-  {
-    get_gamma.name(),
-    (PyCFunction)PyBobLearnMiscPLDABase_getGamma,
-    METH_VARARGS|METH_KEYWORDS,
-    get_gamma.doc()
-  },
-  {
-    has_gamma.name(),
-    (PyCFunction)PyBobLearnMiscPLDABase_hasGamma,
-    METH_VARARGS|METH_KEYWORDS,
-    has_gamma.doc()
-  },
-  {
-    compute_gamma.name(),
-    (PyCFunction)PyBobLearnMiscPLDABase_computeGamma,
-    METH_VARARGS|METH_KEYWORDS,
-    compute_gamma.doc()
-  },
-  {
-    get_add_gamma.name(),
-    (PyCFunction)PyBobLearnMiscPLDABase_getAddGamma,
-    METH_VARARGS|METH_KEYWORDS,
-    get_add_gamma.doc()
-  },
-  {
-    has_log_like_const_term.name(),
-    (PyCFunction)PyBobLearnMiscPLDABase_hasLogLikeConstTerm,
-    METH_VARARGS|METH_KEYWORDS,
-    has_log_like_const_term.doc()
-  },  
-  {
-    compute_log_like_const_term.name(),
-    (PyCFunction)PyBobLearnMiscPLDABase_computeLogLikeConstTerm,
-    METH_VARARGS|METH_KEYWORDS,
-    compute_log_like_const_term.doc()
-  },  
-  {
-    get_add_log_like_const_term.name(),
-    (PyCFunction)PyBobLearnMiscPLDABase_getAddLogLikeConstTerm,
-    METH_VARARGS|METH_KEYWORDS,
-    get_add_log_like_const_term.doc()
-  },  
-  {
-    get_log_like_const_term.name(),
-    (PyCFunction)PyBobLearnMiscPLDABase_getLogLikeConstTerm,
-    METH_VARARGS|METH_KEYWORDS,
-    get_log_like_const_term.doc()
-  },  
-  {
-    clear_maps.name(),
-    (PyCFunction)PyBobLearnMiscPLDABase_clearMaps,
-    METH_NOARGS,
-    clear_maps.doc()
-  },
-  {
-    compute_log_likelihood_point_estimate.name(),
-    (PyCFunction)PyBobLearnMiscPLDABase_computeLogLikelihoodPointEstimate,
-    METH_VARARGS|METH_KEYWORDS,
-    compute_log_likelihood_point_estimate.doc()
-  },
-  {
-    __precompute__.name(),
-    (PyCFunction)PyBobLearnMiscPLDABase_precompute,
-    METH_NOARGS,
-    __precompute__.doc()
-  },   
-  {
-    __precompute_log_like__.name(),
-    (PyCFunction)PyBobLearnMiscPLDABase_precomputeLogLike,
-    METH_NOARGS,
-    __precompute_log_like__.doc()
-  },     
-  {0} /* Sentinel */
-};
-
-
-/******************************************************************/
-/************ Module Section **************************************/
-/******************************************************************/
-
-// Define the JFA type struct; will be initialized later
-PyTypeObject PyBobLearnMiscPLDABase_Type = {
-  PyVarObject_HEAD_INIT(0,0)
-  0
-};
-
-bool init_BobLearnMiscPLDABase(PyObject* module)
-{
-  // initialize the type struct
-  PyBobLearnMiscPLDABase_Type.tp_name      = PLDABase_doc.name();
-  PyBobLearnMiscPLDABase_Type.tp_basicsize = sizeof(PyBobLearnMiscPLDABaseObject);
-  PyBobLearnMiscPLDABase_Type.tp_flags     = Py_TPFLAGS_DEFAULT;
-  PyBobLearnMiscPLDABase_Type.tp_doc       = PLDABase_doc.doc();
-
-  // set the functions
-  PyBobLearnMiscPLDABase_Type.tp_new         = PyType_GenericNew;
-  PyBobLearnMiscPLDABase_Type.tp_init        = reinterpret_cast<initproc>(PyBobLearnMiscPLDABase_init);
-  PyBobLearnMiscPLDABase_Type.tp_dealloc     = reinterpret_cast<destructor>(PyBobLearnMiscPLDABase_delete);
-  PyBobLearnMiscPLDABase_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnMiscPLDABase_RichCompare);
-  PyBobLearnMiscPLDABase_Type.tp_methods     = PyBobLearnMiscPLDABase_methods;
-  PyBobLearnMiscPLDABase_Type.tp_getset      = PyBobLearnMiscPLDABase_getseters;
-  //PyBobLearnMiscPLDABase_Type.tp_call = reinterpret_cast<ternaryfunc>(PyBobLearnMiscPLDABase_forward);
-
-
-  // check that everything is fine
-  if (PyType_Ready(&PyBobLearnMiscPLDABase_Type) < 0) return false;
-
-  // add the type to the module
-  Py_INCREF(&PyBobLearnMiscPLDABase_Type);
-  return PyModule_AddObject(module, "PLDABase", (PyObject*)&PyBobLearnMiscPLDABase_Type) >= 0;
-}
-
diff --git a/bob/learn/misc/plda_machine.cpp b/bob/learn/misc/plda_machine.cpp
deleted file mode 100644
index d5cd1c8fe1cbad4918ad33a1da835956cdf24683..0000000000000000000000000000000000000000
--- a/bob/learn/misc/plda_machine.cpp
+++ /dev/null
@@ -1,801 +0,0 @@
-/**
- * @date Thu Jan 30 11:10:15 2015 +0200
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- *
- * @brief Python API for bob::learn::em
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "main.h"
-
-/******************************************************************/
-/************ Constructor Section *********************************/
-/******************************************************************/
-
-static inline bool f(PyObject* o){return o != 0 && PyObject_IsTrue(o) > 0;}  /* converts PyObject to bool and returns false if object is NULL */
-
-static auto PLDAMachine_doc = bob::extension::ClassDoc(
-  BOB_EXT_MODULE_PREFIX ".PLDAMachine",
-
-  "This class is a container for an enrolled identity/class. It contains information extracted from the enrollment samples. "
-  "It should be used in combination with a PLDABase instance."
-  "References: [ElShafey2014,PrinceElder2007,LiFu2012]",
-  ""
-).add_constructor(
-  bob::extension::FunctionDoc(
-    "__init__",
-
-     "Constructor, builds a new PLDAMachine.",
-
-    "",
-    true
-  )
-  .add_prototype("plda_base","")
-  .add_prototype("other","")
-  .add_prototype("hdf5,plda_base","")
-
-  .add_parameter("plda_base", "`bob.learn.misc.PLDABase`", "")  
-  .add_parameter("other", ":py:class:`bob.learn.misc.PLDAMachine`", "A PLDAMachine object to be copied.")
-  .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
-
-);
-
-
-static int PyBobLearnMiscPLDAMachine_init_copy(PyBobLearnMiscPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = PLDAMachine_doc.kwlist(1);
-  PyBobLearnMiscPLDAMachineObject* o;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscPLDAMachine_Type, &o)){
-    PLDAMachine_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::PLDAMachine(*o->cxx));
-  return 0;
-}
-
-
-static int PyBobLearnMiscPLDAMachine_init_hdf5(PyBobLearnMiscPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = PLDAMachine_doc.kwlist(2);
-
-  PyBobIoHDF5FileObject* config = 0;
-  PyBobLearnMiscPLDABaseObject* plda_base;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O!", kwlist, &PyBobIoHDF5File_Converter, &config,
-                                                                 &PyBobLearnMiscPLDABase_Type, &plda_base)){
-    PLDAMachine_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::PLDAMachine(*(config->f),plda_base->cxx));
-
-  return 0;
-}
-
-
-static int PyBobLearnMiscPLDAMachine_init_pldabase(PyBobLearnMiscPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = PLDAMachine_doc.kwlist(0);  
-  PyBobLearnMiscPLDABaseObject* plda_base;
-  
-  //Here we have to select which keyword argument to read  
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscPLDABase_Type, &plda_base)){
-    PLDAMachine_doc.print_usage();
-    return -1;
-  }
-  
-  self->cxx.reset(new bob::learn::misc::PLDAMachine(plda_base->cxx));
-  return 0;
-}
-
-static int PyBobLearnMiscPLDAMachine_init(PyBobLearnMiscPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // get the number of command line arguments
-  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
- 
-  if(nargs==1){
-    //Reading the input argument
-    PyObject* arg = 0;
-    if (PyTuple_Size(args))
-      arg = PyTuple_GET_ITEM(args, 0);
-    else {
-      PyObject* tmp = PyDict_Values(kwargs);
-      auto tmp_ = make_safe(tmp);
-      arg = PyList_GET_ITEM(tmp, 0);
-    }
-
-    // If the constructor input is Gaussian object
-    if (PyBobLearnMiscPLDAMachine_Check(arg))
-      return PyBobLearnMiscPLDAMachine_init_copy(self, args, kwargs);
-    // If the constructor input is a HDF5
-    else if (PyBobLearnMiscPLDABase_Check(arg))
-      return PyBobLearnMiscPLDAMachine_init_pldabase(self, args, kwargs);
-  }
-  else if(nargs==2)
-    return PyBobLearnMiscPLDAMachine_init_hdf5(self, args, kwargs);
-  else{
-    PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires 1 or 2 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
-    PLDAMachine_doc.print_usage();
-    return -1;
-  }
-  BOB_CATCH_MEMBER("cannot create PLDAMachine", 0)
-  return 0;
-}
-
-
-
-static void PyBobLearnMiscPLDAMachine_delete(PyBobLearnMiscPLDAMachineObject* self) {
-  self->cxx.reset();
-  Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-static PyObject* PyBobLearnMiscPLDAMachine_RichCompare(PyBobLearnMiscPLDAMachineObject* self, PyObject* other, int op) {
-  BOB_TRY
-
-  if (!PyBobLearnMiscPLDAMachine_Check(other)) {
-    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
-    return 0;
-  }
-  auto other_ = reinterpret_cast<PyBobLearnMiscPLDAMachineObject*>(other);
-  switch (op) {
-    case Py_EQ:
-      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
-    case Py_NE:
-      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
-    default:
-      Py_INCREF(Py_NotImplemented);
-      return Py_NotImplemented;
-  }
-  BOB_CATCH_MEMBER("cannot compare PLDAMachine objects", 0)
-}
-
-int PyBobLearnMiscPLDAMachine_Check(PyObject* o) {
-  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnMiscPLDAMachine_Type));
-}
-
-
-/******************************************************************/
-/************ Variables Section ***********************************/
-/******************************************************************/
-
-/***** shape *****/
-static auto shape = bob::extension::VariableDoc(
-  "shape",
-  "(int,int, int)",
-  "A tuple that represents the dimensionality of the feature vector :math:`dim_d`, the :math:`F` matrix and the :math:`G` matrix.",
-  ""
-);
-PyObject* PyBobLearnMiscPLDAMachine_getShape(PyBobLearnMiscPLDAMachineObject* self, void*) {
-  BOB_TRY
-  return Py_BuildValue("(i,i,i)", self->cxx->getDimD(), self->cxx->getDimF(), self->cxx->getDimG());
-  BOB_CATCH_MEMBER("shape could not be read", 0)
-}
-
-
-/***** n_samples *****/
-static auto n_samples = bob::extension::VariableDoc(
-  "n_samples",
-  "int",
-  "Number of enrolled samples",
-  ""
-);
-static PyObject* PyBobLearnMiscPLDAMachine_getNSamples(PyBobLearnMiscPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  return Py_BuildValue("i",self->cxx->getNSamples());
-  BOB_CATCH_MEMBER("n_samples could not be read", 0)
-}
-int PyBobLearnMiscPLDAMachine_setNSamples(PyBobLearnMiscPLDAMachineObject* self, PyObject* value, void*){
-  BOB_TRY
-
-  if (!PyInt_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects an int", Py_TYPE(self)->tp_name, n_samples.name());
-    return -1;
-  }
-
-  if (PyInt_AS_LONG(value) < 0){
-    PyErr_Format(PyExc_TypeError, "n_samples must be greater than or equal to zero");
-    return -1;
-  }
-
-  self->cxx->setNSamples(PyInt_AS_LONG(value));
-  BOB_CATCH_MEMBER("n_samples could not be set", -1)
-  return 0;
-}
-
-
-/***** w_sum_xit_beta_xi *****/
-static auto w_sum_xit_beta_xi = bob::extension::VariableDoc(
-  "w_sum_xit_beta_xi",
-  "double",
-  "Gets the :math:`A = -0.5 \\sum_{i} x_{i}^T \\beta x_{i}` value",
-  ""
-);
-static PyObject* PyBobLearnMiscPLDAMachine_getWSumXitBetaXi(PyBobLearnMiscPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  return Py_BuildValue("d",self->cxx->getWSumXitBetaXi());
-  BOB_CATCH_MEMBER("w_sum_xit_beta_xi could not be read", 0)
-}
-int PyBobLearnMiscPLDAMachine_setWSumXitBetaXi(PyBobLearnMiscPLDAMachineObject* self, PyObject* value, void*){
-  BOB_TRY
-
-  if (!PyNumber_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects an float", Py_TYPE(self)->tp_name, w_sum_xit_beta_xi.name());
-    return -1;
-  }
-
-  self->cxx->setWSumXitBetaXi(PyFloat_AS_DOUBLE(value));
-  BOB_CATCH_MEMBER("w_sum_xit_beta_xi could not be set", -1)
-  return 0;
-}
-
-
-/***** plda_base *****/
-static auto plda_base = bob::extension::VariableDoc(
-  "plda_base",
-  ":py:class:`bob.learn.misc.PLDABase`",
-  "The PLDABase attached to this machine",
-  ""
-);
-PyObject* PyBobLearnMiscPLDAMachine_getPLDABase(PyBobLearnMiscPLDAMachineObject* self, void*){
-  BOB_TRY
-
-  boost::shared_ptr<bob::learn::misc::PLDABase> plda_base_o = self->cxx->getPLDABase();
-
-  //Allocating the correspondent python object
-  PyBobLearnMiscPLDABaseObject* retval =
-    (PyBobLearnMiscPLDABaseObject*)PyBobLearnMiscPLDABase_Type.tp_alloc(&PyBobLearnMiscPLDABase_Type, 0);
-  retval->cxx = plda_base_o;
-
-  return Py_BuildValue("O",retval);
-  BOB_CATCH_MEMBER("plda_base could not be read", 0)
-}
-int PyBobLearnMiscPLDAMachine_setPLDABase(PyBobLearnMiscPLDAMachineObject* self, PyObject* value, void*){
-  BOB_TRY
-
-  if (!PyBobLearnMiscPLDABase_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a :py:class:`bob.learn.misc.PLDABase`", Py_TYPE(self)->tp_name, plda_base.name());
-    return -1;
-  }
-
-  PyBobLearnMiscPLDABaseObject* plda_base_o = 0;
-  PyArg_Parse(value, "O!", &PyBobLearnMiscPLDABase_Type,&plda_base_o);
-
-  self->cxx->setPLDABase(plda_base_o->cxx);
-
-  return 0;
-  BOB_CATCH_MEMBER("plda_base could not be set", -1)  
-}
-
-
-/***** weighted_sum *****/
-static auto weighted_sum = bob::extension::VariableDoc(
-  "weighted_sum",
-  "array_like <float, 1D>",
-  "Get/Set :math:``\\sum_{i} F^T \\beta x_{i}` value",
-  ""
-);
-static PyObject* PyBobLearnMiscPLDAMachine_getWeightedSum(PyBobLearnMiscPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getWeightedSum());
-  BOB_CATCH_MEMBER("weighted_sum could not be read", 0)
-}
-int PyBobLearnMiscPLDAMachine_setWeightedSum(PyBobLearnMiscPLDAMachineObject* self, PyObject* value, void*){
-  BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, weighted_sum.name());
-    return -1;
-  }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "weighted_sum");
-  if (!b) return -1;
-  self->cxx->setWeightedSum(*b);
-  return 0;
-  BOB_CATCH_MEMBER("`weighted_sum` vector could not be set", -1)
-}
-
-
-/***** log_likelihood *****/
-static auto log_likelihood = bob::extension::VariableDoc(
-  "log_likelihood",
-  "double",
-  "",
-  ""
-);
-static PyObject* PyBobLearnMiscPLDAMachine_getLogLikelihood(PyBobLearnMiscPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  return Py_BuildValue("d",self->cxx->getLogLikelihood());
-  BOB_CATCH_MEMBER("log_likelihood could not be read", 0)
-}
-int PyBobLearnMiscPLDAMachine_setLogLikelihood(PyBobLearnMiscPLDAMachineObject* self, PyObject* value, void*){
-  BOB_TRY
-
-  if (!PyNumber_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects an double", Py_TYPE(self)->tp_name, log_likelihood.name());
-    return -1;
-  }
-
-  self->cxx->setLogLikelihood(PyFloat_AS_DOUBLE(value));
-  BOB_CATCH_MEMBER("log_likelihood could not be set", -1)
-  return 0;
-}
-
-
-static PyGetSetDef PyBobLearnMiscPLDAMachine_getseters[] = { 
-  {
-   shape.name(),
-   (getter)PyBobLearnMiscPLDAMachine_getShape,
-   0,
-   shape.doc(),
-   0
-  },  
-  {
-   n_samples.name(),
-   (getter)PyBobLearnMiscPLDAMachine_getNSamples,
-   (setter)PyBobLearnMiscPLDAMachine_setNSamples,
-   n_samples.doc(),
-   0
-  },  
-  {
-   w_sum_xit_beta_xi.name(),
-   (getter)PyBobLearnMiscPLDAMachine_getWSumXitBetaXi,
-   (setter)PyBobLearnMiscPLDAMachine_setWSumXitBetaXi,
-   w_sum_xit_beta_xi.doc(),
-   0
-  },
-  {
-   plda_base.name(),
-   (getter)PyBobLearnMiscPLDAMachine_getPLDABase,
-   (setter)PyBobLearnMiscPLDAMachine_setPLDABase,
-   plda_base.doc(),
-   0
-  },
-  {
-   weighted_sum.name(),
-   (getter)PyBobLearnMiscPLDAMachine_getWeightedSum,
-   (setter)PyBobLearnMiscPLDAMachine_setWeightedSum,
-   weighted_sum.doc(),
-   0
-  },
-  {
-   log_likelihood.name(),
-   (getter)PyBobLearnMiscPLDAMachine_getLogLikelihood,
-   (setter)PyBobLearnMiscPLDAMachine_setLogLikelihood,
-   log_likelihood.doc(),
-   0
-  },
-  {0}  // Sentinel
-};
-
-
-
-/******************************************************************/
-/************ Functions Section ***********************************/
-/******************************************************************/
-
-
-/*** save ***/
-static auto save = bob::extension::FunctionDoc(
-  "save",
-  "Save the configuration of the PLDAMachine to a given HDF5 file"
-)
-.add_prototype("hdf5")
-.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing");
-static PyObject* PyBobLearnMiscPLDAMachine_Save(PyBobLearnMiscPLDAMachineObject* self,  PyObject* args, PyObject* kwargs) {
-
-  BOB_TRY
-  
-  // get list of arguments
-  char** kwlist = save.kwlist(0);  
-  PyBobIoHDF5FileObject* hdf5;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-
-  auto hdf5_ = make_safe(hdf5);
-  self->cxx->save(*hdf5->f);
-
-  BOB_CATCH_MEMBER("cannot save the data", 0)
-  Py_RETURN_NONE;
-}
-
-/*** load ***/
-static auto load = bob::extension::FunctionDoc(
-  "load",
-  "Load the configuration of the PLDAMachine to a given HDF5 file"
-)
-.add_prototype("hdf5")
-.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
-static PyObject* PyBobLearnMiscPLDAMachine_Load(PyBobLearnMiscPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = load.kwlist(0);  
-  PyBobIoHDF5FileObject* hdf5;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-  
-  auto hdf5_ = make_safe(hdf5);  
-  self->cxx->load(*hdf5->f);
-
-  BOB_CATCH_MEMBER("cannot load the data", 0)
-  Py_RETURN_NONE;
-}
-
-
-/*** is_similar_to ***/
-static auto is_similar_to = bob::extension::FunctionDoc(
-  "is_similar_to",
-  
-  "Compares this PLDAMachine with the ``other`` one to be approximately the same.",
-  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
-  "relative and absolute precision for the ``weights``, ``biases`` "
-  "and any other values internal to this machine."
-)
-.add_prototype("other, [r_epsilon], [a_epsilon]","output")
-.add_parameter("other", ":py:class:`bob.learn.misc.PLDAMachine`", "A PLDAMachine object to be compared.")
-.add_parameter("r_epsilon", "float", "Relative precision.")
-.add_parameter("a_epsilon", "float", "Absolute precision.")
-.add_return("output","bool","True if it is similar, otherwise false.");
-static PyObject* PyBobLearnMiscPLDAMachine_IsSimilarTo(PyBobLearnMiscPLDAMachineObject* self, PyObject* args, PyObject* kwds) {
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = is_similar_to.kwlist(0);
-
-  //PyObject* other = 0;
-  PyBobLearnMiscPLDAMachineObject* other = 0;
-  double r_epsilon = 1.e-5;
-  double a_epsilon = 1.e-8;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
-        &PyBobLearnMiscPLDAMachine_Type, &other,
-        &r_epsilon, &a_epsilon)){
-
-        is_similar_to.print_usage(); 
-        return 0;        
-  }
-
-  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
-    Py_RETURN_TRUE;
-  else
-    Py_RETURN_FALSE;
-}
-
-
-/***** get_gamma *****/
-static auto get_gamma = bob::extension::FunctionDoc(
-  "get_gamma",
-  "Gets the :math:`\\gamma_a` matrix for a given :math:`a` (number of samples). "
-  ":math:`gamma_{a} = (Id + a F^T \beta F)^{-1} = \\mathcal{F}_{a}`",
-  0,
-  true
-)
-.add_prototype("a","output")
-.add_parameter("a", "int", "Index")
-.add_return("output","array_like <float, 2D>","Get the :math:`\\gamma` matrix");
-static PyObject* PyBobLearnMiscPLDAMachine_getGamma(PyBobLearnMiscPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = get_gamma.kwlist(0);
-
-  int i = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) Py_RETURN_NONE;
-
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getGamma(i));
-  BOB_CATCH_MEMBER("`get_gamma` could not be read", 0)
-}
-
-
-/***** has_gamma *****/
-static auto has_gamma = bob::extension::FunctionDoc(
-  "has_gamma",
-  "Tells if the :math:`gamma_a` matrix for a given a (number of samples) exists. "
-  ":math:`gamma_a=(Id + a F^T \\beta F)^{-1}`",
-  0,
-  true
-)
-.add_prototype("a","output")
-.add_parameter("a", "int", "Index")
-.add_return("output","bool","");
-static PyObject* PyBobLearnMiscPLDAMachine_hasGamma(PyBobLearnMiscPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = has_gamma.kwlist(0);
-  int i = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) Py_RETURN_NONE;
-
-  if(self->cxx->hasGamma(i))
-    Py_RETURN_TRUE;
-  else
-    Py_RETURN_FALSE;
- BOB_CATCH_MEMBER("`has_gamma` could not be read", 0)    
-}
-
-
-/***** get_add_gamma *****/
-static auto get_add_gamma = bob::extension::FunctionDoc(
-  "get_add_gamma",
-   "Gets the :math:`gamma_a` matrix for a given :math:`f_a` (number of samples)."
-   ":math:`gamma_a = (Id + a F^T \\beta F)^{-1} = \\mathcal{F}_{a}`."
-   "Tries to find it from the base machine and then from this machine.",
-  0,
-  true
-)
-.add_prototype("a","output")
-.add_parameter("a", "int", "Index")
-.add_return("output","array_like <float, 2D>","");
-static PyObject* PyBobLearnMiscPLDAMachine_getAddGamma(PyBobLearnMiscPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = get_add_gamma.kwlist(0);
-
-  int i = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) Py_RETURN_NONE;
-
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAddGamma(i));
-  BOB_CATCH_MEMBER("`get_add_gamma` could not be read", 0)
-}
-
-
-/***** has_log_like_const_term *****/
-static auto has_log_like_const_term = bob::extension::FunctionDoc(
-  "has_log_like_const_term",
-   "Tells if the log likelihood constant term for a given :math:`a` (number of samples) exists in this machine (does not check the base machine). "
-   ":math:`l_{a}=\\frac{a}{2} ( -D log(2\\pi) -log|\\Sigma| +log|\\alpha| +log|\\gamma_a|)`",
-  0,
-  true
-)
-.add_prototype("a","output")
-.add_parameter("a", "int", "Index")
-.add_return("output","bool","");
-static PyObject* PyBobLearnMiscPLDAMachine_hasLogLikeConstTerm(PyBobLearnMiscPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = has_log_like_const_term.kwlist(0);
-  int i = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) Py_RETURN_NONE;
-
-  if(self->cxx->hasLogLikeConstTerm(i))
-    Py_RETURN_TRUE;
-  else
-    Py_RETURN_FALSE;
- BOB_CATCH_MEMBER("`has_log_like_const_term` could not be read", 0)    
-}
-
-
-/***** get_add_log_like_const_term *****/
-static auto get_add_log_like_const_term = bob::extension::FunctionDoc(
-  "get_add_log_like_const_term",
-
-   "Gets the log likelihood constant term for a given :math:`a` (number of samples). "
-   ":math:`l_{a} = \\frac{a}{2} ( -D log(2\\pi) -log|\\Sigma| +log|\\alpha| +log|\\gamma_a|)`",
-  0,
-  true
-)
-.add_prototype("a","output")
-.add_parameter("a", "int", "Index")
-.add_return("output","double","");
-static PyObject* PyBobLearnMiscPLDAMachine_getAddLogLikeConstTerm(PyBobLearnMiscPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = get_add_log_like_const_term.kwlist(0);
-  int i = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) Py_RETURN_NONE;
-
-  return Py_BuildValue("d",self->cxx->getAddLogLikeConstTerm(i));
-
-  BOB_CATCH_MEMBER("`get_add_log_like_const_term` could not be read", 0)    
-}
-
-
-/***** get_log_like_const_term *****/
-static auto get_log_like_const_term = bob::extension::FunctionDoc(
-  "get_log_like_const_term",
-   "Gets the log likelihood constant term for a given :math:`a` (number of samples). "
-    ":math:`l_{a} = \\frac{a}{2} ( -D log(2\\pi) -log|\\Sigma| +log|\\alpha| +log|\\gamma_a|)",
-  0,
-  true
-)
-.add_prototype("a","output")
-.add_parameter("a", "int", "Index")
-.add_return("output","double","");
-static PyObject* PyBobLearnMiscPLDAMachine_getLogLikeConstTerm(PyBobLearnMiscPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = get_log_like_const_term.kwlist(0);
-  int i = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) Py_RETURN_NONE;
-
-  return Py_BuildValue("d",self->cxx->getLogLikeConstTerm(i));
-
-  BOB_CATCH_MEMBER("`get_log_like_const_term` could not be read", 0)    
-}
-
-/***** clear_maps *****/
-static auto clear_maps = bob::extension::FunctionDoc(
-  "clear_maps",
-  "Clears the maps (:math:`gamma_a` and loglike_constterm_a).",
-  0,
-  true
-);
-static PyObject* PyBobLearnMiscPLDAMachine_clearMaps(PyBobLearnMiscPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  self->cxx->clearMaps();
-  Py_RETURN_NONE;
-
-  BOB_CATCH_MEMBER("`clear_maps` could not be read", 0)    
-}
-
-
-/***** compute_log_likelihood *****/
-static auto compute_log_likelihood = bob::extension::FunctionDoc(
-  "compute_log_likelihood",
-  "Compute the log-likelihood of the given sample and (optionally) the enrolled samples",
-  0,
-  true
-)
-.add_prototype("sample,with_enrolled_samples","output")
-.add_parameter("sample", "array_like <float, 1D>", "Sample")
-.add_parameter("with_enrolled_samples", "bool", "")
-.add_return("output","double","The log-likelihood");
-static PyObject* PyBobLearnMiscPLDAMachine_computeLogLikelihood(PyBobLearnMiscPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = compute_log_likelihood.kwlist(0);
-
-  PyBlitzArrayObject* samples;
-  PyObject* with_enrolled_samples = Py_True;
-  
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&|O!", kwlist, &PyBlitzArray_Converter, &samples,
-                                                                 &PyBool_Type, &with_enrolled_samples)) Py_RETURN_NONE;
-  auto samples_ = make_safe(samples);
-
-  blitz::Array<double,2>  blitz_test = *PyBlitzArrayCxx_AsBlitz<double,2>(samples);
-
-   //There are 2 methods in C++, one <double,1> and the another <double,2>
-  if (blitz_test.extent(1)==0)
-    return Py_BuildValue("d",self->cxx->computeLogLikelihood(*PyBlitzArrayCxx_AsBlitz<double,1>(samples), f(with_enrolled_samples)));
-  else
-    return Py_BuildValue("d",self->cxx->computeLogLikelihood(*PyBlitzArrayCxx_AsBlitz<double,2>(samples), f(with_enrolled_samples)));
-
-  BOB_CATCH_MEMBER("`compute_log_likelihood` could not be read", 0)    
-}
-
-
-/***** forward *****/
-static auto forward = bob::extension::FunctionDoc(
-  "forward",
-  "Computes a log likelihood ratio from a 1D or 2D blitz::Array",
-  0,
-  true
-)
-.add_prototype("samples","output")
-.add_parameter("samples", "array_like <float, 1D>", "Sample")
-.add_return("output","double","The log-likelihood ratio");
-static PyObject* PyBobLearnMiscPLDAMachine_forward(PyBobLearnMiscPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-  
-  char** kwlist = forward.kwlist(0);
-
-  PyBlitzArrayObject* samples;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBlitzArray_Converter, &samples)) Py_RETURN_NONE;
-  auto samples_ = make_safe(samples);
-  blitz::Array<double,2>  blitz_test = *PyBlitzArrayCxx_AsBlitz<double,2>(samples);
-
-   //There are 2 methods in C++, one <double,1> and the another <double,2>
-  if (blitz_test.extent(1)==0)
-    return Py_BuildValue("d",self->cxx->forward(*PyBlitzArrayCxx_AsBlitz<double,1>(samples)));
-  else
-    return Py_BuildValue("d",self->cxx->forward(*PyBlitzArrayCxx_AsBlitz<double,2>(samples)));
-
-  BOB_CATCH_MEMBER("`forward` could not be read", 0)    
-}
-
-
-static PyMethodDef PyBobLearnMiscPLDAMachine_methods[] = {
-  {
-    save.name(),
-    (PyCFunction)PyBobLearnMiscPLDAMachine_Save,
-    METH_VARARGS|METH_KEYWORDS,
-    save.doc()
-  },
-  {
-    load.name(),
-    (PyCFunction)PyBobLearnMiscPLDAMachine_Load,
-    METH_VARARGS|METH_KEYWORDS,
-    load.doc()
-  },
-  {
-    is_similar_to.name(),
-    (PyCFunction)PyBobLearnMiscPLDAMachine_IsSimilarTo,
-    METH_VARARGS|METH_KEYWORDS,
-    is_similar_to.doc()
-  },
-  {
-    get_gamma.name(),
-    (PyCFunction)PyBobLearnMiscPLDAMachine_getGamma,
-    METH_VARARGS|METH_KEYWORDS,
-    get_gamma.doc()
-  },
-  {
-    has_gamma.name(),
-    (PyCFunction)PyBobLearnMiscPLDAMachine_hasGamma,
-    METH_VARARGS|METH_KEYWORDS,
-    has_gamma.doc()
-  },
-  {
-    get_add_gamma.name(),
-    (PyCFunction)PyBobLearnMiscPLDAMachine_getAddGamma,
-    METH_VARARGS|METH_KEYWORDS,
-    get_add_gamma.doc()
-  },
-  {
-    has_log_like_const_term.name(),
-    (PyCFunction)PyBobLearnMiscPLDAMachine_hasLogLikeConstTerm,
-    METH_VARARGS|METH_KEYWORDS,
-    has_log_like_const_term.doc()
-  },  
-  {
-    get_add_log_like_const_term.name(),
-    (PyCFunction)PyBobLearnMiscPLDAMachine_getAddLogLikeConstTerm,
-    METH_VARARGS|METH_KEYWORDS,
-    get_add_log_like_const_term.doc()
-  },
-  {
-    get_log_like_const_term.name(),
-    (PyCFunction)PyBobLearnMiscPLDAMachine_getLogLikeConstTerm,
-    METH_VARARGS|METH_KEYWORDS,
-    get_log_like_const_term.doc()
-  },  
-  {
-    clear_maps.name(),
-    (PyCFunction)PyBobLearnMiscPLDAMachine_clearMaps,
-    METH_NOARGS,
-    clear_maps.doc()
-  },
-  {
-    compute_log_likelihood.name(),
-    (PyCFunction)PyBobLearnMiscPLDAMachine_computeLogLikelihood,
-    METH_VARARGS|METH_KEYWORDS,
-    compute_log_likelihood.doc()
-  },
-  {0} /* Sentinel */
-};
-
-
-/******************************************************************/
-/************ Module Section **************************************/
-/******************************************************************/
-
-// Define the JFA type struct; will be initialized later
-PyTypeObject PyBobLearnMiscPLDAMachine_Type = {
-  PyVarObject_HEAD_INIT(0,0)
-  0
-};
-
-bool init_BobLearnMiscPLDAMachine(PyObject* module)
-{
-  // initialize the type struct
-  PyBobLearnMiscPLDAMachine_Type.tp_name      = PLDAMachine_doc.name();
-  PyBobLearnMiscPLDAMachine_Type.tp_basicsize = sizeof(PyBobLearnMiscPLDAMachineObject);
-  PyBobLearnMiscPLDAMachine_Type.tp_flags     = Py_TPFLAGS_DEFAULT;
-  PyBobLearnMiscPLDAMachine_Type.tp_doc       = PLDAMachine_doc.doc();
-
-  // set the functions
-  PyBobLearnMiscPLDAMachine_Type.tp_new         = PyType_GenericNew;
-  PyBobLearnMiscPLDAMachine_Type.tp_init        = reinterpret_cast<initproc>(PyBobLearnMiscPLDAMachine_init);
-  PyBobLearnMiscPLDAMachine_Type.tp_dealloc     = reinterpret_cast<destructor>(PyBobLearnMiscPLDAMachine_delete);
-  PyBobLearnMiscPLDAMachine_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnMiscPLDAMachine_RichCompare);
-  PyBobLearnMiscPLDAMachine_Type.tp_methods     = PyBobLearnMiscPLDAMachine_methods;
-  PyBobLearnMiscPLDAMachine_Type.tp_getset      = PyBobLearnMiscPLDAMachine_getseters;
-  PyBobLearnMiscPLDAMachine_Type.tp_call = reinterpret_cast<ternaryfunc>(PyBobLearnMiscPLDAMachine_forward);
-
-
-  // check that everything is fine
-  if (PyType_Ready(&PyBobLearnMiscPLDAMachine_Type) < 0) return false;
-
-  // add the type to the module
-  Py_INCREF(&PyBobLearnMiscPLDAMachine_Type);
-  return PyModule_AddObject(module, "PLDAMachine", (PyObject*)&PyBobLearnMiscPLDAMachine_Type) >= 0;
-}
-
diff --git a/bob/learn/misc/plda_trainer.cpp b/bob/learn/misc/plda_trainer.cpp
deleted file mode 100644
index 2f76dc32fecc863a39d0592374e2954924d033e3..0000000000000000000000000000000000000000
--- a/bob/learn/misc/plda_trainer.cpp
+++ /dev/null
@@ -1,704 +0,0 @@
-/**
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- * @date Wed 04 Feb 14:15:00 2015
- *
- * @brief Python API for bob::learn::em
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "main.h"
-#include <boost/make_shared.hpp>
-
-//Defining maps for each initializatio method
-static const std::map<std::string, bob::learn::misc::PLDATrainer::InitFMethod> FMethod = {{"RANDOM_F",  bob::learn::misc::PLDATrainer::RANDOM_F}, {"BETWEEN_SCATTER", bob::learn::misc::PLDATrainer::BETWEEN_SCATTER}};
-
-static const std::map<std::string, bob::learn::misc::PLDATrainer::InitGMethod> GMethod = {{"RANDOM_G",  bob::learn::misc::PLDATrainer::RANDOM_G}, {"WITHIN_SCATTER", bob::learn::misc::PLDATrainer::WITHIN_SCATTER}};
-
-static const std::map<std::string, bob::learn::misc::PLDATrainer::InitSigmaMethod> SigmaMethod = {{"RANDOM_SIGMA",  bob::learn::misc::PLDATrainer::RANDOM_SIGMA}, {"VARIANCE_G", bob::learn::misc::PLDATrainer::VARIANCE_G}, {"CONSTANT", bob::learn::misc::PLDATrainer::CONSTANT}, {"VARIANCE_DATA", bob::learn::misc::PLDATrainer::VARIANCE_DATA}};
-
-
-
-//String to type
-static inline bob::learn::misc::PLDATrainer::InitFMethod string2FMethod(const std::string& o){
-  auto it = FMethod.find(o);
-  if (it == FMethod.end()) throw std::runtime_error("The given FMethod '" + o + "' is not known; choose one of ('RANDOM_F','BETWEEN_SCATTER')");
-  else return it->second;
-}
-
-static inline bob::learn::misc::PLDATrainer::InitGMethod string2GMethod(const std::string& o){
-  auto it = GMethod.find(o);
-  if (it == GMethod.end()) throw std::runtime_error("The given GMethod '" + o + "' is not known; choose one of ('RANDOM_G','WITHIN_SCATTER')");
-  else return it->second;
-}
-
-static inline bob::learn::misc::PLDATrainer::InitSigmaMethod string2SigmaMethod(const std::string& o){
-  auto it = SigmaMethod.find(o);
-  if (it == SigmaMethod.end()) throw std::runtime_error("The given SigmaMethod '" + o + "' is not known; choose one of ('RANDOM_SIGMA','VARIANCE_G', 'CONSTANT', 'VARIANCE_DATA')");
-  else return it->second;
-}
-
-//Type to string
-static inline const std::string& FMethod2string(bob::learn::misc::PLDATrainer::InitFMethod o){
-  for (auto it = FMethod.begin(); it != FMethod.end(); ++it) if (it->second == o) return it->first;
-  throw std::runtime_error("The given FMethod type is not known");
-}
-
-static inline const std::string& GMethod2string(bob::learn::misc::PLDATrainer::InitGMethod o){
-  for (auto it = GMethod.begin(); it != GMethod.end(); ++it) if (it->second == o) return it->first;
-  throw std::runtime_error("The given GMethod type is not known");
-}
-
-static inline const std::string& SigmaMethod2string(bob::learn::misc::PLDATrainer::InitSigmaMethod o){
-  for (auto it = SigmaMethod.begin(); it != SigmaMethod.end(); ++it) if (it->second == o) return it->first;
-  throw std::runtime_error("The given SigmaMethod type is not known");
-}
-
-
-static inline bool f(PyObject* o){return o != 0 && PyObject_IsTrue(o) > 0;}  /* converts PyObject to bool and returns false if object is NULL */
-
-template <int N>
-int list_as_vector(PyObject* list, std::vector<blitz::Array<double,N> >& vec)
-{
-  for (int i=0; i<PyList_GET_SIZE(list); i++)
-  {
-    PyBlitzArrayObject* blitz_object; 
-    if (!PyArg_Parse(PyList_GetItem(list, i), "O&", &PyBlitzArray_Converter, &blitz_object)){
-      PyErr_Format(PyExc_RuntimeError, "Expected numpy array object");
-      return -1;
-    }
-    auto blitz_object_ = make_safe(blitz_object);
-    vec.push_back(*PyBlitzArrayCxx_AsBlitz<double,N>(blitz_object));
-  }
-  return 0;
-}
-
-
-template <int N>
-static PyObject* vector_as_list(const std::vector<blitz::Array<double,N> >& vec)
-{
-  PyObject* list = PyList_New(vec.size());
-  for(size_t i=0; i<vec.size(); i++){
-    blitz::Array<double,N> numpy_array = vec[i];
-    PyObject* numpy_py_object = PyBlitzArrayCxx_AsNumpy(numpy_array);
-    PyList_SET_ITEM(list, i, numpy_py_object);
-  }
-  return list;
-}
-
-
-/******************************************************************/
-/************ Constructor Section *********************************/
-/******************************************************************/
-
-
-static auto PLDATrainer_doc = bob::extension::ClassDoc(
-  BOB_EXT_MODULE_PREFIX ".PLDATrainer",
-  "This class can be used to train the :math:`$F$`, :math:`$G$ and "
-  " :math:`$\\Sigma$` matrices and the mean vector :math:`$\\mu$` of a PLDA model."
-  "References: [ElShafey2014,PrinceElder2007,LiFu2012]",
-  ""
-).add_constructor(
-  bob::extension::FunctionDoc(
-    "__init__",
-    "Default constructor.\n Initializes a new PLDA trainer. The "
-    "training stage will place the resulting components in the "
-    "PLDABase.",
-    "",
-    true
-  )
-  .add_prototype("use_sum_second_order","")
-  .add_prototype("other","")
-  .add_prototype("","")
-
-  .add_parameter("other", ":py:class:`bob.learn.misc.PLDATrainer`", "A PLDATrainer object to be copied.")
-  .add_parameter("use_sum_second_order", "bool", "")
-);
-
-static int PyBobLearnMiscPLDATrainer_init_copy(PyBobLearnMiscPLDATrainerObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = PLDATrainer_doc.kwlist(1);
-  PyBobLearnMiscPLDATrainerObject* o;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscPLDATrainer_Type, &o)){
-    PLDATrainer_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::misc::PLDATrainer(*o->cxx));
-  return 0;
-}
-
-
-static int PyBobLearnMiscPLDATrainer_init_bool(PyBobLearnMiscPLDATrainerObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = PLDATrainer_doc.kwlist(0);
-  PyObject* use_sum_second_order;
-
-  //Parsing the input argments
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBool_Type, &use_sum_second_order))
-    return -1;
-
-  self->cxx.reset(new bob::learn::misc::PLDATrainer(f(use_sum_second_order)));
-  return 0;
-}
-
-
-static int PyBobLearnMiscPLDATrainer_init(PyBobLearnMiscPLDATrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  // get the number of command line arguments
-  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-
-  if(nargs==1){
-    //Reading the input argument
-    PyObject* arg = 0;
-    if (PyTuple_Size(args))
-      arg = PyTuple_GET_ITEM(args, 0);
-    else {
-      PyObject* tmp = PyDict_Values(kwargs);
-      auto tmp_ = make_safe(tmp);
-      arg = PyList_GET_ITEM(tmp, 0);
-    }
-      
-    if(PyBobLearnMiscPLDATrainer_Check(arg))
-      // If the constructor input is PLDATrainer object
-      return PyBobLearnMiscPLDATrainer_init_copy(self, args, kwargs);
-    else
-      return PyBobLearnMiscPLDATrainer_init_bool(self, args, kwargs);
-  }
-  else{
-    PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires only 0 or 1 argument, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
-    PLDATrainer_doc.print_usage();
-    return -1;
-  }
-
-  BOB_CATCH_MEMBER("cannot create PLDATrainer", 0)
-  return 0;
-}
-
-
-static void PyBobLearnMiscPLDATrainer_delete(PyBobLearnMiscPLDATrainerObject* self) {
-  self->cxx.reset();
-  Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-
-int PyBobLearnMiscPLDATrainer_Check(PyObject* o) {
-  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnMiscPLDATrainer_Type));
-}
-
-
-static PyObject* PyBobLearnMiscPLDATrainer_RichCompare(PyBobLearnMiscPLDATrainerObject* self, PyObject* other, int op) {
-  BOB_TRY
-
-  if (!PyBobLearnMiscPLDATrainer_Check(other)) {
-    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
-    return 0;
-  }
-  auto other_ = reinterpret_cast<PyBobLearnMiscPLDATrainerObject*>(other);
-  switch (op) {
-    case Py_EQ:
-      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
-    case Py_NE:
-      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
-    default:
-      Py_INCREF(Py_NotImplemented);
-      return Py_NotImplemented;
-  }
-  BOB_CATCH_MEMBER("cannot compare PLDATrainer objects", 0)
-}
-
-
-/******************************************************************/
-/************ Variables Section ***********************************/
-/******************************************************************/
-
-static auto z_second_order = bob::extension::VariableDoc(
-  "z_second_order",
-  "array_like <float, 3D>",
-  "",
-  ""
-);
-PyObject* PyBobLearnMiscPLDATrainer_get_z_second_order(PyBobLearnMiscPLDATrainerObject* self, void*){
-  BOB_TRY
-  //return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getZSecondOrder());
-  return vector_as_list(self->cxx->getZSecondOrder());
-  BOB_CATCH_MEMBER("z_second_order could not be read", 0)
-}
-
-
-static auto z_second_order_sum = bob::extension::VariableDoc(
-  "z_second_order_sum",
-  "array_like <float, 2D>",
-  "",
-  ""
-);
-PyObject* PyBobLearnMiscPLDATrainer_get_z_second_order_sum(PyBobLearnMiscPLDATrainerObject* self, void*){
-  BOB_TRY
-  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getZSecondOrderSum());
-  BOB_CATCH_MEMBER("z_second_order_sum could not be read", 0)
-}
-
-
-static auto z_first_order = bob::extension::VariableDoc(
-  "z_first_order",
-  "array_like <float, 2D>",
-  "",
-  ""
-);
-PyObject* PyBobLearnMiscPLDATrainer_get_z_first_order(PyBobLearnMiscPLDATrainerObject* self, void*){
-  BOB_TRY
-  //return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getZFirstOrder());
-  return vector_as_list(self->cxx->getZFirstOrder());
-  BOB_CATCH_MEMBER("z_first_order could not be read", 0)
-}
-
-
-/***** rng *****/
-static auto rng = bob::extension::VariableDoc(
-  "rng",
-  "str",
-  "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.",
-  ""
-);
-PyObject* PyBobLearnMiscPLDATrainer_getRng(PyBobLearnMiscPLDATrainerObject* self, void*) {
-  BOB_TRY
-  //Allocating the correspondent python object
-  
-  PyBoostMt19937Object* retval =
-    (PyBoostMt19937Object*)PyBoostMt19937_Type.tp_alloc(&PyBoostMt19937_Type, 0);
-
-  retval->rng = self->cxx->getRng().get();
-  return Py_BuildValue("O", retval);
-  BOB_CATCH_MEMBER("Rng method could not be read", 0)
-}
-int PyBobLearnMiscPLDATrainer_setRng(PyBobLearnMiscPLDATrainerObject* self, PyObject* value, void*) {
-  BOB_TRY
-
-  if (!PyBoostMt19937_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects an PyBoostMt19937_Check", Py_TYPE(self)->tp_name, rng.name());
-    return -1;
-  }
-
-  PyBoostMt19937Object* rng_object = 0;
-  PyArg_Parse(value, "O!", &PyBoostMt19937_Type, &rng_object);
-  self->cxx->setRng((boost::shared_ptr<boost::mt19937>)rng_object->rng);
-
-  return 0;
-  BOB_CATCH_MEMBER("Rng could not be set", 0)
-}
-
-
-/***** init_f_method *****/
-static auto init_f_method = bob::extension::VariableDoc(
-  "init_f_method",
-  "str",
-  "The method used for the initialization of :math:`$F$`.",
-  ""
-);
-PyObject* PyBobLearnMiscPLDATrainer_getFMethod(PyBobLearnMiscPLDATrainerObject* self, void*) {
-  BOB_TRY
-  return Py_BuildValue("s", FMethod2string(self->cxx->getInitFMethod()).c_str());
-  BOB_CATCH_MEMBER("init_f_method method could not be read", 0)
-}
-int PyBobLearnMiscPLDATrainer_setFMethod(PyBobLearnMiscPLDATrainerObject* self, PyObject* value, void*) {
-  BOB_TRY
-
-  if (!PyString_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects an str", Py_TYPE(self)->tp_name, init_f_method.name());
-    return -1;
-  }
-  self->cxx->setInitFMethod(string2FMethod(PyString_AS_STRING(value)));
-
-  return 0;
-  BOB_CATCH_MEMBER("init_f_method method could not be set", 0)
-}
-
-
-/***** init_g_method *****/
-static auto init_g_method = bob::extension::VariableDoc(
-  "init_g_method",
-  "str",
-  "The method used for the initialization of :math:`$G$`.",
-  ""
-);
-PyObject* PyBobLearnMiscPLDATrainer_getGMethod(PyBobLearnMiscPLDATrainerObject* self, void*) {
-  BOB_TRY
-  return Py_BuildValue("s", GMethod2string(self->cxx->getInitGMethod()).c_str());
-  BOB_CATCH_MEMBER("init_g_method method could not be read", 0)
-}
-int PyBobLearnMiscPLDATrainer_setGMethod(PyBobLearnMiscPLDATrainerObject* self, PyObject* value, void*) {
-  BOB_TRY
-
-  if (!PyString_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects an str", Py_TYPE(self)->tp_name, init_g_method.name());
-    return -1;
-  }
-  self->cxx->setInitGMethod(string2GMethod(PyString_AS_STRING(value)));
-
-  return 0;
-  BOB_CATCH_MEMBER("init_g_method method could not be set", 0)
-}
-
-
-/***** init_sigma_method *****/
-static auto init_sigma_method = bob::extension::VariableDoc(
-  "init_sigma_method",
-  "str",
-  "The method used for the initialization of :math:`$\\Sigma$`.",
-  ""
-);
-PyObject* PyBobLearnMiscPLDATrainer_getSigmaMethod(PyBobLearnMiscPLDATrainerObject* self, void*) {
-  BOB_TRY
-  return Py_BuildValue("s", SigmaMethod2string(self->cxx->getInitSigmaMethod()).c_str());
-  BOB_CATCH_MEMBER("init_sigma_method method could not be read", 0)
-}
-int PyBobLearnMiscPLDATrainer_setSigmaMethod(PyBobLearnMiscPLDATrainerObject* self, PyObject* value, void*) {
-  BOB_TRY
-
-  if (!PyString_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects an str", Py_TYPE(self)->tp_name, init_sigma_method.name());
-    return -1;
-  }
-  self->cxx->setInitSigmaMethod(string2SigmaMethod(PyString_AS_STRING(value)));
-
-  return 0;
-  BOB_CATCH_MEMBER("init_sigma_method method could not be set", 0)
-}
-
-
-static PyGetSetDef PyBobLearnMiscPLDATrainer_getseters[] = { 
-  {
-   z_first_order.name(),
-   (getter)PyBobLearnMiscPLDATrainer_get_z_first_order,
-   0,
-   z_first_order.doc(),
-   0
-  },
-  {
-   z_second_order_sum.name(),
-   (getter)PyBobLearnMiscPLDATrainer_get_z_second_order_sum,
-   0,
-   z_second_order_sum.doc(),
-   0
-  },
-  {
-   z_second_order.name(),
-   (getter)PyBobLearnMiscPLDATrainer_get_z_second_order,
-   0,
-   z_second_order.doc(),
-   0
-  },
-  {
-   rng.name(),
-   (getter)PyBobLearnMiscPLDATrainer_getRng,
-   (setter)PyBobLearnMiscPLDATrainer_setRng,
-   rng.doc(),
-   0
-  },
-  {
-   init_f_method.name(),
-   (getter)PyBobLearnMiscPLDATrainer_getFMethod,
-   (setter)PyBobLearnMiscPLDATrainer_setFMethod,
-   init_f_method.doc(),
-   0
-  },
-  {
-   init_g_method.name(),
-   (getter)PyBobLearnMiscPLDATrainer_getGMethod,
-   (setter)PyBobLearnMiscPLDATrainer_setGMethod,
-   init_g_method.doc(),
-   0
-  },
-  {
-   init_sigma_method.name(),
-   (getter)PyBobLearnMiscPLDATrainer_getSigmaMethod,
-   (setter)PyBobLearnMiscPLDATrainer_setSigmaMethod,
-   init_sigma_method.doc(),
-   0
-  },  
-  {0}  // Sentinel
-};
-
-
-/******************************************************************/
-/************ Functions Section ***********************************/
-/******************************************************************/
-
-/*** initialize ***/
-static auto initialize = bob::extension::FunctionDoc(
-  "initialize",
-  "Initialization before the EM steps",
-  "",
-  true
-)
-.add_prototype("plda_base,data")
-.add_parameter("plda_base", ":py:class:`bob.learn.misc.PLDABase`", "PLDAMachine Object")
-.add_parameter("data", "list", "");
-static PyObject* PyBobLearnMiscPLDATrainer_initialize(PyBobLearnMiscPLDATrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = initialize.kwlist(0);
-
-  PyBobLearnMiscPLDABaseObject* plda_base = 0;
-  PyObject* data = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscPLDABase_Type, &plda_base,
-                                                                 &PyList_Type, &data)) Py_RETURN_NONE;
-
-  std::vector<blitz::Array<double,2> > data_vector;
-  if(list_as_vector(data ,data_vector)==0)
-    self->cxx->initialize(*plda_base->cxx, data_vector);
-
-  BOB_CATCH_MEMBER("cannot perform the initialize method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** e_step ***/
-static auto e_step = bob::extension::FunctionDoc(
-  "e_step",
-  "e_step before the EM steps",
-  "",
-  true
-)
-.add_prototype("plda_base,data")
-.add_parameter("plda_base", ":py:class:`bob.learn.misc.PLDABase`", "PLDAMachine Object")
-.add_parameter("data", "list", "");
-static PyObject* PyBobLearnMiscPLDATrainer_e_step(PyBobLearnMiscPLDATrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = e_step.kwlist(0);
-
-  PyBobLearnMiscPLDABaseObject* plda_base = 0;
-  PyObject* data = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscPLDABase_Type, &plda_base,
-                                                                 &PyList_Type, &data)) Py_RETURN_NONE;
-
-  std::vector<blitz::Array<double,2> > data_vector;
-  if(list_as_vector(data ,data_vector)==0)
-    self->cxx->eStep(*plda_base->cxx, data_vector);
-
-  BOB_CATCH_MEMBER("cannot perform the e_step method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** m_step ***/
-static auto m_step = bob::extension::FunctionDoc(
-  "m_step",
-  "m_step before the EM steps",
-  "",
-  true
-)
-.add_prototype("plda_base,data")
-.add_parameter("plda_base", ":py:class:`bob.learn.misc.PLDABase`", "PLDAMachine Object")
-.add_parameter("data", "list", "");
-static PyObject* PyBobLearnMiscPLDATrainer_m_step(PyBobLearnMiscPLDATrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = m_step.kwlist(0);
-
-  PyBobLearnMiscPLDABaseObject* plda_base = 0;
-  PyObject* data = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscPLDABase_Type, &plda_base,
-                                                                 &PyList_Type, &data)) Py_RETURN_NONE;
-
-  std::vector<blitz::Array<double,2> > data_vector;
-  if(list_as_vector(data ,data_vector)==0)
-    self->cxx->mStep(*plda_base->cxx, data_vector);
-
-  BOB_CATCH_MEMBER("cannot perform the m_step method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** finalize ***/
-static auto finalize = bob::extension::FunctionDoc(
-  "finalize",
-  "finalize before the EM steps",
-  "",
-  true
-)
-.add_prototype("plda_base,data")
-.add_parameter("plda_base", ":py:class:`bob.learn.misc.PLDABase`", "PLDAMachine Object")
-.add_parameter("data", "list", "");
-static PyObject* PyBobLearnMiscPLDATrainer_finalize(PyBobLearnMiscPLDATrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = finalize.kwlist(0);
-
-  PyBobLearnMiscPLDABaseObject* plda_base = 0;
-  PyObject* data = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscPLDABase_Type, &plda_base,
-                                                                 &PyList_Type, &data)) Py_RETURN_NONE;
-
-  std::vector<blitz::Array<double,2> > data_vector;
-  if(list_as_vector(data ,data_vector)==0)
-    self->cxx->finalize(*plda_base->cxx, data_vector);
-
-  BOB_CATCH_MEMBER("cannot perform the finalize method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-
-/*** enrol ***/
-static auto enrol = bob::extension::FunctionDoc(
-  "enrol",
-  "Main procedure for enrolling a PLDAMachine",
-  "",
-  true
-)
-.add_prototype("plda_machine,data")
-.add_parameter("plda_machine", ":py:class:`bob.learn.misc.PLDAMachine`", "PLDAMachine Object")
-.add_parameter("data", "list", "");
-static PyObject* PyBobLearnMiscPLDATrainer_enrol(PyBobLearnMiscPLDATrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = enrol.kwlist(0);
-
-  PyBobLearnMiscPLDAMachineObject* plda_machine = 0;
-  PyBlitzArrayObject* data = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnMiscPLDAMachine_Type, &plda_machine,
-                                                                 &PyBlitzArray_Converter, &data)) Py_RETURN_NONE;
-
-  auto data_ = make_safe(data);
-  self->cxx->enrol(*plda_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
-
-  BOB_CATCH_MEMBER("cannot perform the enrol method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** is_similar_to ***/
-static auto is_similar_to = bob::extension::FunctionDoc(
-  "is_similar_to",
-  
-  "Compares this PLDATrainer with the ``other`` one to be approximately the same.",
-  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
-  "relative and absolute precision for the ``weights``, ``biases`` "
-  "and any other values internal to this machine."
-)
-.add_prototype("other, [r_epsilon], [a_epsilon]","output")
-.add_parameter("other", ":py:class:`bob.learn.misc.PLDAMachine`", "A PLDAMachine object to be compared.")
-.add_parameter("r_epsilon", "float", "Relative precision.")
-.add_parameter("a_epsilon", "float", "Absolute precision.")
-.add_return("output","bool","True if it is similar, otherwise false.");
-static PyObject* PyBobLearnMiscPLDATrainer_IsSimilarTo(PyBobLearnMiscPLDATrainerObject* self, PyObject* args, PyObject* kwds) {
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = is_similar_to.kwlist(0);
-
-  //PyObject* other = 0;
-  PyBobLearnMiscPLDATrainerObject* other = 0;
-  double r_epsilon = 1.e-5;
-  double a_epsilon = 1.e-8;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
-        &PyBobLearnMiscPLDATrainer_Type, &other,
-        &r_epsilon, &a_epsilon)){
-
-        is_similar_to.print_usage(); 
-        return 0;        
-  }
-
-  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
-    Py_RETURN_TRUE;
-  else
-    Py_RETURN_FALSE;
-}
-
-
-
-static PyMethodDef PyBobLearnMiscPLDATrainer_methods[] = {
-  {
-    initialize.name(),
-    (PyCFunction)PyBobLearnMiscPLDATrainer_initialize,
-    METH_VARARGS|METH_KEYWORDS,
-    initialize.doc()
-  },
-  {
-    e_step.name(),
-    (PyCFunction)PyBobLearnMiscPLDATrainer_e_step,
-    METH_VARARGS|METH_KEYWORDS,
-    e_step.doc()
-  },
-  {
-    m_step.name(),
-    (PyCFunction)PyBobLearnMiscPLDATrainer_m_step,
-    METH_VARARGS|METH_KEYWORDS,
-    m_step.doc()
-  },
-  {
-    finalize.name(),
-    (PyCFunction)PyBobLearnMiscPLDATrainer_finalize,
-    METH_VARARGS|METH_KEYWORDS,
-    finalize.doc()
-  },  
-  {
-    enrol.name(),
-    (PyCFunction)PyBobLearnMiscPLDATrainer_enrol,
-    METH_VARARGS|METH_KEYWORDS,
-    enrol.doc()
-  },
-  {
-    is_similar_to.name(),
-    (PyCFunction)PyBobLearnMiscPLDATrainer_IsSimilarTo,
-    METH_VARARGS|METH_KEYWORDS,
-    is_similar_to.doc()
-  },
-  {0} /* Sentinel */
-};
-
-
-/******************************************************************/
-/************ Module Section **************************************/
-/******************************************************************/
-
-// Define the Gaussian type struct; will be initialized later
-PyTypeObject PyBobLearnMiscPLDATrainer_Type = {
-  PyVarObject_HEAD_INIT(0,0)
-  0
-};
-
-bool init_BobLearnMiscPLDATrainer(PyObject* module)
-{
-  // initialize the type struct
-  PyBobLearnMiscPLDATrainer_Type.tp_name      = PLDATrainer_doc.name();
-  PyBobLearnMiscPLDATrainer_Type.tp_basicsize = sizeof(PyBobLearnMiscPLDATrainerObject);
-  PyBobLearnMiscPLDATrainer_Type.tp_flags     = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;//Enable the class inheritance;
-  PyBobLearnMiscPLDATrainer_Type.tp_doc       = PLDATrainer_doc.doc();
-
-  // set the functions
-  PyBobLearnMiscPLDATrainer_Type.tp_new          = PyType_GenericNew;
-  PyBobLearnMiscPLDATrainer_Type.tp_init         = reinterpret_cast<initproc>(PyBobLearnMiscPLDATrainer_init);
-  PyBobLearnMiscPLDATrainer_Type.tp_dealloc      = reinterpret_cast<destructor>(PyBobLearnMiscPLDATrainer_delete);
-  PyBobLearnMiscPLDATrainer_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnMiscPLDATrainer_RichCompare);
-  PyBobLearnMiscPLDATrainer_Type.tp_methods      = PyBobLearnMiscPLDATrainer_methods;
-  PyBobLearnMiscPLDATrainer_Type.tp_getset       = PyBobLearnMiscPLDATrainer_getseters;
-  //PyBobLearnMiscPLDATrainer_Type.tp_call         = reinterpret_cast<ternaryfunc>(PyBobLearnMiscPLDATrainer_compute_likelihood);
-
-
-  // check that everything is fine
-  if (PyType_Ready(&PyBobLearnMiscPLDATrainer_Type) < 0) return false;
-
-  // add the type to the module
-  Py_INCREF(&PyBobLearnMiscPLDATrainer_Type);
-  return PyModule_AddObject(module, "_PLDATrainer", (PyObject*)&PyBobLearnMiscPLDATrainer_Type) >= 0;
-}
-
diff --git a/bob/learn/misc/test_em.py b/bob/learn/misc/test_em.py
deleted file mode 100644
index 88070de4678ee79d521a9a9d48ae4979ab9aa157..0000000000000000000000000000000000000000
--- a/bob/learn/misc/test_em.py
+++ /dev/null
@@ -1,252 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Francois Moulin <Francois.Moulin@idiap.ch>
-# Tue May 10 11:35:58 2011 +0200
-#
-# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland
-
-"""Test trainer package
-"""
-import unittest
-import numpy
-
-import bob.io.base
-from bob.io.base.test_utils import datafile
-
-from . import KMeansMachine, GMMMachine, KMeansTrainer, \
-    ML_GMMTrainer, MAP_GMMTrainer
-
-#, MAP_GMMTrainer
-
-def loadGMM():
-  gmm = GMMMachine(2, 2)
-
-  gmm.weights = bob.io.base.load(datafile('gmm.init_weights.hdf5', __name__))
-  gmm.means = bob.io.base.load(datafile('gmm.init_means.hdf5', __name__))
-  gmm.variances = bob.io.base.load(datafile('gmm.init_variances.hdf5', __name__))
-  #gmm.variance_thresholds = numpy.array([[0.001, 0.001],[0.001, 0.001]], 'float64')
-
-  return gmm
-
-def equals(x, y, epsilon):
-  return (abs(x - y) < epsilon).all()
-
-class MyTrainer1(KMeansTrainer):
-  """Simple example of python trainer: """
-
-  def __init__(self):
-    KMeansTrainer.__init__(self)
-
-  def train(self, machine, data):
-    a = numpy.ndarray((2, 2), 'float64')
-    a[0, :] = data[1]
-    a[1, :] = data[2]
-    machine.means = a
-
-def test_gmm_ML_1():
-
-  # Trains a GMMMachine with ML_GMMTrainer
-
-  ar = bob.io.base.load(datafile("faithful.torch3_f64.hdf5", __name__))  
-  gmm = loadGMM()
-  
-  ml_gmmtrainer = ML_GMMTrainer(True, True, True)
-  ml_gmmtrainer.train(gmm, ar)
-
-  #config = bob.io.base.HDF5File(datafile('gmm_ML.hdf5", __name__), 'w')
-  #gmm.save(config)
-  
-  gmm_ref = GMMMachine(bob.io.base.HDF5File(datafile('gmm_ML.hdf5', __name__)))
-  gmm_ref_32bit_debug = GMMMachine(bob.io.base.HDF5File(datafile('gmm_ML_32bit_debug.hdf5', __name__)))
-  gmm_ref_32bit_release = GMMMachine(bob.io.base.HDF5File(datafile('gmm_ML_32bit_release.hdf5', __name__)))
-
-  assert (gmm == gmm_ref) or (gmm == gmm_ref_32bit_release) or (gmm == gmm_ref_32bit_release)
-
- 
-def test_gmm_ML_2():
-
-  # Trains a GMMMachine with ML_GMMTrainer; compares to an old reference
-
-  ar = bob.io.base.load(datafile('dataNormalized.hdf5', __name__))
-
-  # Initialize GMMMachine
-  gmm = GMMMachine(5, 45)
-  gmm.means = bob.io.base.load(datafile('meansAfterKMeans.hdf5', __name__)).astype('float64')
-  gmm.variances = bob.io.base.load(datafile('variancesAfterKMeans.hdf5', __name__)).astype('float64')
-  gmm.weights = numpy.exp(bob.io.base.load(datafile('weightsAfterKMeans.hdf5', __name__)).astype('float64'))
-
-  threshold = 0.001
-  gmm.set_variance_thresholds(threshold)
-
-  # Initialize ML Trainer
-  prior = 0.001
-  max_iter_gmm = 25
-  accuracy = 0.00001
-  ml_gmmtrainer = ML_GMMTrainer(True, True, True, prior, converge_by_likelihood=True)
-  ml_gmmtrainer.max_iterations = max_iter_gmm
-  ml_gmmtrainer.convergence_threshold = accuracy
-  
-  # Run ML
-  ml_gmmtrainer.train(gmm, ar)
-
-
-  # Test results
-  # Load torch3vision reference
-  meansML_ref = bob.io.base.load(datafile('meansAfterML.hdf5', __name__))
-  variancesML_ref = bob.io.base.load(datafile('variancesAfterML.hdf5', __name__))
-  weightsML_ref = bob.io.base.load(datafile('weightsAfterML.hdf5', __name__))
-
-
-  # Compare to current results
-  assert equals(gmm.means, meansML_ref, 3e-3)
-  assert equals(gmm.variances, variancesML_ref, 3e-3)
-  assert equals(gmm.weights, weightsML_ref, 1e-4)
-
-
-
-def test_gmm_MAP_1():
-
-  # Train a GMMMachine with MAP_GMMTrainer
-
-  ar = bob.io.base.load(datafile('faithful.torch3_f64.hdf5', __name__))
-
-  gmm = GMMMachine(bob.io.base.HDF5File(datafile("gmm_ML.hdf5", __name__)))
-  gmmprior = GMMMachine(bob.io.base.HDF5File(datafile("gmm_ML.hdf5", __name__)))
-
-  map_gmmtrainer = MAP_GMMTrainer(update_means=True, update_variances=False, update_weights=False, prior_gmm=gmmprior, relevance_factor=4.)  
-  #map_gmmtrainer.set_prior_gmm(gmmprior)
-  map_gmmtrainer.train(gmm, ar)
-
-  #config = bob.io.base.HDF5File(datafile('gmm_MAP.hdf5", 'w', __name__))
-  #gmm.save(config)
-
-  gmm_ref = GMMMachine(bob.io.base.HDF5File(datafile('gmm_MAP.hdf5', __name__)))
-
-  assert (equals(gmm.means,gmm_ref.means,1e-3) and equals(gmm.variances,gmm_ref.variances,1e-3) and equals(gmm.weights,gmm_ref.weights,1e-3))
-
-
-def test_gmm_MAP_2():
-
-  # Train a GMMMachine with MAP_GMMTrainer and compare with matlab reference
-
-  data = bob.io.base.load(datafile('data.hdf5', __name__))
-  data = data.reshape((1, data.shape[0])) # make a 2D array out of it
-  means = bob.io.base.load(datafile('means.hdf5', __name__))
-  variances = bob.io.base.load(datafile('variances.hdf5', __name__))
-  weights = bob.io.base.load(datafile('weights.hdf5', __name__))
-
-  gmm = GMMMachine(2,50)
-  gmm.means = means
-  gmm.variances = variances
-  gmm.weights = weights
-
-  map_adapt = MAP_GMMTrainer(update_means=True, update_variances=False, update_weights=False, mean_var_update_responsibilities_threshold=0.,prior_gmm=gmm, relevance_factor=4.)
-  #map_adapt.set_prior_gmm(gmm)
-
-  gmm_adapted = GMMMachine(2,50)
-  gmm_adapted.means = means
-  gmm_adapted.variances = variances
-  gmm_adapted.weights = weights
-
-  map_adapt.max_iterations = 1
-  map_adapt.train(gmm_adapted, data)
-
-  new_means = bob.io.base.load(datafile('new_adapted_mean.hdf5', __name__))
-
- # print new_means[0,:]
- # print gmm_adapted.means[:,0]
-
-  # Compare to matlab reference
-  assert equals(new_means[0,:], gmm_adapted.means[:,0], 1e-4)
-  assert equals(new_means[1,:], gmm_adapted.means[:,1], 1e-4)
-
-
-def test_gmm_MAP_3():
-
-  # Train a GMMMachine with MAP_GMMTrainer; compares to old reference
-
-  ar = bob.io.base.load(datafile('dataforMAP.hdf5', __name__))
-
-  # Initialize GMMMachine
-  n_gaussians = 5
-  n_inputs = 45
-  prior_gmm = GMMMachine(n_gaussians, n_inputs)
-  prior_gmm.means = bob.io.base.load(datafile('meansAfterML.hdf5', __name__))
-  prior_gmm.variances = bob.io.base.load(datafile('variancesAfterML.hdf5', __name__))
-  prior_gmm.weights = bob.io.base.load(datafile('weightsAfterML.hdf5', __name__))
-
-  threshold = 0.001
-  prior_gmm.set_variance_thresholds(threshold)
-
-  # Initialize MAP Trainer
-  relevance_factor = 0.1
-  prior = 0.001
-  max_iter_gmm = 1
-  accuracy = 0.00001
-  map_factor = 0.5
-  map_gmmtrainer = MAP_GMMTrainer(update_means=True, update_variances=False, update_weights=False, convergence_threshold=prior, prior_gmm=prior_gmm, alpha=map_factor)
-  map_gmmtrainer.max_iterations = max_iter_gmm
-  map_gmmtrainer.convergence_threshold = accuracy
-
-  gmm = GMMMachine(n_gaussians, n_inputs)
-  gmm.set_variance_thresholds(threshold)
-
-  # Train
-  map_gmmtrainer.train(gmm, ar)
-
-  # Test results
-  # Load torch3vision reference
-  meansMAP_ref = bob.io.base.load(datafile('meansAfterMAP.hdf5', __name__))
-  variancesMAP_ref = bob.io.base.load(datafile('variancesAfterMAP.hdf5', __name__))
-  weightsMAP_ref = bob.io.base.load(datafile('weightsAfterMAP.hdf5', __name__))
-
-  # Compare to current results
-  # Gaps are quite large. This might be explained by the fact that there is no
-  # adaptation of a given Gaussian in torch3 when the corresponding responsibilities
-  # are below the responsibilities threshold
-  assert equals(gmm.means, meansMAP_ref, 2e-1)
-  assert equals(gmm.variances, variancesMAP_ref, 1e-4)
-  assert equals(gmm.weights, weightsMAP_ref, 1e-4)
-
-
-def test_gmm_test():
-
-  # Tests a GMMMachine by computing scores against a model and compare to
-  # an old reference
-
-  ar = bob.io.base.load(datafile('dataforMAP.hdf5', __name__))
-
-  # Initialize GMMMachine
-  n_gaussians = 5
-  n_inputs = 45
-  gmm = GMMMachine(n_gaussians, n_inputs)
-  gmm.means = bob.io.base.load(datafile('meansAfterML.hdf5', __name__))
-  gmm.variances = bob.io.base.load(datafile('variancesAfterML.hdf5', __name__))
-  gmm.weights = bob.io.base.load(datafile('weightsAfterML.hdf5', __name__))
-
-  threshold = 0.001
-  gmm.set_variance_thresholds(threshold)
-
-  # Test against the model
-  score_mean_ref = -1.50379e+06
-  score = 0.
-  for v in ar: score += gmm(v)
-  score /= len(ar)
-
-  # Compare current results to torch3vision
-  assert abs(score-score_mean_ref)/score_mean_ref<1e-4
-
-
-def test_custom_trainer():
-
-  # Custom python trainer
-
-  ar = bob.io.base.load(datafile("faithful.torch3_f64.hdf5", __name__))
-
-  mytrainer = MyTrainer1()
-
-  machine = KMeansMachine(2, 2)
-  mytrainer.train(machine, ar)
-
-  for i in range(0, 2):
-    assert (ar[i+1] == machine.means[i, :]).all()
diff --git a/bob/learn/misc/test_gaussian.py b/bob/learn/misc/test_gaussian.py
deleted file mode 100644
index eb2b0faeb19604dbffac7ff4e938933916fc9c64..0000000000000000000000000000000000000000
--- a/bob/learn/misc/test_gaussian.py
+++ /dev/null
@@ -1,112 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
-# Thu Feb 16 16:54:45 2012 +0200
-#
-# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland
-
-"""Tests the Gaussian machine
-"""
-
-import os
-import numpy
-import tempfile
-
-import bob.io.base
-
-from bob.learn.misc import Gaussian
-
-def equals(x, y, epsilon):
-  return (abs(x - y) < epsilon)
-
-def test_GaussianNormal():
-  # Test the likelihood computation of a simple normal Gaussian
-  gaussian = Gaussian(2)
-  # By default, initialized with zero mean and unit variance
-  logLH = gaussian.log_likelihood(numpy.array([0.4, 0.2], 'float64'))
-  assert equals(logLH, -1.93787706641, 1e-10)
-
-def test_GaussianMachine():
-  # Test a GaussianMachine more thoroughly
-
-  # Initializes a Gaussian with zero mean and unit variance
-  g = Gaussian(3)
-  assert (g.mean == 0.0).all()
-  assert (g.variance == 1.0).all()
-  assert g.shape == (3,)
-
-  # Set and check mean, variance, variance thresholds
-  mean     = numpy.array([0, 1, 2], 'float64')
-  variance = numpy.array([3, 2, 1], 'float64')
-  g.mean     = mean
-  g.variance = variance
-  g.set_variance_thresholds(0.0005)
-  assert (g.mean == mean).all()
-  assert (g.variance == variance).all()
-  assert (g.variance_thresholds == 0.0005).all()
-
-  # Save and read from file
-  filename = str(tempfile.mkstemp(".hdf5")[1])
-  g.save(bob.io.base.HDF5File(filename, 'w'))
-  g_loaded = Gaussian(bob.io.base.HDF5File(filename))
-  assert g == g_loaded
-  assert (g != g_loaded ) is False
-  assert g.is_similar_to(g_loaded)
-  
-  # Save and read from file using the keyword argument
-  filename = str(tempfile.mkstemp(".hdf5")[1])
-  g.save(hdf5=bob.io.base.HDF5File(filename, 'w'))
-  g_loaded = Gaussian(hdf5=bob.io.base.HDF5File(filename))
-  assert g == g_loaded
-  assert (g != g_loaded ) is False
-  assert g.is_similar_to(g_loaded)
-
-  # Save and loading from file using the keyword argument
-  filename = str(tempfile.mkstemp(".hdf5")[1])
-  g.save(bob.io.base.HDF5File(filename, 'w'))
-  g_loaded = bob.learn.misc.Gaussian()
-  g_loaded.load(bob.io.base.HDF5File(filename))
-  assert g == g_loaded
-  assert (g != g_loaded ) is False
-  assert g.is_similar_to(g_loaded)
-
-  # Save and loading from file using the keyword argument
-  filename = str(tempfile.mkstemp(".hdf5")[1])
-  g.save(bob.io.base.HDF5File(filename, 'w'))
-  g_loaded = bob.learn.misc.Gaussian()
-  g_loaded.load(hdf5=bob.io.base.HDF5File(filename))
-  assert g == g_loaded
-  assert (g != g_loaded ) is False
-  assert g.is_similar_to(g_loaded)
-
-
-  # Make them different
-  g_loaded.set_variance_thresholds(0.001)
-  assert (g == g_loaded ) is False
-  assert g != g_loaded
-
-  # Check likelihood computation
-  sample1 = numpy.array([0, 1, 2], 'float64')
-  sample2 = numpy.array([1, 2, 3], 'float64')
-  sample3 = numpy.array([2, 3, 4], 'float64')
-  ref1 = -3.652695334228046
-  ref2 = -4.569362000894712
-  ref3 = -7.319362000894712
-  eps = 1e-10
-  assert equals(g.log_likelihood(sample1), ref1, eps)
-  assert equals(g.log_likelihood(sample2), ref2, eps)
-  assert equals(g.log_likelihood(sample3), ref3, eps)
-
-  # Check resize and assignment
-  g.resize(5)
-  assert g.shape == (5,)
-  g2 = Gaussian()
-  g2 = g
-  assert g == g2
-  assert (g != g2 ) is False
-  g3 = Gaussian(g)
-  assert g == g3
-  assert (g != g3 ) is False
-
-  # Clean-up
-  os.unlink(filename)
diff --git a/bob/learn/misc/test_gmm.py b/bob/learn/misc/test_gmm.py
deleted file mode 100644
index a9e0910b9e1e6974594afa14f80acb757c5a50d8..0000000000000000000000000000000000000000
--- a/bob/learn/misc/test_gmm.py
+++ /dev/null
@@ -1,240 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
-# Thu Feb 16 17:57:10 2012 +0200
-#
-# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
-
-"""Tests the GMM machine and the GMMStats container
-"""
-
-import os
-import numpy
-import tempfile
-
-import bob.io.base
-from bob.io.base.test_utils import datafile
-
-from . import GMMStats, GMMMachine
-
-def test_GMMStats():
-  # Test a GMMStats
-  # Initializes a GMMStats
-  gs = GMMStats(2,3)
-  log_likelihood = -3.
-  T = 57
-  n = numpy.array([4.37, 5.31], 'float64')
-  sumpx = numpy.array([[1., 2., 3.], [4., 5., 6.]], 'float64')
-  sumpxx = numpy.array([[10., 20., 30.], [40., 50., 60.]], 'float64')
-  gs.log_likelihood = log_likelihood
-  gs.t = T
-  gs.n = n
-  gs.sum_px = sumpx
-  gs.sum_pxx = sumpxx
-  assert gs.log_likelihood == log_likelihood
-  assert gs.t == T
-  assert (gs.n == n).all()
-  assert (gs.sum_px == sumpx).all()
-  assert (gs.sum_pxx == sumpxx).all()
-  assert gs.shape==(2,3)
-
-  # Saves and reads from file
-  filename = str(tempfile.mkstemp(".hdf5")[1])
-  gs.save(bob.io.base.HDF5File(filename, 'w'))
-  gs_loaded = GMMStats(bob.io.base.HDF5File(filename))
-  assert gs == gs_loaded
-  assert (gs != gs_loaded ) is False
-  assert gs.is_similar_to(gs_loaded)
-  
-  # Saves and reads from file using the keyword argument
-  filename = str(tempfile.mkstemp(".hdf5")[1])
-  gs.save(hdf5=bob.io.base.HDF5File(filename, 'w'))
-  gs_loaded = GMMStats(bob.io.base.HDF5File(filename))
-  assert gs == gs_loaded
-  assert (gs != gs_loaded ) is False
-  assert gs.is_similar_to(gs_loaded)
-
-  # Saves and load from file using the keyword argument
-  filename = str(tempfile.mkstemp(".hdf5")[1])
-  gs.save(hdf5=bob.io.base.HDF5File(filename, 'w'))
-  gs_loaded = GMMStats()
-  gs_loaded.load(bob.io.base.HDF5File(filename))
-  assert gs == gs_loaded
-  assert (gs != gs_loaded ) is False
-  assert gs.is_similar_to(gs_loaded)
-
-  # Saves and load from file using the keyword argument
-  filename = str(tempfile.mkstemp(".hdf5")[1])
-  gs.save(hdf5=bob.io.base.HDF5File(filename, 'w'))
-  gs_loaded = GMMStats()
-  gs_loaded.load(hdf5=bob.io.base.HDF5File(filename))
-  assert gs == gs_loaded
-  assert (gs != gs_loaded ) is False
-  assert gs.is_similar_to(gs_loaded)
-  
-  
-  # Makes them different
-  gs_loaded.t = 58
-  assert (gs == gs_loaded ) is False
-  assert gs != gs_loaded
-  assert (gs.is_similar_to(gs_loaded)) is False
-  # Accumulates from another GMMStats
-  gs2 = GMMStats(2,3)
-  gs2.log_likelihood = log_likelihood
-  gs2.t = T
-  gs2.n = n
-  gs2.sum_px = sumpx
-  gs2.sum_pxx = sumpxx
-  gs2 += gs
-  eps = 1e-8
-  assert gs2.log_likelihood == 2*log_likelihood
-  assert gs2.t == 2*T
-  assert numpy.allclose(gs2.n, 2*n, eps)
-  assert numpy.allclose(gs2.sum_px, 2*sumpx, eps)
-  assert numpy.allclose(gs2.sum_pxx, 2*sumpxx, eps)
-
-  # Reinit and checks for zeros
-  gs_loaded.init()
-  assert gs_loaded.log_likelihood == 0
-  assert gs_loaded.t == 0
-  assert (gs_loaded.n == 0).all()
-  assert (gs_loaded.sum_px == 0).all()
-  assert (gs_loaded.sum_pxx == 0).all()
-  # Resize and checks size
-  assert  gs_loaded.shape==(2,3)
-  gs_loaded.resize(4,5)  
-  assert  gs_loaded.shape==(4,5)
-  assert gs_loaded.sum_px.shape[0] == 4
-  assert gs_loaded.sum_px.shape[1] == 5
-
-  # Clean-up
-  os.unlink(filename)
-
-def test_GMMMachine_1():
-  # Test a GMMMachine basic features
-
-  weights   = numpy.array([0.5, 0.5], 'float64')
-  weights2   = numpy.array([0.6, 0.4], 'float64')
-  means     = numpy.array([[3, 70, 0], [4, 72, 0]], 'float64')
-  means2     = numpy.array([[3, 7, 0], [4, 72, 0]], 'float64')
-  variances = numpy.array([[1, 10, 1], [2, 5, 2]], 'float64')
-  variances2 = numpy.array([[10, 10, 1], [2, 5, 2]], 'float64')
-  varianceThresholds = numpy.array([[0, 0, 0], [0, 0, 0]], 'float64')
-  varianceThresholds2 = numpy.array([[0.0005, 0.0005, 0.0005], [0, 0, 0]], 'float64')
-
-  # Initializes a GMMMachine
-  gmm = GMMMachine(2,3)
-  # Sets the weights, means, variances and varianceThresholds and
-  # Checks correctness
-  gmm.weights = weights
-  gmm.means = means
-  gmm.variances = variances
-  gmm.variance_thresholds = varianceThresholds
-  assert gmm.shape == (2,3)
-  assert (gmm.weights == weights).all()
-  assert (gmm.means == means).all()
-  assert (gmm.variances == variances).all()
-  assert (gmm.variance_thresholds == varianceThresholds).all()
-
-  # Checks supervector-like accesses
-  assert (gmm.mean_supervector == means.reshape(means.size)).all()
-  assert (gmm.variance_supervector == variances.reshape(variances.size)).all()
-  newMeans = numpy.array([[3, 70, 2], [4, 72, 2]], 'float64')
-  newVariances = numpy.array([[1, 1, 1], [2, 2, 2]], 'float64')
-
-
-  # Checks particular varianceThresholds-related methods
-  varianceThresholds1D = numpy.array([0.3, 1, 0.5], 'float64')
-  gmm.set_variance_thresholds(varianceThresholds1D)
-  assert (gmm.variance_thresholds[0,:] == varianceThresholds1D).all()
-  assert (gmm.variance_thresholds[1,:] == varianceThresholds1D).all()
-
-  gmm.set_variance_thresholds(0.005)
-  assert (gmm.variance_thresholds == 0.005).all()
-
-  # Checks Gaussians access
-  gmm.means     = newMeans
-  gmm.variances = newVariances
-  assert (gmm.get_gaussian(0).mean == newMeans[0,:]).all()
-  assert (gmm.get_gaussian(1).mean == newMeans[1,:]).all()
-  assert (gmm.get_gaussian(0).variance == newVariances[0,:]).all()
-  assert (gmm.get_gaussian(1).variance == newVariances[1,:]).all()
-
-  # Checks resize
-  gmm.resize(4,5)
-  assert gmm.shape == (4,5)
-
-  # Checks comparison
-  gmm2 = GMMMachine(gmm)
-  gmm3 = GMMMachine(2,3)
-  gmm3.weights = weights2
-  gmm3.means = means
-  gmm3.variances = variances
-  #gmm3.varianceThresholds = varianceThresholds
-  gmm4 = GMMMachine(2,3)
-  gmm4.weights = weights
-  gmm4.means = means2
-  gmm4.variances = variances
-  #gmm4.varianceThresholds = varianceThresholds
-  gmm5 = GMMMachine(2,3)
-  gmm5.weights = weights
-  gmm5.means = means
-  gmm5.variances = variances2
-  #gmm5.varianceThresholds = varianceThresholds
-  gmm6 = GMMMachine(2,3)
-  gmm6.weights = weights
-  gmm6.means = means
-  gmm6.variances = variances
-  #gmm6.varianceThresholds = varianceThresholds2
-
-  assert gmm == gmm2
-  assert (gmm != gmm2) is False
-  assert gmm.is_similar_to(gmm2)
-  assert gmm != gmm3
-  assert (gmm == gmm3) is False
-  assert gmm.is_similar_to(gmm3) is False
-  assert gmm != gmm4
-  assert (gmm == gmm4) is False
-  assert gmm.is_similar_to(gmm4) is False
-  assert gmm != gmm5
-  assert (gmm == gmm5) is False
-  assert gmm.is_similar_to(gmm5) is False
-  assert gmm != gmm6
-  assert (gmm == gmm6) is False
-  assert gmm.is_similar_to(gmm6) is False
-
-def test_GMMMachine_2():
-  # Test a GMMMachine (statistics)
-
-  arrayset = bob.io.base.load(datafile("faithful.torch3_f64.hdf5", __name__))
-  gmm = GMMMachine(2, 2)
-  gmm.weights   = numpy.array([0.5, 0.5], 'float64')
-  gmm.means     = numpy.array([[3, 70], [4, 72]], 'float64')
-  gmm.variances = numpy.array([[1, 10], [2, 5]], 'float64')
-  gmm.variance_thresholds = numpy.array([[0, 0], [0, 0]], 'float64')
-
-  stats = GMMStats(2, 2)
-  gmm.acc_statistics(arrayset, stats)
-
-  stats_ref = GMMStats(bob.io.base.HDF5File(datafile("stats.hdf5",__name__)))
-
-  assert stats.t == stats_ref.t
-  assert numpy.allclose(stats.n, stats_ref.n, atol=1e-10)
-  #assert numpy.array_equal(stats.sumPx, stats_ref.sumPx)
-  #Note AA: precision error above
-  assert numpy.allclose(stats.sum_px, stats_ref.sum_px, atol=1e-10)
-  assert numpy.allclose(stats.sum_pxx, stats_ref.sum_pxx, atol=1e-10)
-
-def test_GMMMachine_3():
-  # Test a GMMMachine (log-likelihood computation)
-
-  data = bob.io.base.load(datafile('data.hdf5', __name__))
-  gmm = GMMMachine(2, 50)
-  gmm.weights   = bob.io.base.load(datafile('weights.hdf5', __name__))
-  gmm.means     = bob.io.base.load(datafile('means.hdf5', __name__))
-  gmm.variances = bob.io.base.load(datafile('variances.hdf5', __name__))
-
-  # Compare the log-likelihood with the one obtained using Chris Matlab
-  # implementation
-  matlab_ll_ref = -2.361583051672024e+02
-  assert abs(gmm(data) - matlab_ll_ref) < 1e-10
diff --git a/bob/learn/misc/test_ivector.py b/bob/learn/misc/test_ivector.py
deleted file mode 100644
index fb9c8b23e9afedc8013d89ce254fcea5ec02db3e..0000000000000000000000000000000000000000
--- a/bob/learn/misc/test_ivector.py
+++ /dev/null
@@ -1,159 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
-# Mon Apr 2 11:19:00 2013 +0200
-#
-# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
-
-
-"""Tests the I-Vector machine
-"""
-
-import numpy
-import numpy.linalg
-import numpy.random
-
-from . import GMMMachine, GMMStats, IVectorMachine
-
-
-### Test class inspired by an implementation of Chris McCool
-### Chris McCool (chris.mccool@nicta.com.au)
-class IVectorMachinePy():
-  """An IVector extractor"""
-
-  def __init__(self, ubm=None, dim_t=1):
-    # Our state
-    self.m_ubm = ubm
-    self.m_dim_t = dim_t
-    # Resize the matrices T and sigma
-    self.resize()
-    # Precompute
-    self.precompute()
-
-  def resize(self):
-    if self.m_ubm:
-      dim_cd = self.m_ubm.shape[0] * self.m_ubm.shape[1]
-      self.m_t = numpy.random.randn(dim_cd, self.m_dim_t)
-      self.m_sigma = numpy.random.randn(dim_cd)
-
-  def precompute(self):
-    if self.m_ubm and self.m_t is not None and self.m_sigma is not None:
-      #dim_c = self.m_ubm.dim_c
-      #dim_d = self.m_ubm.dim_d
-      dim_c,dim_d = self.m_ubm.shape
-      self.m_cache_TtSigmaInv = {}
-      self.m_cache_TtSigmaInvT = {}
-      for c in range(dim_c):
-        start                       = c*dim_d
-        end                         = (c+1)*dim_d
-        Tc                          = self.m_t[start:end,:]
-        self.m_cache_TtSigmaInv[c]  = Tc.transpose() / self.m_sigma[start:end]
-        self.m_cache_TtSigmaInvT[c] = numpy.dot(self.m_cache_TtSigmaInv[c], Tc);
-
-  def set_ubm(self, ubm):
-    self.m_ubm = ubm
-    # Precompute
-    self.precompute()
-
-  def get_ubm(self):
-    return self.m_ubm
-
-  def set_t(self, t):
-    # @warning: no dimensions check
-    self.m_t = t
-    # Precompute
-    self.precompute()
-
-  def get_t(self):
-    return self.m_t
-
-  def set_sigma(self, sigma):
-    # @warning: no dimensions check
-    self.m_sigma = sigma
-    # Precompute
-    self.precompute()
-
-  def get_sigma(self):
-    return self.m_sigma
-
-
-  def _get_TtSigmaInv_Fnorm(self, N, F):
-    # Initialization
-    #dim_c = self.m_ubm.dim_c
-    #dim_d = self.m_ubm.dim_d
-    dim_c,dim_d = self.m_ubm.shape
-    mean_supervector = self.m_ubm.mean_supervector
-    TtSigmaInv_Fnorm = numpy.zeros(shape=(self.m_dim_t,), dtype=numpy.float64)
-
-    # Loop over each Gaussian component
-    dim_c = self.m_ubm.shape[0]
-    for c in range(dim_c):
-      start             = c*dim_d
-      end               = (c+1)*dim_d
-      Fnorm             = F[c,:] - N[c] * mean_supervector[start:end]
-      TtSigmaInv_Fnorm  = TtSigmaInv_Fnorm + numpy.dot(self.m_cache_TtSigmaInv[c], Fnorm)
-    return TtSigmaInv_Fnorm
-
-  def _get_I_TtSigmaInvNT(self, N):
-    # Initialization
-    #dim_c = self.m_ubm.dim_c
-    #dim_d = self.m_ubm.dim_d
-    dim_c, dim_d = self.m_ubm.shape
-
-    TtSigmaInvNT = numpy.eye(self.m_dim_t, dtype=numpy.float64)
-    for c in range(dim_c):
-      TtSigmaInvNT = TtSigmaInvNT + self.m_cache_TtSigmaInvT[c] * N[c]
-
-    return TtSigmaInvNT
-
-  def forward(self, gmmstats):
-    if self.m_ubm and not (self.m_t == None) and not (self.m_sigma == None):
-      N = gmmstats.n
-      F = gmmstats.sum_px
-
-      TtSigmaInv_Fnorm = self._get_TtSigmaInv_Fnorm(N, F)
-      TtSigmaInvNT = self._get_I_TtSigmaInvNT(N)
-
-      return numpy.linalg.solve(TtSigmaInvNT, TtSigmaInv_Fnorm)
-
-
-def test_machine():
-
-  # Ubm
-  ubm = GMMMachine(2,3)
-  ubm.weights = numpy.array([0.4,0.6])
-  ubm.means = numpy.array([[1.,7,4],[4,5,3]])
-  ubm.variances = numpy.array([[0.5,1.,1.5],[1.,1.5,2.]])
-
-  # Defines GMMStats
-  gs = GMMStats(2,3)
-  log_likelihood = -3.
-  T = 1
-  n = numpy.array([0.4, 0.6], numpy.float64)
-  sumpx = numpy.array([[1., 2., 3.], [2., 4., 3.]], numpy.float64)
-  sumpxx = numpy.array([[10., 20., 30.], [40., 50., 60.]], numpy.float64)
-  gs.log_likelihood = log_likelihood
-  gs.t = T
-  gs.n = n
-  gs.sum_px = sumpx
-  gs.sum_pxx = sumpxx
-
-  # IVector (Python)
-  m = IVectorMachinePy(ubm, 2)
-  t = numpy.array([[1.,2],[4,1],[0,3],[5,8],[7,10],[11,1]])
-  m.set_t(t)
-  sigma = numpy.array([1.,2.,1.,3.,2.,4.])
-  m.set_sigma(sigma)
-
-  wij_ref = numpy.array([-0.04213415, 0.21463343]) # Reference from original Chris implementation
-  wij = m.forward(gs)
-  assert numpy.allclose(wij_ref, wij, 1e-5)
-
-  # IVector (C++)
-  mc = IVectorMachine(ubm, 2)
-  mc.t = t
-  mc.sigma = sigma
-
-  wij_ref = numpy.array([-0.04213415, 0.21463343]) # Reference from original Chris implementation
-  wij = mc(gs)
-  assert numpy.allclose(wij_ref, wij, 1e-5)
diff --git a/bob/learn/misc/test_ivector_trainer.py b/bob/learn/misc/test_ivector_trainer.py
deleted file mode 100644
index 17a7c3e4377658031a68618f1a9f57790b8f319e..0000000000000000000000000000000000000000
--- a/bob/learn/misc/test_ivector_trainer.py
+++ /dev/null
@@ -1,363 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
-#
-# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
-
-"""Tests the I-Vector trainer
-"""
-
-import numpy
-import numpy.linalg
-import numpy.random
-
-from bob.learn.misc import GMMMachine, GMMStats, IVectorMachine, IVectorTrainer
-
-### Test class inspired by an implementation of Chris McCool
-### Chris McCool (chris.mccool@nicta.com.au)
-class IVectorTrainerPy():
-  """An IVector extractor"""
-
-  def __init__(self, convergence_threshold=0.001, max_iterations=10,
-      compute_likelihood=False, sigma_update=False, variance_floor=1e-5):
-    self.m_convergence_threshold = convergence_threshold
-    self.m_max_iterations = max_iterations
-    self.m_compute_likelihood = compute_likelihood
-    self.m_sigma_update = sigma_update
-    self.m_variance_floor = variance_floor
-
-  def initialize(self, machine, data):
-    ubm = machine.ubm
-    self.m_dim_c = ubm.shape[0]
-    self.m_dim_d = ubm.shape[1]
-    self.m_dim_t = machine.t.shape[1]
-    self.m_meansupervector = ubm.mean_supervector
-    t = numpy.random.randn(self.m_dim_c*self.m_dim_d, self.m_dim_t)
-    machine.t = t
-    machine.sigma = machine.ubm.variance_supervector
-
-  def e_step(self, machine, data):
-    n_samples = len(data)
-    self.m_acc_Nij_Sigma_wij2  = {}
-    self.m_acc_Fnorm_Sigma_wij = {}
-    self.m_acc_Snorm = numpy.zeros(shape=(self.m_dim_c*self.m_dim_d,), dtype=numpy.float64)
-    self.m_N = numpy.zeros(shape=(self.m_dim_c,), dtype=numpy.float64)
-
-    for c in range(self.m_dim_c):
-      self.m_acc_Nij_Sigma_wij2[c]  = numpy.zeros(shape=(self.m_dim_t,self.m_dim_t), dtype=numpy.float64)
-      self.m_acc_Fnorm_Sigma_wij[c] = numpy.zeros(shape=(self.m_dim_d,self.m_dim_t), dtype=numpy.float64)
-
-    for n in range(n_samples):
-      Nij = data[n].n
-      Fij = data[n].sum_px
-      Sij = data[n].sum_pxx
-
-      # Estimate latent variables
-      TtSigmaInv_Fnorm = machine.__compute_TtSigmaInvFnorm__(data[n])
-      I_TtSigmaInvNT = machine.__compute_Id_TtSigmaInvT__(data[n])
-
-      Fnorm = numpy.zeros(shape=(self.m_dim_c*self.m_dim_d,), dtype=numpy.float64)
-      Snorm = numpy.zeros(shape=(self.m_dim_c*self.m_dim_d,), dtype=numpy.float64)
-
-      # Compute normalized statistics
-      for c in range(self.m_dim_c):
-        start            = c*self.m_dim_d
-        end              = (c+1)*self.m_dim_d
-
-        Fc               = Fij[c,:]
-        Sc               = Sij[c,:]
-        mc               = self.m_meansupervector[start:end]
-
-        Fc_mc            = Fc * mc
-        Nc_mc_mcT        = Nij[c] * mc * mc
-
-        Fnorm[start:end] = Fc - Nij[c] * mc
-        Snorm[start:end] = Sc - (2 * Fc_mc) + Nc_mc_mcT
-
-      # Latent variables
-      I_TtSigmaInvNT_inv = numpy.linalg.inv(I_TtSigmaInvNT)
-      E_w_ij             = numpy.dot(I_TtSigmaInvNT_inv, TtSigmaInv_Fnorm)
-      E_w_ij2            = I_TtSigmaInvNT_inv + numpy.outer(E_w_ij, E_w_ij)
-
-      # Do the accumulation for each component
-      self.m_acc_Snorm   = self.m_acc_Snorm + Snorm    # (dim_c*dim_d)
-      for c in range(self.m_dim_c):
-        start            = c*self.m_dim_d
-        end              = (c+1)*self.m_dim_d
-        current_Fnorm    = Fnorm[start:end]            # (dim_d)
-        self.m_acc_Nij_Sigma_wij2[c]  = self.m_acc_Nij_Sigma_wij2[c] + Nij[c] * E_w_ij2                    # (dim_t, dim_t)
-        self.m_acc_Fnorm_Sigma_wij[c] = self.m_acc_Fnorm_Sigma_wij[c] + numpy.outer(current_Fnorm, E_w_ij) # (dim_d, dim_t)
-        self.m_N[c]                   = self.m_N[c] + Nij[c]
-
-
-  def m_step(self, machine, data):
-    A = self.m_acc_Nij_Sigma_wij2
-
-    T = numpy.zeros(shape=(self.m_dim_c*self.m_dim_d,self.m_dim_t), dtype=numpy.float64)
-    Told = machine.t
-    if self.m_sigma_update:
-      sigma = numpy.zeros(shape=self.m_acc_Snorm.shape, dtype=numpy.float64)
-    for c in range(self.m_dim_c):
-      start = c*self.m_dim_d;
-      end   = (c+1)*self.m_dim_d;
-      # T update
-      A     = self.m_acc_Nij_Sigma_wij2[c].transpose()
-      B     = self.m_acc_Fnorm_Sigma_wij[c].transpose()
-      if numpy.array_equal(A, numpy.zeros(A.shape)):
-        X = numpy.zeros(shape=(self.m_dim_t,self.m_dim_d), dtype=numpy.float64)
-      else:
-        X = numpy.linalg.solve(A, B)
-      T[start:end,:] = X.transpose()
-      # Sigma update
-      if self.m_sigma_update:
-        Told_c           = Told[start:end,:].transpose()
-        # warning: Use of the new T estimate! (revert second next line if you don't want that)
-        Fnorm_Ewij_Tt    = numpy.diag(numpy.dot(self.m_acc_Fnorm_Sigma_wij[c], X))
-        #Fnorm_Ewij_Tt = numpy.diag(numpy.dot(self.m_acc_Fnorm_Sigma_wij[c], Told_c))
-        sigma[start:end] = (self.m_acc_Snorm[start:end] - Fnorm_Ewij_Tt) / self.m_N[c]
-
-    machine.t = T
-    if self.m_sigma_update:
-      sigma[sigma < self.m_variance_floor] = self.m_variance_floor
-      machine.sigma = sigma
-
-  def finalize(self, machine, data):
-    pass
-
-  def train(self, machine, data):
-    self.initialize(machine, data)
-    average_output_previous   = -sys.maxsize
-    average_output            = -sys.maxsize
-    self.e_step(machine, data)
-
-    i = 0
-    while True:
-      average_output_previous = average_output
-      self.m_step(machine, data)
-      self.e_step(machine, data)
-      if(self.m_max_iterations > 0 and i+1 >= self.m_max_iterations):
-        break
-      i += 1
-
-
-def test_trainer_nosigma():
-  # Ubm
-  ubm = GMMMachine(2,3)
-  ubm.weights = numpy.array([0.4,0.6])
-  ubm.means = numpy.array([[1.,7,4],[4,5,3]])
-  ubm.variances = numpy.array([[0.5,1.,1.5],[1.,1.5,2.]])
-
-  # Defines GMMStats
-  gs1 = GMMStats(2,3)
-  log_likelihood1 = -3.
-  T1 = 1
-  n1 = numpy.array([0.4, 0.6], numpy.float64)
-  sumpx1 = numpy.array([[1., 2., 3.], [2., 4., 3.]], numpy.float64)
-  sumpxx1 = numpy.array([[10., 20., 30.], [40., 50., 60.]], numpy.float64)
-  gs1.log_likelihood = log_likelihood1
-  gs1.t = T1
-  gs1.n = n1
-  gs1.sum_px = sumpx1
-  gs1.sum_pxx = sumpxx1
-
-  gs2 = GMMStats(2,3)
-  log_likelihood2 = -4.
-  T2 = 1
-  n2 = numpy.array([0.2, 0.8], numpy.float64)
-  sumpx2 = numpy.array([[2., 1., 3.], [3., 4.1, 3.2]], numpy.float64)
-  sumpxx2 = numpy.array([[12., 15., 25.], [39., 51., 62.]], numpy.float64)
-  gs2.log_likelihood = log_likelihood2
-  gs2.t = T2
-  gs2.n = n2
-  gs2.sum_px = sumpx2
-  gs2.sum_pxx = sumpxx2
-
-  data = [gs1, gs2]
-
-
-  acc_Nij_Sigma_wij2_ref1  = {0: numpy.array([[ 0.03202305, -0.02947769], [-0.02947769,  0.0561132 ]]),
-                             1: numpy.array([[ 0.07953279, -0.07829414], [-0.07829414,  0.13814242]])}
-  acc_Fnorm_Sigma_wij_ref1 = {0: numpy.array([[-0.29622691,  0.61411796], [ 0.09391764, -0.27955961], [-0.39014455,  0.89367757]]),
-                             1: numpy.array([[ 0.04695882, -0.13977981], [-0.05718673,  0.24159665], [-0.17098161,  0.47326585]])}
-  acc_Snorm_ref1           = numpy.array([16.6, 22.4, 16.6, 61.4, 55., 97.4])
-  N_ref1                   = numpy.array([0.6, 1.4])
-  t_ref1                   = numpy.array([[  1.59543739, 11.78239235], [ -3.20130371, -6.66379081], [  4.79674111, 18.44618316],
-                                          [ -0.91765407, -1.5319461 ], [  2.26805901,  3.03434944], [  2.76600031,  4.9935962 ]])
-
-  acc_Nij_Sigma_wij2_ref2  = {0: numpy.array([[ 0.37558389, -0.15405228], [-0.15405228,  0.1421269 ]]),
-                             1: numpy.array([[ 1.02076081, -0.57683953], [-0.57683953,  0.53912239]])}
-  acc_Fnorm_Sigma_wij_ref2 = {0: numpy.array([[-1.1261668 ,  1.46496753], [-0.03579289, -0.37875811], [-1.09037391,  1.84372565]]),
-                             1: numpy.array([[-0.01789645, -0.18937906], [ 0.35221084,  0.15854126], [-0.10004552,  0.72559036]])}
-  acc_Snorm_ref2           = numpy.array([16.6, 22.4, 16.6, 61.4, 55., 97.4])
-  N_ref2                   = numpy.array([0.6, 1.4])
-  t_ref2                   = numpy.array([[  2.2133685,  12.70654597], [ -2.13959381, -4.98404887], [  4.35296231, 17.69059484],
-                                          [ -0.54644055, -0.93594252], [  1.29308324,  1.67762053], [  1.67583072,  3.13894546]])
-  acc_Nij_Sigma_wij2_ref = [acc_Nij_Sigma_wij2_ref1, acc_Nij_Sigma_wij2_ref2]
-  acc_Fnorm_Sigma_wij_ref = [acc_Fnorm_Sigma_wij_ref1, acc_Fnorm_Sigma_wij_ref2]
-  acc_Snorm_ref = [acc_Snorm_ref1, acc_Snorm_ref2]
-  N_ref = [N_ref1, N_ref2]
-  t_ref = [t_ref1, t_ref2]
-
-  # Python implementation
-  # Machine
-  m = IVectorMachine(ubm, 2)
-  t = numpy.array([[1.,2],[4,1],[0,3],[5,8],[7,10],[11,1]])
-  sigma = numpy.array([1.,2.,1.,3.,2.,4.])
-
-  # Initialization
-  trainer = IVectorTrainerPy()
-  trainer.initialize(m, data)
-  m.t = t
-  m.sigma = sigma
-  for it in range(2):
-    # E-Step
-    trainer.e_step(m, data)
-    for k in acc_Nij_Sigma_wij2_ref[it]:
-      assert numpy.allclose(acc_Nij_Sigma_wij2_ref[it][k], trainer.m_acc_Nij_Sigma_wij2[k], 1e-5)
-    for k in acc_Fnorm_Sigma_wij_ref[it]:
-      assert numpy.allclose(acc_Fnorm_Sigma_wij_ref[it][k], trainer.m_acc_Fnorm_Sigma_wij[k], 1e-5)
-    assert numpy.allclose(acc_Snorm_ref[it], trainer.m_acc_Snorm, 1e-5)
-    assert numpy.allclose(N_ref[it], trainer.m_N, 1e-5)
-
-    # M-Step
-    trainer.m_step(m, data)
-    assert numpy.allclose(t_ref[it], m.t, 1e-5)
-
-  # C++ implementation
-  # Machine
-  m = IVectorMachine(ubm, 2)
-
-  # Initialization
-  trainer = IVectorTrainer()
-  trainer.initialize(m)
-  m.t = t
-  m.sigma = sigma
-  for it in range(2):
-    # E-Step
-    trainer.e_step(m, data)
-    for k in acc_Nij_Sigma_wij2_ref[it]:
-      assert numpy.allclose(acc_Nij_Sigma_wij2_ref[it][k], trainer.acc_nij_wij2[k], 1e-5)
-    for k in acc_Fnorm_Sigma_wij_ref[it]:
-      assert numpy.allclose(acc_Fnorm_Sigma_wij_ref[it][k], trainer.acc_fnormij_wij[k], 1e-5)
-
-    # M-Step
-    trainer.m_step(m)
-    assert numpy.allclose(t_ref[it], m.t, 1e-5)
-
-def test_trainer_update_sigma():
-  # Ubm
-  dim_c = 2
-  dim_d = 3
-  ubm = GMMMachine(dim_c,dim_d)
-  ubm.weights = numpy.array([0.4,0.6])
-  ubm.means = numpy.array([[1.,7,4],[4,5,3]])
-  ubm.variances = numpy.array([[0.5,1.,1.5],[1.,1.5,2.]])
-
-  # Defines GMMStats
-  gs1 = GMMStats(dim_c,dim_d)
-  log_likelihood1 = -3.
-  T1 = 1
-  n1 = numpy.array([0.4, 0.6], numpy.float64)
-  sumpx1 = numpy.array([[1., 2., 3.], [2., 4., 3.]], numpy.float64)
-  sumpxx1 = numpy.array([[10., 20., 30.], [40., 50., 60.]], numpy.float64)
-  gs1.log_likelihood = log_likelihood1
-  gs1.t = T1
-  gs1.n = n1
-  gs1.sum_px = sumpx1
-  gs1.sum_pxx = sumpxx1
-
-  gs2 = GMMStats(dim_c,dim_d)
-  log_likelihood2 = -4.
-  T2 = 1
-  n2 = numpy.array([0.2, 0.8], numpy.float64)
-  sumpx2 = numpy.array([[2., 1., 3.], [3., 4.1, 3.2]], numpy.float64)
-  sumpxx2 = numpy.array([[12., 15., 25.], [39., 51., 62.]], numpy.float64)
-  gs2.log_likelihood = log_likelihood2
-  gs2.t = T2
-  gs2.n = n2
-  gs2.sum_px = sumpx2
-  gs2.sum_pxx = sumpxx2
-
-  data = [gs1, gs2]
-
-  # Reference values
-  acc_Nij_Sigma_wij2_ref1  = {0: numpy.array([[ 0.03202305, -0.02947769], [-0.02947769,  0.0561132 ]]),
-                              1: numpy.array([[ 0.07953279, -0.07829414], [-0.07829414,  0.13814242]])}
-  acc_Fnorm_Sigma_wij_ref1 = {0: numpy.array([[-0.29622691,  0.61411796], [ 0.09391764, -0.27955961], [-0.39014455,  0.89367757]]),
-                              1: numpy.array([[ 0.04695882, -0.13977981], [-0.05718673,  0.24159665], [-0.17098161,  0.47326585]])}
-  acc_Snorm_ref1           = numpy.array([16.6, 22.4, 16.6, 61.4, 55., 97.4])
-  N_ref1                   = numpy.array([0.6, 1.4])
-  t_ref1                   = numpy.array([[  1.59543739, 11.78239235], [ -3.20130371, -6.66379081], [  4.79674111, 18.44618316],
-                                          [ -0.91765407, -1.5319461 ], [  2.26805901,  3.03434944], [  2.76600031,  4.9935962 ]])
-  sigma_ref1               = numpy.array([ 16.39472121, 34.72955353,  3.3108037, 43.73496916, 38.85472445, 68.22116903])
-
-  acc_Nij_Sigma_wij2_ref2  = {0: numpy.array([[ 0.50807426, -0.11907756], [-0.11907756,  0.12336544]]),
-                              1: numpy.array([[ 1.18602399, -0.2835859 ], [-0.2835859 ,  0.39440498]])}
-  acc_Fnorm_Sigma_wij_ref2 = {0: numpy.array([[ 0.07221453,  1.1189786 ], [-0.08681275, -0.35396112], [ 0.15902728,  1.47293972]]),
-                              1: numpy.array([[-0.04340637, -0.17698056], [ 0.10662127,  0.21484933],[ 0.13116645,  0.64474271]])}
-  acc_Snorm_ref2           = numpy.array([16.6, 22.4, 16.6, 61.4, 55., 97.4])
-  N_ref2                   = numpy.array([0.6, 1.4])
-  t_ref2                   = numpy.array([[  2.93105054, 11.89961223], [ -1.08988119, -3.92120757], [  4.02093173, 15.82081981],
-                                          [ -0.17376634, -0.57366984], [  0.26585634,  0.73589952], [  0.60557877,   2.07014704]])
-  sigma_ref2               = numpy.array([5.12154025e+00, 3.48623823e+01, 1.00000000e-05, 4.37792350e+01, 3.91525332e+01, 6.85613258e+01])
-
-  acc_Nij_Sigma_wij2_ref = [acc_Nij_Sigma_wij2_ref1, acc_Nij_Sigma_wij2_ref2]
-  acc_Fnorm_Sigma_wij_ref = [acc_Fnorm_Sigma_wij_ref1, acc_Fnorm_Sigma_wij_ref2]
-  acc_Snorm_ref = [acc_Snorm_ref1, acc_Snorm_ref2]
-  N_ref = [N_ref1, N_ref2]
-  t_ref = [t_ref1, t_ref2]
-  sigma_ref = [sigma_ref1, sigma_ref2]
-
-
-  # Python implementation
-  # Machine
-  m = IVectorMachine(ubm, 2)
-  t = numpy.array([[1.,2],[4,1],[0,3],[5,8],[7,10],[11,1]])
-  sigma = numpy.array([1.,2.,1.,3.,2.,4.])
-
-  # Initialization
-  trainer = IVectorTrainerPy(sigma_update=True)
-  trainer.initialize(m, data)
-  m.t = t
-  m.sigma = sigma
-  for it in range(2):
-    # E-Step
-    trainer.e_step(m, data)
-    for k in acc_Nij_Sigma_wij2_ref[it]:
-      assert numpy.allclose(acc_Nij_Sigma_wij2_ref[it][k], trainer.m_acc_Nij_Sigma_wij2[k], 1e-5)
-    for k in acc_Fnorm_Sigma_wij_ref[it]:
-      assert numpy.allclose(acc_Fnorm_Sigma_wij_ref[it][k], trainer.m_acc_Fnorm_Sigma_wij[k], 1e-5)
-    assert numpy.allclose(acc_Snorm_ref[it], trainer.m_acc_Snorm, 1e-5)
-    assert numpy.allclose(N_ref[it], trainer.m_N, 1e-5)
-
-    # M-Step
-    trainer.m_step(m, data)
-    assert numpy.allclose(t_ref[it], m.t, 1e-5)
-    assert numpy.allclose(sigma_ref[it], m.sigma, 1e-5)
-
-
-  # C++ implementation
-  # Machine
-  m = IVectorMachine(ubm, 2)
-  m.variance_threshold = 1e-5
-
-  # Initialization
-  trainer = IVectorTrainer(update_sigma=True)
-  trainer.initialize(m)
-  m.t = t
-  m.sigma = sigma
-  for it in range(2):
-    # E-Step
-    trainer.e_step(m, data)
-    for k in acc_Nij_Sigma_wij2_ref[it]:
-      assert numpy.allclose(acc_Nij_Sigma_wij2_ref[it][k], trainer.acc_nij_wij2[k], 1e-5)
-    for k in acc_Fnorm_Sigma_wij_ref[it]:
-      assert numpy.allclose(acc_Fnorm_Sigma_wij_ref[it][k], trainer.acc_fnormij_wij[k], 1e-5)
-    assert numpy.allclose(acc_Snorm_ref[it].reshape(dim_c,dim_d), trainer.acc_snormij, 1e-5)
-    assert numpy.allclose(N_ref[it], trainer.acc_nij, 1e-5)
-
-    # M-Step
-    trainer.m_step(m)
-    assert numpy.allclose(t_ref[it], m.t, 1e-5)
-    assert numpy.allclose(sigma_ref[it], m.sigma, 1e-5)
-
diff --git a/bob/learn/misc/test_jfa.py b/bob/learn/misc/test_jfa.py
deleted file mode 100644
index 594ada436ef1d66614302066b4e20d275b9b2a31..0000000000000000000000000000000000000000
--- a/bob/learn/misc/test_jfa.py
+++ /dev/null
@@ -1,396 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
-# Wed Feb 15 23:24:35 2012 +0200
-#
-# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
-
-"""Tests on the JFA-based machines
-"""
-
-import os
-import numpy
-import numpy.linalg
-import tempfile
-
-import bob.io.base
-
-from . import GMMMachine, GMMStats, JFABase, ISVBase, ISVMachine, JFAMachine
-
-def estimate_x(dim_c, dim_d, mean, sigma, U, N, F):
-  # Compute helper values
-  UtSigmaInv = {}
-  UtSigmaInvU = {}
-  dim_ru = U.shape[1]
-  for c in range(dim_c):
-    start                       = c*dim_d
-    end                         = (c+1)*dim_d
-    Uc                          = U[start:end,:]
-    UtSigmaInv[c]  = Uc.transpose() / sigma[start:end]
-    UtSigmaInvU[c] = numpy.dot(UtSigmaInv[c], Uc);
-
-  # I + (U^{T} \Sigma^-1 N U)
-  I_UtSigmaInvNU = numpy.eye(dim_ru, dtype=numpy.float64)
-  for c in range(dim_c):
-    I_UtSigmaInvNU = I_UtSigmaInvNU + UtSigmaInvU[c] * N[c]
-
-  # U^{T} \Sigma^-1 F
-  UtSigmaInv_Fnorm = numpy.zeros((dim_ru,), numpy.float64)
-  for c in range(dim_c):
-    start             = c*dim_d
-    end               = (c+1)*dim_d
-    Fnorm             = F[c,:] - N[c] * mean[start:end]
-    UtSigmaInv_Fnorm  = UtSigmaInv_Fnorm + numpy.dot(UtSigmaInv[c], Fnorm)
-
-  return numpy.linalg.solve(I_UtSigmaInvNU, UtSigmaInv_Fnorm)
-
-def estimate_ux(dim_c, dim_d, mean, sigma, U, N, F):
-  return numpy.dot(U, estimate_x(dim_c, dim_d, mean, sigma, U, N, F))
-
-
-def test_JFABase():
-
-  # Creates a UBM
-  weights = numpy.array([0.4, 0.6], 'float64')
-  means = numpy.array([[1, 6, 2], [4, 3, 2]], 'float64')
-  variances = numpy.array([[1, 2, 1], [2, 1, 2]], 'float64')
-  ubm = GMMMachine(2,3)
-  ubm.weights = weights
-  ubm.means = means
-  ubm.variances = variances
-
-  # Creates a JFABase
-  U = numpy.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]], 'float64')
-  V = numpy.array([[6, 5], [4, 3], [2, 1], [1, 2], [3, 4], [5, 6]], 'float64')
-  d = numpy.array([0, 1, 0, 1, 0, 1], 'float64')
-  m = JFABase(ubm, ru=1, rv=1)
-  
-  _,_,ru,rv = m.shape 
-  assert ru == 1
-  assert rv == 1
-
-  # Checks for correctness
-  m.resize(2,2)
-  m.u = U
-  m.v = V
-  m.d = d  
-  n_gaussians,dim,ru,rv = m.shape
-  supervector_length    = m.supervector_length
-  
-  assert (m.u == U).all()
-  assert (m.v == V).all()
-  assert (m.d == d).all()  
-  assert n_gaussians        == 2
-  assert dim                == 3
-  assert supervector_length == 6
-  assert ru                 == 2
-  assert rv                 == 2
-
-  # Saves and loads
-  filename = str(tempfile.mkstemp(".hdf5")[1])
-  m.save(bob.io.base.HDF5File(filename, 'w'))
-  m_loaded = JFABase(bob.io.base.HDF5File(filename))
-  m_loaded.ubm = ubm
-  assert m == m_loaded
-  assert (m != m_loaded) is False
-  assert m.is_similar_to(m_loaded)
-
-  # Copy constructor
-  mc = JFABase(m)
-  assert m == mc
-
-  # Variant
-  #mv = JFABase()
-  # Checks for correctness
-  #mv.ubm = ubm
-  #mv.resize(2,2)
-  #mv.u = U
-  #mv.v = V
-  #mv.d = d
-  #assert (m.u == U).all()
-  #assert (m.v == V).all()
-  #assert (m.d == d).all()
-  #assert m.dim_c == 2
-  #assert m.dim_d == 3
-  #assert m.dim_cd == 6
-  #assert m.dim_ru == 2
-  #assert m.dim_rv == 2
-
-  # Clean-up
-  os.unlink(filename)
-
-def test_ISVBase():
-
-  # Creates a UBM
-  weights = numpy.array([0.4, 0.6], 'float64')
-  means = numpy.array([[1, 6, 2], [4, 3, 2]], 'float64')
-  variances = numpy.array([[1, 2, 1], [2, 1, 2]], 'float64')
-  ubm           = GMMMachine(2,3)
-  ubm.weights   = weights
-  ubm.means     = means
-  ubm.variances = variances
-
-  # Creates a ISVBase
-  U = numpy.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]], 'float64')
-  d = numpy.array([0, 1, 0, 1, 0, 1], 'float64')
-  m = ISVBase(ubm, ru=1)
-  _,_,ru = m.shape
-  assert ru == 1
-
-  # Checks for correctness
-  m.resize(2)
-  m.u = U
-  m.d = d
-  n_gaussians,dim,ru = m.shape
-  supervector_length = m.supervector_length
-  assert (m.u == U).all()
-  assert (m.d == d).all()  
-  assert n_gaussians        == 2
-  assert dim                == 3
-  assert supervector_length == 6
-  assert ru                 == 2
-
-  # Saves and loads
-  filename = str(tempfile.mkstemp(".hdf5")[1])
-  m.save(bob.io.base.HDF5File(filename, 'w'))
-  m_loaded = ISVBase(bob.io.base.HDF5File(filename))
-  m_loaded.ubm = ubm
-  assert m == m_loaded
-  assert (m != m_loaded) is False
-  assert m.is_similar_to(m_loaded)
-
-  # Copy constructor
-  mc = ISVBase(m)
-  assert m == mc
-
-  # Variant
-  #mv = ISVBase()
-  # Checks for correctness
-  #mv.ubm = ubm
-  #mv.resize(2)
-  #mv.u = U
-  #mv.d = d
-  #assert (m.u == U).all()
-  #assert (m.d == d).all()
-  #ssert m.dim_c == 2
-  #assert m.dim_d == 3
-  #assert m.dim_cd == 6
-  #assert m.dim_ru == 2
-
-  # Clean-up
-  os.unlink(filename)
-
-def test_JFAMachine():
-
-  # Creates a UBM
-  weights   = numpy.array([0.4, 0.6], 'float64')
-  means     = numpy.array([[1, 6, 2], [4, 3, 2]], 'float64')
-  variances = numpy.array([[1, 2, 1], [2, 1, 2]], 'float64')
-  ubm           = GMMMachine(2,3)
-  ubm.weights   = weights
-  ubm.means     = means
-  ubm.variances = variances
-
-  # Creates a JFABase
-  U = numpy.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]], 'float64')
-  V = numpy.array([[6, 5], [4, 3], [2, 1], [1, 2], [3, 4], [5, 6]], 'float64')
-  d = numpy.array([0, 1, 0, 1, 0, 1], 'float64')
-  base = JFABase(ubm,2,2)
-  base.u = U
-  base.v = V
-  base.d = d
-
-  # Creates a JFAMachine
-  y = numpy.array([1,2], 'float64')
-  z = numpy.array([3,4,1,2,0,1], 'float64')
-  m = JFAMachine(base)
-  m.y = y
-  m.z = z
-  n_gaussians,dim,ru,rv = m.shape
-  supervector_length    = m.supervector_length  
-  
-  assert n_gaussians        == 2
-  assert dim                == 3
-  assert supervector_length == 6
-  assert ru                 == 2
-  assert rv                 == 2
-  assert (m.y == y).all()
-  assert (m.z == z).all()
-
-  # Saves and loads
-  filename = str(tempfile.mkstemp(".hdf5")[1])
-  m.save(bob.io.base.HDF5File(filename, 'w'))
-  m_loaded = JFAMachine(bob.io.base.HDF5File(filename))
-  m_loaded.jfa_base = base
-  assert m == m_loaded
-  assert (m != m_loaded) is False
-  assert m.is_similar_to(m_loaded)
-
-  # Copy constructor
-  mc = JFAMachine(m)
-  assert m == mc
-
-  # Variant
-  #mv = JFAMachine()
-  # Checks for correctness
-  #mv.jfa_base = base
-  #m.y = y
-  #m.z = z
-  #assert m.dim_c == 2
-  #assert m.dim_d == 3
-  #assert m.dim_cd == 6
-  #assert m.dim_ru == 2
-  #assert m.dim_rv == 2
-  #assert (m.y == y).all()
-  #assert (m.z == z).all()
-
-  # Defines GMMStats
-  gs = GMMStats(2,3)
-  log_likelihood = -3.
-  T = 1
-  n = numpy.array([0.4, 0.6], 'float64')
-  sumpx = numpy.array([[1., 2., 3.], [4., 5., 6.]], 'float64')
-  sumpxx = numpy.array([[10., 20., 30.], [40., 50., 60.]], 'float64')
-  gs.log_likelihood = log_likelihood
-  gs.t = T
-  gs.n = n
-  gs.sum_px = sumpx
-  gs.sum_pxx = sumpxx
-
-  # Forward GMMStats and check estimated value of the x speaker factor
-  eps = 1e-10
-  x_ref = numpy.array([0.291042849767692, 0.310273618998444], 'float64')
-  score_ref = -2.111577181208289
-  score = m(gs)
-  assert numpy.allclose(m.x, x_ref, eps)
-  assert abs(score_ref-score) < eps
-
-  # x and Ux
-  x = numpy.ndarray((2,), numpy.float64)
-  m.estimate_x(gs, x)
-  n_gaussians, dim,_,_ = m.shape
-  x_py = estimate_x(n_gaussians, dim, ubm.mean_supervector, ubm.variance_supervector, U, n, sumpx)
-  assert numpy.allclose(x, x_py, eps)
-
-  ux = numpy.ndarray((6,), numpy.float64)
-  m.estimate_ux(gs, ux)
-  n_gaussians, dim,_,_ = m.shape  
-  ux_py = estimate_ux(n_gaussians, dim, ubm.mean_supervector, ubm.variance_supervector, U, n, sumpx)
-  assert numpy.allclose(ux, ux_py, eps)
-  assert numpy.allclose(m.x, x, eps)
-
-  score = m.forward_ux(gs, ux)
-
-  assert abs(score_ref-score) < eps
-
-  # Clean-up
-  os.unlink(filename)
-
-def test_ISVMachine():
-
-  # Creates a UBM
-  weights = numpy.array([0.4, 0.6], 'float64')
-  means = numpy.array([[1, 6, 2], [4, 3, 2]], 'float64')
-  variances = numpy.array([[1, 2, 1], [2, 1, 2]], 'float64')
-  ubm = GMMMachine(2,3)
-  ubm.weights = weights
-  ubm.means = means
-  ubm.variances = variances
-
-  # Creates a ISVBaseMachine
-  U = numpy.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]], 'float64')
-  #V = numpy.array([[0], [0], [0], [0], [0], [0]], 'float64')
-  d = numpy.array([0, 1, 0, 1, 0, 1], 'float64')
-  base = ISVBase(ubm,2)
-  base.u = U
-  #base.v = V
-  base.d = d
-
-  # Creates a JFAMachine
-  z = numpy.array([3,4,1,2,0,1], 'float64')
-  m = ISVMachine(base)
-  m.z = z
-  
-  n_gaussians,dim,ru    = m.shape
-  supervector_length    = m.supervector_length  
-  assert n_gaussians          == 2
-  assert dim                  == 3
-  assert supervector_length   == 6
-  assert ru                   == 2
-  assert (m.z == z).all()
-
-  # Saves and loads
-  filename = str(tempfile.mkstemp(".hdf5")[1])
-  m.save(bob.io.base.HDF5File(filename, 'w'))
-  m_loaded = ISVMachine(bob.io.base.HDF5File(filename))
-  m_loaded.isv_base = base
-  assert m == m_loaded
-  assert (m != m_loaded) is False
-  assert m.is_similar_to(m_loaded)
-
-  # Copy constructor
-  mc = ISVMachine(m)
-  assert m == mc
-
-  # Variant
-  mv = ISVMachine(base)
-  # Checks for correctness
-  #mv.isv_base = base
-  m.z = z
-
-  n_gaussians,dim,ru    = m.shape
-  supervector_length    = m.supervector_length  
-  assert n_gaussians        == 2
-  assert dim                == 3
-  assert supervector_length == 6
-  assert ru                 == 2
-  assert (m.z == z).all()
-
-  # Defines GMMStats
-  gs = GMMStats(2,3)
-  log_likelihood = -3.
-  T = 1
-  n = numpy.array([0.4, 0.6], 'float64')
-  sumpx = numpy.array([[1., 2., 3.], [4., 5., 6.]], 'float64')
-  sumpxx = numpy.array([[10., 20., 30.], [40., 50., 60.]], 'float64')
-  gs.log_likelihood = log_likelihood
-  gs.t = T
-  gs.n = n
-  gs.sum_px = sumpx
-  gs.sum_pxx = sumpxx
-
-  # Forward GMMStats and check estimated value of the x speaker factor
-  eps = 1e-10
-  x_ref = numpy.array([0.291042849767692, 0.310273618998444], 'float64')
-  score_ref = -3.280498193082100
-
-  score = m(gs)
-  assert numpy.allclose(m.x, x_ref, eps)  
-  assert abs(score_ref-score) < eps
-
-  # Check using alternate forward() method
-  supervector_length = m.supervector_length
-  Ux = numpy.ndarray(shape=(supervector_length,), dtype=numpy.float64)
-  m.estimate_ux(gs, Ux)
-  score = m.forward_ux(gs, Ux)
-  assert abs(score_ref-score) < eps
-
-  # x and Ux
-  x = numpy.ndarray((2,), numpy.float64)
-  m.estimate_x(gs, x)
-  n_gaussians,dim,_    = m.shape  
-  x_py = estimate_x(n_gaussians, dim, ubm.mean_supervector, ubm.variance_supervector, U, n, sumpx)
-  assert numpy.allclose(x, x_py, eps)
-
-  ux = numpy.ndarray((6,), numpy.float64)
-  m.estimate_ux(gs, ux)
-  n_gaussians,dim,_    = m.shape  
-  ux_py = estimate_ux(n_gaussians, dim, ubm.mean_supervector, ubm.variance_supervector, U, n, sumpx)
-  assert numpy.allclose(ux, ux_py, eps)
-  assert numpy.allclose(m.x, x, eps)
-
-  score = m.forward_ux(gs, ux)
-  assert abs(score_ref-score) < eps
-
-  # Clean-up
-  os.unlink(filename)
diff --git a/bob/learn/misc/test_jfa_trainer.py b/bob/learn/misc/test_jfa_trainer.py
deleted file mode 100644
index 4df6d00a867e2fda9bddba1a2b15f70688c5ab0a..0000000000000000000000000000000000000000
--- a/bob/learn/misc/test_jfa_trainer.py
+++ /dev/null
@@ -1,316 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
-# Tue Jul 19 12:16:17 2011 +0200
-#
-# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
-
-"""Test JFA trainer package
-"""
-
-import numpy
-import numpy.linalg
-
-import bob.core.random
-
-from . import GMMStats, GMMMachine, JFABase, JFAMachine, ISVBase, ISVMachine, JFATrainer, ISVTrainer
-
-
-def equals(x, y, epsilon):
-  return (abs(x - y) < epsilon).all()
-
-# Define Training set and initial values for tests
-F1 = numpy.array( [0.3833, 0.4516, 0.6173, 0.2277, 0.5755, 0.8044, 0.5301,
-  0.9861, 0.2751, 0.0300, 0.2486, 0.5357]).reshape((6,2))
-F2 = numpy.array( [0.0871, 0.6838, 0.8021, 0.7837, 0.9891, 0.5341, 0.0669,
-  0.8854, 0.9394, 0.8990, 0.0182, 0.6259]).reshape((6,2))
-F=[F1, F2]
-
-N1 = numpy.array([0.1379, 0.1821, 0.2178, 0.0418]).reshape((2,2))
-N2 = numpy.array([0.1069, 0.9397, 0.6164, 0.3545]).reshape((2,2))
-N=[N1, N2]
-
-gs11 = GMMStats(2,3)
-gs11.n = N1[:,0]
-gs11.sum_px = F1[:,0].reshape(2,3)
-gs12 = GMMStats(2,3)
-gs12.n = N1[:,1]
-gs12.sum_px = F1[:,1].reshape(2,3)
-
-gs21 = GMMStats(2,3)
-gs21.n = N2[:,0]
-gs21.sum_px = F2[:,0].reshape(2,3)
-gs22 = GMMStats(2,3)
-gs22.n = N2[:,1]
-gs22.sum_px = F2[:,1].reshape(2,3)
-
-TRAINING_STATS = [[gs11, gs12], [gs21, gs22]]
-UBM_MEAN = numpy.array([0.1806, 0.0451, 0.7232, 0.3474, 0.6606, 0.3839])
-UBM_VAR = numpy.array([0.6273, 0.0216, 0.9106, 0.8006, 0.7458, 0.8131])
-M_d = numpy.array([0.4106, 0.9843, 0.9456, 0.6766, 0.9883, 0.7668])
-M_v = numpy.array( [0.3367, 0.4116, 0.6624, 0.6026, 0.2442, 0.7505, 0.2955,
-  0.5835, 0.6802, 0.5518, 0.5278,0.5836]).reshape((6,2))
-M_u = numpy.array( [0.5118, 0.3464, 0.0826, 0.8865, 0.7196, 0.4547, 0.9962,
-  0.4134, 0.3545, 0.2177, 0.9713, 0.1257]).reshape((6,2))
-
-z1 = numpy.array([0.3089, 0.7261, 0.7829, 0.6938, 0.0098, 0.8432])
-z2 = numpy.array([0.9223, 0.7710, 0.0427, 0.3782, 0.7043, 0.7295])
-y1 = numpy.array([0.2243, 0.2691])
-y2 = numpy.array([0.6730, 0.4775])
-x1 = numpy.array([0.9976, 0.8116, 0.1375, 0.3900]).reshape((2,2))
-x2 = numpy.array([0.4857, 0.8944, 0.9274, 0.9175]).reshape((2,2))
-M_z=[z1, z2]
-M_y=[y1, y2]
-M_x=[x1, x2]
-
-
-def test_JFATrainer_updateYandV():
-  # test the JFATrainer for updating Y and V
-
-  v_ref = numpy.array( [0.7228, 0.7892, 0.6475, 0.6080, 0.8631, 0.8416,
-    1.6512, 1.6068, 0.0500, 0.0101, 0.4325, 0.6719]).reshape((6,2))
-
-  y1 = numpy.array([0., 0.])
-  y2 = numpy.array([0., 0.])
-  y3 = numpy.array([0.9630, 1.3868])
-  y4 = numpy.array([0.0426, -0.3721])
-  y=[y1, y2]
-
-  # call the updateY function
-  ubm = GMMMachine(2,3)
-  ubm.mean_supervector = UBM_MEAN
-  ubm.variance_supervector = UBM_VAR
-  m = JFABase(ubm,2,2)
-  t = JFATrainer(10)
-  t.initialize(m, TRAINING_STATS)
-  m.u = M_u
-  m.v = M_v
-  m.d = M_d
-  t.__X__ = M_x
-  t.__Y__ = y
-  t.__Z__ = M_z
-  t.e_step1(m, TRAINING_STATS)
-  t.m_step1(m, TRAINING_STATS)
-
-  # Expected results(JFA cookbook, matlab)
-  assert equals(t.__Y__[0], y3, 2e-4)
-  assert equals(t.__Y__[1], y4, 2e-4)
-  assert equals(m.v, v_ref, 2e-4)
-
-
-def test_JFATrainer_updateXandU():
-  # test the JFATrainer for updating X and U
-
-  u_ref = numpy.array( [0.6729, 0.3408, 0.0544, 1.0653, 0.5399, 1.3035,
-    2.4995, 0.4385, 0.1292, -0.0576, 1.1962, 0.0117]).reshape((6,2))
-
-  x1 = numpy.array([0., 0., 0., 0.]).reshape((2,2))
-  x2 = numpy.array([0., 0., 0., 0.]).reshape((2,2))
-  x3 = numpy.array([0.2143, 1.8275, 3.1979, 0.1227]).reshape((2,2))
-  x4 = numpy.array([-1.3861, 0.2359, 5.3326, -0.7914]).reshape((2,2))
-  x  = [x1, x2]
-
-  # call the updateX function
-  ubm = GMMMachine(2,3)
-  ubm.mean_supervector = UBM_MEAN
-  ubm.variance_supervector = UBM_VAR
-  m = JFABase(ubm,2,2)
-  t = JFATrainer(10)
-  t.initialize(m, TRAINING_STATS)
-  m.u = M_u
-  m.v = M_v
-  m.d = M_d
-  t.__X__ = x
-  t.__Y__ = M_y
-  t.__Z__ = M_z
-  t.e_step2(m, TRAINING_STATS)
-  t.m_step2(m, TRAINING_STATS)
-
-  # Expected results(JFA cookbook, matlab)
-  assert equals(t.__X__[0], x3, 2e-4)
-  assert equals(t.__X__[1], x4, 2e-4)
-  assert equals(m.u, u_ref, 2e-4)
-
-
-def test_JFATrainer_updateZandD():
-  # test the JFATrainer for updating Z and D
-
-  d_ref = numpy.array([0.3110, 1.0138, 0.8297, 1.0382, 0.0095, 0.6320])
-
-  z1 = numpy.array([0., 0., 0., 0., 0., 0.])
-  z2 = numpy.array([0., 0., 0., 0., 0., 0.])
-  z3_ref = numpy.array([0.3256, 1.8633, 0.6480, 0.8085, -0.0432, 0.2885])
-  z4_ref = numpy.array([-0.3324, -0.1474, -0.4404, -0.4529, 0.0484, -0.5848])
-  z=[z1, z2]
-
-  # call the updateZ function
-  ubm = GMMMachine(2,3)
-  ubm.mean_supervector = UBM_MEAN
-  ubm.variance_supervector = UBM_VAR
-  m = JFABase(ubm,2,2)
-  t = JFATrainer(10)
-  t.initialize(m, TRAINING_STATS)
-  m.u = M_u
-  m.v = M_v
-  m.d = M_d
-  t.__X__ = M_x
-  t.__Y__ = M_y
-  t.__Z__ = z
-  t.e_step3(m, TRAINING_STATS)
-  t.m_step3(m, TRAINING_STATS)
-
-  # Expected results(JFA cookbook, matlab)
-  assert equals(t.__Z__[0], z3_ref, 2e-4)
-  assert equals(t.__Z__[1], z4_ref, 2e-4)
-  assert equals(m.d, d_ref, 2e-4)
-
-
-def test_JFATrainAndEnrol():
-  # Train and enrol a JFAMachine
-
-  # Calls the train function
-  ubm = GMMMachine(2,3)
-  ubm.mean_supervector = UBM_MEAN
-  ubm.variance_supervector = UBM_VAR
-  mb = JFABase(ubm, 2, 2)
-  t = JFATrainer(10)
-  t.initialize(mb, TRAINING_STATS)
-  mb.u = M_u
-  mb.v = M_v
-  mb.d = M_d
-  t.train_loop(mb, TRAINING_STATS)
-
-  v_ref = numpy.array([[0.245364911936476, 0.978133261775424], [0.769646805052223, 0.940070736856596], [0.310779202800089, 1.456332053893072],
-        [0.184760934399551, 2.265139705602147], [0.701987784039800, 0.081632150899400], [0.074344030229297, 1.090248340917255]], 'float64')
-  u_ref = numpy.array([[0.049424652628448, 0.060480486336896], [0.178104127464007, 1.884873813495153], [1.204011484266777, 2.281351307871720],
-        [7.278512126426286, -0.390966087173334], [-0.084424326581145, -0.081725474934414], [4.042143689831097, -0.262576386580701]], 'float64')
-  d_ref = numpy.array([9.648467e-18, 2.63720683155e-12, 2.11822157653706e-10, 9.1047243e-17, 1.41163442535567e-10, 3.30581e-19], 'float64')
-
-  eps = 1e-10
-  assert numpy.allclose(mb.v, v_ref, eps)
-  assert numpy.allclose(mb.u, u_ref, eps)
-  assert numpy.allclose(mb.d, d_ref, eps)
-
-  # Calls the enrol function
-  m = JFAMachine(mb)
-
-  Ne = numpy.array([0.1579, 0.9245, 0.1323, 0.2458]).reshape((2,2))
-  Fe = numpy.array([0.1579, 0.1925, 0.3242, 0.1234, 0.2354, 0.2734, 0.2514, 0.5874, 0.3345, 0.2463, 0.4789, 0.5236]).reshape((6,2))
-  gse1 = GMMStats(2,3)
-  gse1.n = Ne[:,0]
-  gse1.sum_px = Fe[:,0].reshape(2,3)
-  gse2 = GMMStats(2,3)
-  gse2.n = Ne[:,1]
-  gse2.sum_px = Fe[:,1].reshape(2,3)
-
-  gse = [gse1, gse2]
-  t.enrol(m, gse, 5)
-
-  y_ref = numpy.array([0.555991469319657, 0.002773650670010], 'float64')
-  z_ref = numpy.array([8.2228e-20, 3.15216909492e-13, -1.48616735364395e-10, 1.0625905e-17, 3.7150503117895e-11, 1.71104e-19], 'float64')
-  assert numpy.allclose(m.y, y_ref, eps)
-  assert numpy.allclose(m.z, z_ref, eps)
-
-
-def test_ISVTrainAndEnrol():
-  # Train and enrol an 'ISVMachine'
-
-  eps = 1e-10
-  d_ref = numpy.array([0.39601136, 0.07348469, 0.47712682, 0.44738127, 0.43179856, 0.45086029], 'float64')
-  u_ref = numpy.array([[0.855125642430777, 0.563104284748032], [-0.325497865404680, 1.923598985291687], [0.511575659503837, 1.964288663083095], [9.330165761678115, 1.073623827995043], [0.511099245664012, 0.278551249248978], [5.065578541930268, 0.509565618051587]], 'float64')
-  z_ref = numpy.array([-0.079315777443826, 0.092702428248543, -0.342488761656616, -0.059922635809136 , 0.133539981073604, 0.213118695516570], 'float64')
-
-  # Calls the train function
-  ubm = GMMMachine(2,3)
-  ubm.mean_supervector = UBM_MEAN
-  ubm.variance_supervector = UBM_VAR
-  mb = ISVBase(ubm,2)
-  t = ISVTrainer(10, 4.)
-  #t.train(mb, TRAINING_STATS)
-  t.initialize(mb, TRAINING_STATS)
-  mb.u = M_u
-  for i in range(10):
-    t.e_step(mb, TRAINING_STATS)
-    t.m_step(mb)
-
-  assert numpy.allclose(mb.d, d_ref, eps)
-  assert numpy.allclose(mb.u, u_ref, eps)
-
-  # Calls the enrol function
-  m = ISVMachine(mb)
-
-  Ne = numpy.array([0.1579, 0.9245, 0.1323, 0.2458]).reshape((2,2))
-  Fe = numpy.array([0.1579, 0.1925, 0.3242, 0.1234, 0.2354, 0.2734, 0.2514, 0.5874, 0.3345, 0.2463, 0.4789, 0.5236]).reshape((6,2))
-  gse1 = GMMStats(2,3)
-  gse1.n = Ne[:,0]
-  gse1.sum_px = Fe[:,0].reshape(2,3)
-  gse2 = GMMStats(2,3)
-  gse2.n = Ne[:,1]
-  gse2.sum_px = Fe[:,1].reshape(2,3)
-
-  gse = [gse1, gse2]
-  t.enrol(m, gse, 5)
-  assert numpy.allclose(m.z, z_ref, eps)
-
-def test_JFATrainInitialize():
-  # Check that the initialization is consistent and using the rng (cf. issue #118)
-
-  eps = 1e-10
-
-  # UBM GMM
-  ubm = GMMMachine(2,3)
-  ubm.mean_supervector = UBM_MEAN
-  ubm.variance_supervector = UBM_VAR
-
-  ## JFA
-  jb = JFABase(ubm, 2, 2)
-  # first round
-  rng = bob.core.random.mt19937(0)
-  jt = JFATrainer(10)
-  jt.rng = rng
-  jt.initialize(jb, TRAINING_STATS)
-  u1 = jb.u
-  v1 = jb.v
-  d1 = jb.d
-
-  # second round
-  rng = bob.core.random.mt19937(0)
-  jt.rng = rng
-  jt.initialize(jb, TRAINING_STATS)
-  u2 = jb.u
-  v2 = jb.v
-  d2 = jb.d
-
-  assert numpy.allclose(u1, u2, eps)
-  assert numpy.allclose(v1, v2, eps)
-  assert numpy.allclose(d1, d2, eps)
-
-def test_ISVTrainInitialize():
-
-  # Check that the initialization is consistent and using the rng (cf. issue #118)
-  eps = 1e-10
-
-  # UBM GMM
-  ubm = GMMMachine(2,3)
-  ubm.mean_supervector = UBM_MEAN
-  ubm.variance_supervector = UBM_VAR
-
-  ## ISV
-  ib = ISVBase(ubm, 2)
-  # first round
-  rng = bob.core.random.mt19937(0)
-  it = ISVTrainer(10)
-  it.rng = rng
-  it.initialize(ib, TRAINING_STATS)
-  u1 = ib.u
-  d1 = ib.d
-
-  # second round
-  rng = bob.core.random.mt19937(0)
-  it.rng = rng
-  it.initialize(ib, TRAINING_STATS)
-  u2 = ib.u
-  d2 = ib.d
-
-  assert numpy.allclose(u1, u2, eps)
-  assert numpy.allclose(d1, d2, eps)
diff --git a/bob/learn/misc/test_kmeans.py b/bob/learn/misc/test_kmeans.py
deleted file mode 100644
index 0c6cc253c03df95771723d0d3a925358d5b9ec8b..0000000000000000000000000000000000000000
--- a/bob/learn/misc/test_kmeans.py
+++ /dev/null
@@ -1,96 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
-# Thu Feb 16 17:57:10 2012 +0200
-#
-# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
-
-"""Tests the KMeans machine
-"""
-
-import os
-import numpy
-import tempfile
-
-import bob.io.base
-from . import KMeansMachine
-
-def equals(x, y, epsilon):
-  return (abs(x - y) < epsilon)
-
-def test_KMeansMachine():
-  # Test a KMeansMachine
-
-  means = numpy.array([[3, 70, 0], [4, 72, 0]], 'float64')
-  mean  = numpy.array([3,70,1], 'float64')
-
-  # Initializes a KMeansMachine
-  km = KMeansMachine(2,3)
-  km.means = means
-  assert km.shape == (2,3)
-
-  # Sets and gets
-  assert (km.means == means).all()
-  assert (km.get_mean(0) == means[0,:]).all()  
-  assert (km.get_mean(1) == means[1,:]).all()
-  km.set_mean(0, mean)
-  assert (km.get_mean(0) == mean).all()
-
-  # Distance and closest mean
-  eps = 1e-10
-
-  assert equals( km.get_distance_from_mean(mean, 0), 0, eps)
-  assert equals( km.get_distance_from_mean(mean, 1), 6, eps)  
-  
-  (index, dist) = km.get_closest_mean(mean)
-  
-  assert index == 0
-  assert equals( dist, 0, eps)
-  assert equals( km.get_min_distance(mean), 0, eps)
-
-  # Loads and saves
-  filename = str(tempfile.mkstemp(".hdf5")[1])
-  km.save(bob.io.base.HDF5File(filename, 'w'))
-  km_loaded = KMeansMachine(bob.io.base.HDF5File(filename))
-  assert km == km_loaded
-
-  # Resize
-  km.resize(4,5)
-  assert km.shape == (4,5)
-
-  # Copy constructor and comparison operators
-  km.resize(2,3)
-  km2 = KMeansMachine(km)
-  assert km2 == km
-  assert (km2 != km) is False
-  assert km2.is_similar_to(km)
-  means2 = numpy.array([[3, 70, 0], [4, 72, 2]], 'float64')
-  km2.means = means2
-  assert (km2 == km) is False
-  assert km2 != km
-  assert (km2.is_similar_to(km)) is False
-
-  # Clean-up
-  os.unlink(filename)
-  
-  
-def test_KMeansMachine2():
-  kmeans             = bob.learn.misc.KMeansMachine(2,2)
-  kmeans.means       = numpy.array([[1.2,1.3],[0.2,-0.3]])
-
-  data               = numpy.array([
-                                  [1.,1],
-                                  [1.2, 3],
-                                  [0,0],
-                                  [0.3,0.2],
-                                  [0.2,0]
-                                 ])
-  variances, weights = kmeans.get_variances_and_weights_for_each_cluster(data)
-
-  variances_result = numpy.array([[ 0.01,1.],
-                                  [ 0.01555556, 0.00888889]])
-  weights_result = numpy.array([ 0.4, 0.6])
-  
-  assert equals(weights_result,weights, 1e-3).all()
-  assert equals(variances_result,variances,1e-3).all()
- 
diff --git a/bob/learn/misc/test_kmeans_trainer.py b/bob/learn/misc/test_kmeans_trainer.py
deleted file mode 100644
index 89f18e50128931e38ca0005d0eb11c18bf091b43..0000000000000000000000000000000000000000
--- a/bob/learn/misc/test_kmeans_trainer.py
+++ /dev/null
@@ -1,181 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
-# Fri Jan 18 12:46:00 2013 +0200
-#
-# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
-
-"""Test K-Means algorithm
-"""
-import numpy
-
-import bob.core
-import bob.io
-from bob.io.base.test_utils import datafile
-
-from bob.learn.misc import KMeansMachine, KMeansTrainer
-
-def equals(x, y, epsilon):
-  return (abs(x - y) < epsilon).all()
-
-def kmeans_plus_plus(machine, data, seed):
-  """Python implementation of K-Means++ (initialization)"""
-  n_data = data.shape[0]
-  rng = bob.core.random.mt19937(seed)
-  u = bob.core.random.uniform('int32', 0, n_data-1)
-  index = u(rng)
-  machine.set_mean(0, data[index,:])
-  weights = numpy.zeros(shape=(n_data,), dtype=numpy.float64)
-
-  for m in range(1,machine.dim_c):
-    for s in range(n_data):
-      s_cur = data[s,:]
-      w_cur = machine.get_distance_from_mean(s_cur, 0)
-      for i in range(m):
-        w_cur = min(machine.get_distance_from_mean(s_cur, i), w_cur)
-      weights[s] = w_cur
-    weights *= weights
-    weights /= numpy.sum(weights)
-    d = bob.core.random.discrete('int32', weights)
-    index = d(rng)
-    machine.set_mean(m, data[index,:])
-
-
-def NormalizeStdArray(path):
-  array = bob.io.base.load(path).astype('float64')
-  std = array.std(axis=0)
-  return (array/std, std)
-
-def multiplyVectorsByFactors(matrix, vector):
-  for i in range(0, matrix.shape[0]):
-    for j in range(0, matrix.shape[1]):
-      matrix[i, j] *= vector[j]
-
-def flipRows(array):
-  if len(array.shape) == 2:
-    return numpy.array([numpy.array(array[1, :]), numpy.array(array[0, :])], 'float64')
-  elif len(array.shape) == 1:
-    return numpy.array([array[1], array[0]], 'float64')
-  else:
-    raise Exception('Input type not supportd by flipRows')
-
-if hasattr(KMeansTrainer, 'KMEANS_PLUS_PLUS'):
-  def test_kmeans_plus_plus():
-
-    # Tests the K-Means++ initialization
-    dim_c = 5
-    dim_d = 7
-    n_samples = 150
-    data = numpy.random.randn(n_samples,dim_d)
-    seed = 0
-
-    # C++ implementation
-    machine = KMeansMachine(dim_c, dim_d)
-    trainer = KMeansTrainer()
-    trainer.rng = bob.core.random.mt19937(seed)
-    trainer.initialization_method = 'KMEANS_PLUS_PLUS'
-    trainer.initialize(machine, data)
-
-    # Python implementation
-    py_machine = KMeansMachine(dim_c, dim_d)
-    kmeans_plus_plus(py_machine, data, seed)
-    assert equals(machine.means, py_machine.means, 1e-8)
-
-def test_kmeans_noduplicate():
-  # Data/dimensions
-  dim_c = 2
-  dim_d = 3
-  seed = 0
-  data = numpy.array([[1,2,3],[1,2,3],[1,2,3],[4,5,6.]])
-  # Defines machine and trainer
-  machine = KMeansMachine(dim_c, dim_d)
-  trainer = KMeansTrainer()
-  trainer.rng = bob.core.random.mt19937(seed)
-  trainer.initialization_method = 'RANDOM_NO_DUPLICATE'
-  trainer.initialize(machine, data)
-  # Makes sure that the two initial mean vectors selected are different
-  assert equals(machine.get_mean(0), machine.get_mean(1), 1e-8) == False
-
-
-def test_kmeans_a():
-
-  # Trains a KMeansMachine
-  # This files contains draws from two 1D Gaussian distributions:
-  #   * 100 samples from N(-10,1)
-  #   * 100 samples from N(10,1)
-  data = bob.io.base.load(datafile("samplesFrom2G_f64.hdf5", __name__))
-
-  machine = KMeansMachine(2, 1)
-
-  trainer = KMeansTrainer()
-  trainer.train(machine, data)
-
-  [variances, weights] = machine.get_variances_and_weights_for_each_cluster(data)
-  variances_b = numpy.ndarray(shape=(2,1), dtype=numpy.float64)
-  weights_b = numpy.ndarray(shape=(2,), dtype=numpy.float64)
-  machine.__get_variances_and_weights_for_each_cluster_init__(variances_b, weights_b)
-  machine.__get_variances_and_weights_for_each_cluster_acc__(data, variances_b, weights_b)
-  machine.__get_variances_and_weights_for_each_cluster_fin__(variances_b, weights_b)
-  m1 = machine.get_mean(0)
-  m2 = machine.get_mean(1)
-
-  ## Check means [-10,10] / variances [1,1] / weights [0.5,0.5]
-  if(m1<m2): means=numpy.array(([m1[0],m2[0]]), 'float64')
-  else: means=numpy.array(([m2[0],m1[0]]), 'float64')
-  assert equals(means, numpy.array([-10.,10.]), 2e-1)
-  assert equals(variances, numpy.array([1.,1.]), 2e-1)
-  assert equals(weights, numpy.array([0.5,0.5]), 1e-3)
-
-  assert equals(variances, variances_b, 1e-8)
-  assert equals(weights, weights_b, 1e-8)
-
-
-
-def test_kmeans_b():
-
-  # Trains a KMeansMachine
-  (arStd,std) = NormalizeStdArray(datafile("faithful.torch3.hdf5", __name__))
-
-  machine = KMeansMachine(2, 2)
-
-  trainer = KMeansTrainer()
-  #trainer.seed = 1337
-  trainer.train(machine, arStd)
-
-  [variances, weights] = machine.get_variances_and_weights_for_each_cluster(arStd)
-  means = machine.means
-
-  multiplyVectorsByFactors(means, std)
-  multiplyVectorsByFactors(variances, std ** 2)
-
-  gmmWeights = bob.io.base.load(datafile('gmm.init_weights.hdf5', __name__))
-  gmmMeans = bob.io.base.load(datafile('gmm.init_means.hdf5', __name__))
-  gmmVariances = bob.io.base.load(datafile('gmm.init_variances.hdf5', __name__))
-
-  if (means[0, 0] < means[1, 0]):
-    means = flipRows(means)
-    variances = flipRows(variances)
-    weights = flipRows(weights)
-
-  assert equals(means, gmmMeans, 1e-3)
-  assert equals(weights, gmmWeights, 1e-3)
-  assert equals(variances, gmmVariances, 1e-3)
-  
-  # Check comparison operators
-  trainer1 = KMeansTrainer()
-  trainer2 = KMeansTrainer()
-  #trainer1.rng = trainer2.rng
-
-  #assert trainer1 == trainer2
-  #assert (trainer1 != trainer2) is False
-  trainer1.max_iterations = 1337
-  #assert (trainer1 == trainer2) is False
-  #assert trainer1 != trainer2
-
-  # Check that there is no duplicate means during initialization
-  machine = KMeansMachine(2, 1)
-  trainer = KMeansTrainer()
-  trainer.initialization_method = 'RANDOM_NO_DUPLICATE'
-  data = numpy.array([[1.], [1.], [1.], [1.], [1.], [1.], [2.], [3.]])
-  trainer.train(machine, data)
-  assert (numpy.isnan(machine.means).any()) == False
\ No newline at end of file
diff --git a/bob/learn/misc/test_linearscoring.py b/bob/learn/misc/test_linearscoring.py
deleted file mode 100644
index 8a96a0175331d8de00bbf744c3165cc42eb4d8f1..0000000000000000000000000000000000000000
--- a/bob/learn/misc/test_linearscoring.py
+++ /dev/null
@@ -1,128 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Francois Moulin <Francois.Moulin@idiap.ch>
-# Wed Jul 13 16:00:04 2011 +0200
-#
-# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
-
-"""Tests on the LinearScoring function
-"""
-
-import numpy
-
-from . import GMMMachine, GMMStats, linear_scoring
-
-def test_LinearScoring():
-
-  ubm = GMMMachine(2, 2)
-  ubm.weights   = numpy.array([0.5, 0.5], 'float64')
-  ubm.means     = numpy.array([[3, 70], [4, 72]], 'float64')
-  ubm.variances = numpy.array([[1, 10], [2, 5]], 'float64')
-  ubm.variance_thresholds = numpy.array([[0, 0], [0, 0]], 'float64')
-
-  model1 = GMMMachine(2, 2)
-  model1.weights   = numpy.array([0.5, 0.5], 'float64')
-  model1.means     = numpy.array([[1, 2], [3, 4]], 'float64')
-  model1.variances = numpy.array([[9, 10], [11, 12]], 'float64')
-  model1.variance_thresholds = numpy.array([[0, 0], [0, 0]], 'float64')
-
-  model2 = GMMMachine(2, 2)
-  model2.weights   = numpy.array([0.5, 0.5], 'float64')
-  model2.means     = numpy.array([[5, 6], [7, 8]], 'float64')
-  model2.variances = numpy.array([[13, 14], [15, 16]], 'float64')
-  model2.variance_thresholds = numpy.array([[0, 0], [0, 0]], 'float64')
-
-  stats1 = GMMStats(2, 2)
-  stats1.sum_px = numpy.array([[1, 2], [3, 4]], 'float64')
-  stats1.n = numpy.array([1, 2], 'float64')
-  stats1.t = 1+2
-
-  stats2 = GMMStats(2, 2)
-  stats2.sum_px = numpy.array([[5, 6], [7, 8]], 'float64')
-  stats2.n = numpy.array([3, 4], 'float64')
-  stats2.t = 3+4
-
-  stats3 = GMMStats(2, 2)
-  stats3.sum_px = numpy.array([[5, 6], [7, 3]], 'float64')
-  stats3.n = numpy.array([3, 4], 'float64')
-  stats3.t = 3+4
-
-  test_channeloffset = [numpy.array([9, 8, 7, 6], 'float64'), numpy.array([5, 4, 3, 2], 'float64'), numpy.array([1, 0, 1, 2], 'float64')]
-
-  # Reference scores (from Idiap internal matlab implementation)
-  ref_scores_00 = numpy.array([[2372.9, 5207.7, 5275.7], [2215.7, 4868.1, 4932.1]], 'float64')
-  ref_scores_01 = numpy.array( [[790.9666666666667, 743.9571428571428, 753.6714285714285], [738.5666666666667, 695.4428571428572, 704.5857142857144]], 'float64')
-  ref_scores_10 = numpy.array([[2615.5, 5434.1, 5392.5], [2381.5, 4999.3, 5022.5]], 'float64')
-  ref_scores_11 = numpy.array([[871.8333333333332, 776.3000000000001, 770.3571428571427], [793.8333333333333, 714.1857142857143, 717.5000000000000]], 'float64')
-
-
-  # 1/ Use GMMMachines
-  # 1/a/ Without test_channelOffset, without frame-length normalisation
-  scores = linear_scoring([model1, model2], ubm, [stats1, stats2, stats3])
-  assert (abs(scores - ref_scores_00) < 1e-7).all()
-
-  # 1/b/ Without test_channelOffset, with frame-length normalisation
-  scores = linear_scoring([model1, model2], ubm, [stats1, stats2, stats3], [], True)
-  assert (abs(scores - ref_scores_01) < 1e-7).all()
-  #scores = linear_scoring([model1, model2], ubm, [stats1, stats2, stats3], (), True)
-  #assert (abs(scores - ref_scores_01) < 1e-7).all()
-  #scores = linear_scoring([model1, model2], ubm, [stats1, stats2, stats3], None, True)
-  #assert (abs(scores - ref_scores_01) < 1e-7).all()
-
-  # 1/c/ With test_channelOffset, without frame-length normalisation
-  scores = linear_scoring([model1, model2], ubm, [stats1, stats2, stats3], test_channeloffset)
-  assert (abs(scores - ref_scores_10) < 1e-7).all()
-
-  # 1/d/ With test_channelOffset, with frame-length normalisation
-  scores = linear_scoring([model1, model2], ubm, [stats1, stats2, stats3], test_channeloffset, True)
-  assert (abs(scores - ref_scores_11) < 1e-7).all()
-
-
-  # 2/ Use mean/variance supervectors
-  # 2/a/ Without test_channelOffset, without frame-length normalisation
-  scores = linear_scoring([model1.mean_supervector, model2.mean_supervector], ubm.mean_supervector, ubm.variance_supervector, [stats1, stats2, stats3])
-  assert (abs(scores - ref_scores_00) < 1e-7).all()
-
-  # 2/b/ Without test_channelOffset, with frame-length normalisation
-  scores = linear_scoring([model1.mean_supervector, model2.mean_supervector], ubm.mean_supervector, ubm.variance_supervector, [stats1, stats2, stats3], [], True)
-  assert (abs(scores - ref_scores_01) < 1e-7).all()
-
-  # 2/c/ With test_channelOffset, without frame-length normalisation
-  scores = linear_scoring([model1.mean_supervector, model2.mean_supervector], ubm.mean_supervector, ubm.variance_supervector, [stats1, stats2, stats3], test_channeloffset)
-  assert (abs(scores - ref_scores_10) < 1e-7).all()
-
-  # 2/d/ With test_channelOffset, with frame-length normalisation
-  scores = linear_scoring([model1.mean_supervector, model2.mean_supervector], ubm.mean_supervector, ubm.variance_supervector, [stats1, stats2, stats3], test_channeloffset, True)
-  assert (abs(scores - ref_scores_11) < 1e-7).all()
-
-
-  # 3/ Using single model/sample
-  # 3/a/ without frame-length normalisation
-  score = linear_scoring(model1.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats1, test_channeloffset[0])
-  assert abs(score - ref_scores_10[0,0]) < 1e-7
-  score = linear_scoring(model1.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats2, test_channeloffset[1])
-  assert abs(score - ref_scores_10[0,1]) < 1e-7
-  score = linear_scoring(model1.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats3, test_channeloffset[2])
-  assert abs(score - ref_scores_10[0,2]) < 1e-7
-  score = linear_scoring(model2.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats1, test_channeloffset[0])
-  assert abs(score - ref_scores_10[1,0]) < 1e-7
-  score = linear_scoring(model2.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats2, test_channeloffset[1])
-  assert abs(score - ref_scores_10[1,1]) < 1e-7
-  score = linear_scoring(model2.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats3, test_channeloffset[2])
-  assert abs(score - ref_scores_10[1,2]) < 1e-7
-
-
-  # 3/b/ without frame-length normalisation
-  score = linear_scoring(model1.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats1, test_channeloffset[0], True)
-  assert abs(score - ref_scores_11[0,0]) < 1e-7
-  score = linear_scoring(model1.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats2, test_channeloffset[1], True)
-  assert abs(score - ref_scores_11[0,1]) < 1e-7
-  score = linear_scoring(model1.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats3, test_channeloffset[2], True)
-  assert abs(score - ref_scores_11[0,2]) < 1e-7
-  score = linear_scoring(model2.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats1, test_channeloffset[0], True)
-  assert abs(score - ref_scores_11[1,0]) < 1e-7
-  score = linear_scoring(model2.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats2, test_channeloffset[1], True)
-  assert abs(score - ref_scores_11[1,1]) < 1e-7
-  score = linear_scoring(model2.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats3, test_channeloffset[2], True)
-  assert abs(score - ref_scores_11[1,2]) < 1e-7
-
diff --git a/bob/learn/misc/test_plda.py b/bob/learn/misc/test_plda.py
deleted file mode 100644
index 37f9c331de13bf3812cc4e46ba6a5d1605ba2379..0000000000000000000000000000000000000000
--- a/bob/learn/misc/test_plda.py
+++ /dev/null
@@ -1,565 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
-# Sat Oct 22 23:01:09 2011 +0200
-#
-# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
-
-"""Tests PLDA machine
-"""
-
-import numpy
-import os
-import tempfile
-import nose.tools
-import math
-
-import bob.io.base
-
-from . import PLDABase, PLDAMachine
-
-# Defines common variables globally
-# Dimensionalities
-C_dim_d = 7
-C_dim_f = 2
-C_dim_g = 3
-# Values for F and G
-C_G=numpy.array([-1.1424, -0.5044, -0.1917,
-      -0.6249,  0.1021, -0.8658,
-      -1.1687,  1.1963,  0.1807,
-      0.3926,  0.1203,  1.2665,
-      1.3018, -1.0368, -0.2512,
-      -0.5936, -0.8571, -0.2046,
-      0.4364, -0.1699, -2.2015], 'float64').reshape(C_dim_d, C_dim_g)
-# F <-> PCA on G
-C_F=numpy.array([-0.054222647972093, -0.000000000783146,
-      0.596449127693018,  0.000000006265167,
-      0.298224563846509,  0.000000003132583,
-      0.447336845769764,  0.000000009397750,
-      -0.108445295944185, -0.000000001566292,
-      -0.501559493741856, -0.000000006265167,
-      -0.298224563846509, -0.000000003132583], 'float64').reshape(C_dim_d, C_dim_f)
-
-def equals(x, y, epsilon):
-  return (abs(x - y) < epsilon).all()
-
-def compute_i_sigma(sigma):
-  # Inverse of a diagonal matrix (represented by a 1D numpy array)
-  return (1. / sigma)
-
-def compute_alpha(G, sigma):
-  # alpha = (Id + G^T.sigma^-1.G)^-1 = \mathcal{G}
-  dim_g = G.shape[1]
-  isigma = numpy.diag(compute_i_sigma(sigma))
-  return numpy.linalg.inv(numpy.eye(dim_g) + numpy.dot(numpy.dot(G.transpose(), isigma), G))
-
-def compute_beta(G, sigma):
-  # beta = (sigma + G.G^T)^-1 = sigma^-1 - sigma^-1.G.alpha.G^T.sigma^-1 = \mathcal{S}
-  isigma = numpy.diag(compute_i_sigma(sigma))
-  gt_isigma = numpy.dot(G.transpose(), isigma)
-  alpha = compute_alpha(G, sigma)
-  return (isigma - numpy.dot(numpy.dot(gt_isigma.transpose(), alpha), gt_isigma))
-
-def compute_gamma(F, G, sigma, a):
-  # gamma_a = (Id + a.F^T.beta.F)^-1 = \mathcal{F}_{a}
-  dim_f = F.shape[1]
-  beta = compute_beta(G, sigma)
-  return numpy.linalg.inv(numpy.eye(dim_f) + a * numpy.dot(numpy.dot(F.transpose(), beta), F))
-
-def compute_ft_beta(F, G, sigma):
-  # F^T.beta = F^T.\mathcal{S}
-  beta = compute_beta(G, sigma)
-  return numpy.dot(numpy.transpose(F), beta)
-
-def compute_gt_i_sigma(G, sigma):
-  # G^T.sigma^-1
-  isigma = compute_i_sigma(sigma)
-  return numpy.transpose(G) * isigma
-
-def compute_logdet_alpha(G, sigma):
-  # \log(\det(\alpha)) = \log(\det(\mathcal{G}))
-  alpha = compute_alpha(G, sigma)
-  return math.log(numpy.linalg.det(alpha))
-
-def compute_logdet_sigma(sigma):
-  # \log(\det(\sigma)) = \log(\det(\sigma)) = \log(\prod(\sigma_{i}))
-  return math.log(numpy.prod(sigma))
-
-def compute_loglike_constterm(F, G, sigma, a):
-  # loglike_constterm[a] = a/2 * ( -D*\log(2*pi) -\log|\sigma| +\log|\alpha| +\log|\gamma_a|)
-  gamma_a = compute_gamma(F, G, sigma, a)
-  logdet_gamma_a = math.log(abs(numpy.linalg.det(gamma_a)))
-  ah = a/2.
-  dim_d =  F.shape[0]
-  logdet_sigma = compute_logdet_sigma(sigma)
-  logdet_alpha = compute_logdet_alpha(G, sigma)
-  res = -ah*dim_d*math.log(2*math.pi) - ah*logdet_sigma + ah*logdet_alpha + logdet_gamma_a/2.
-  return res;
-
-def compute_log_likelihood_point_estimate(observation, mu, F, G, sigma, hi, wij):
-  """
-  This function computes p(x_{ij} | h_{i}, w_{ij}, \Theta), which is given by
-  N_{x}[\mu + Fh_{i} + Gw_{ij} + epsilon_{ij}, \Sigma], N_{x} being a
-  Gaussian distribution. As it returns the corresponding log likelihood,
-  this is given by the sum of the following three terms:
-  C1 = -dim_d/2 log(2pi)
-  C2 = -1/2 log(det(\Sigma))
-  C3 = -1/2 (x_{ij}-\mu-Fh_{i}-Gw_{ij})^{T}\Sigma^{-1}(x_{ij}-\mu-Fh_{i}-Gw_{ij})
-  """
-
-  ### Pre-computes some of the constants
-  dim_d          = observation.shape[0]             # A scalar
-  log_2pi        = numpy.log(2. * numpy.pi)        # A scalar
-  C1             = -(dim_d / 2.) * log_2pi         # A scalar
-  C2             = -(1. / 2.) * numpy.sum( numpy.log(sigma) ) # (dim_d, 1)
-
-  ### Subtract the identity and session components from the observed vector.
-  session_plus_identity  = numpy.dot(F, hi) + numpy.dot(G, wij)
-  normalised_observation = numpy.reshape(observation - mu - session_plus_identity, (dim_d,1))
-  ### Now calculate C3
-  sigma_inverse  = numpy.reshape(1. / sigma, (dim_d,1))                      # (dim_d, 1)
-  C3             = -(1. / 2.) * numpy.sum(normalised_observation * sigma_inverse * normalised_observation)
-
-  ### Returns the log likelihood
-  log_likelihood     = C1 + C2 + C3
-  return (log_likelihood)
-
-
-def compute_log_likelihood(observations, mu, F, G, sigma):
-  """
-  This function computes the log-likelihood of the observations given the parameters
-  of the PLDA model. This is done by fulling integrating out the latent variables.
-  """
-  # Work out the number of samples that we have and normalise the data.
-  J_i                = observations.shape[0];                  # An integer > 0
-  norm_observations  = observations - numpy.tile(mu, [J_i,1]);        # (J_i, D_x)
-
-  # There are three terms that need to be computed: C1, C2 and C3
-
-  # 1. Computes C1
-  # C1 = - J_{i} * dim_d/2 log(2*pi)
-  dim_d          = observations.shape[1]             # A scalar
-  dim_f          = F.shape[1]
-  log_2pi        = numpy.log(2. * numpy.pi);        # A scalar
-  C1             = - J_i * (dim_d / 2.) * log_2pi;         # A scalar
-
-  # 2. Computes C2
-  # C2 = - J_i/2 * [log(det(sigma)) - log(det(alpha^-1))] + log(det(gamma_{J_i}))/2
-  ld_sigma = compute_logdet_sigma(sigma)
-  ld_alpha = compute_logdet_alpha(G, sigma)
-  gamma = compute_gamma(F, G, sigma, J_i)
-  ld_gamma = math.log(numpy.linalg.det(gamma))
-  C2 = - J_i/2.*(ld_sigma - ld_alpha)  + ld_gamma/2.
-
-  # 3. Computes C3
-  # This is a quadratic part and consists of
-  # C3   = -0.5 * sum x^T beta x + 0.5 * Quadratic term in x
-  # C3   = -0.5 * (C3a - C3b)
-  C3a                  = 0.0;
-  C3b_sum_part         = numpy.zeros((dim_f,1));
-  isigma               = numpy.diag(compute_i_sigma(sigma))
-  beta                 = compute_beta(G, sigma)
-  ft_beta              = numpy.dot(numpy.transpose(F), beta)
-  for j in range(0, J_i):
-    ### Calculations for C3a
-    current_vector           = numpy.reshape(norm_observations[j,:], (dim_d,1));  # (D_x, 1)
-    vector_E                 = numpy.dot(beta, current_vector);                   # (D_x, 1)
-    current_result           = numpy.dot(current_vector.transpose(), vector_E);   # A floating point value
-    C3a                      = C3a + current_result[0][0];                        # A floating point value
-    ### Calculations for C3b
-    C3b_sum_part             = C3b_sum_part + numpy.dot(ft_beta, current_vector);  # (nf, 1)
-
-  ### Final calculations for C3b, using the matrix gamma_{J_i}
-  C3b                        = numpy.dot(numpy.dot(C3b_sum_part.transpose(), gamma), C3b_sum_part);
-  C3                         = -0.5 * (C3a - C3b[0][0]);
-
-  return C1 + C2 + C3
-
-
-def test_plda_basemachine():
-  # Data used for performing the tests
-  sigma = numpy.ndarray(C_dim_d, 'float64')
-  sigma.fill(0.01)
-  mu = numpy.ndarray(C_dim_d, 'float64')
-  mu.fill(0)
-
-  # Defines reference results based on matlab
-  alpha_ref = numpy.array([ 0.002189051545735,  0.001127099941432,
-    -0.000145483208153, 0.001127099941432,  0.003549267943741,
-    -0.000552001405453, -0.000145483208153, -0.000552001405453,
-    0.001440505362615], 'float64').reshape(C_dim_g, C_dim_g)
-  beta_ref  = numpy.array([ 50.587191765140361, -14.512478352504877,
-    -0.294799164567830,  13.382002504394316,  9.202063877660278,
-    -43.182264846086497,  11.932345916716455, -14.512478352504878,
-    82.320149045633045, -12.605578822979698,  19.618675892079366,
-    13.033691341150439,  -8.004874490989799, -21.547363307109187,
-    -0.294799164567832, -12.605578822979696,  52.123885798398241,
-    4.363739008635009, 44.847177605628545,  16.438137537463710,
-    5.137421840557050, 13.382002504394316,  19.618675892079366,
-    4.363739008635011,  75.070401560513488, -4.515472972526140,
-    9.752862741017488,  34.196127678931106, 9.202063877660285,
-    13.033691341150439,  44.847177605628552,  -4.515472972526142,
-    56.189416227691098,  -7.536676357632515, -10.555735414707383,
-    -43.182264846086497,  -8.004874490989799,  16.438137537463703,
-    9.752862741017490, -7.536676357632518,  56.430571485722126,
-    9.471758169835317, 11.932345916716461, -21.547363307109187,
-    5.137421840557051,  34.196127678931099, -10.555735414707385,
-    9.471758169835320,  27.996266602110637], 'float64').reshape(C_dim_d, C_dim_d)
-  gamma3_ref = numpy.array([ 0.005318799462241, -0.000000012993151,
-    -0.000000012993151,  0.999999999999996], 'float64').reshape(C_dim_f, C_dim_f)
-
-  # Constructor tests
-  #m = PLDABase()
-  #assert m.dim_d == 0
-  #assert m.dim_f == 0
-  #assert m.dim_g == 0
-  #del m
-  m = PLDABase(C_dim_d, C_dim_f, C_dim_g)
-  assert m.shape[0] == C_dim_d
-  assert m.shape[1] == C_dim_f
-  assert m.shape[2] == C_dim_g
-  assert abs(m.variance_threshold - 0.) < 1e-10
-  del m
-  m = PLDABase(C_dim_d, C_dim_f, C_dim_g, 1e-2)
-  assert m.shape[0] == C_dim_d
-  assert m.shape[1] == C_dim_f
-  assert m.shape[2] == C_dim_g
-  assert abs(m.variance_threshold - 1e-2) < 1e-10
-  del m
-
-  # Defines base machine
-  m = PLDABase(C_dim_d, C_dim_f, C_dim_g)
-  #m.resize(C_dim_d, C_dim_f, C_dim_g)
-  # Sets the current mu, F, G and sigma
-  m.mu = mu
-  m.f = C_F
-  m.g = C_G
-  m.sigma = sigma
-  gamma3 = m.get_add_gamma(3).copy()
-  constTerm3 = m.get_add_log_like_const_term(3)
-
-  # Compares precomputed values to matlab reference
-  for ii in range(m.__alpha__.shape[0]):
-    for jj in range(m.__alpha__.shape[1]):
-      absdiff = abs(m.__alpha__[ii,jj]- alpha_ref[ii,jj])
-      assert absdiff < 1e-10, 'PLDABase alpha matrix does not match reference at (%d,%d) to 10^-10: |%g-%g| = %g' % (ii, jj, m.__alpha__[ii,jj], alpha_ref[ii,jj], absdiff)
-  assert equals(m.__alpha__, alpha_ref, 1e-10)
-  assert equals(m.__beta__, beta_ref, 1e-10)
-  assert equals(gamma3, gamma3_ref, 1e-10)
-
-  # Compares precomputed values to the ones returned by python implementation
-  assert equals(m.__isigma__, compute_i_sigma(sigma), 1e-10)
-  assert equals(m.__alpha__, compute_alpha(C_G,sigma), 1e-10)
-  assert equals(m.__beta__, compute_beta(C_G,sigma), 1e-10)
-  assert equals(m.get_add_gamma(3), compute_gamma(C_F,C_G,sigma,3), 1e-10)
-  assert m.has_gamma(3)
-  assert equals(m.get_gamma(3), compute_gamma(C_F,C_G,sigma,3), 1e-10)
-  assert equals(m.__ft_beta__, compute_ft_beta(C_F,C_G,sigma), 1e-10)
-  assert equals(m.__gt_i_sigma__, compute_gt_i_sigma(C_G,sigma), 1e-10)
-  assert math.fabs(m.__logdet_alpha__ - compute_logdet_alpha(C_G,sigma)) < 1e-10
-  assert math.fabs(m.__logdet_sigma__ - compute_logdet_sigma(sigma)) < 1e-10
-  assert abs(m.get_add_log_like_const_term(3) - compute_loglike_constterm(C_F,C_G,sigma,3)) < 1e-10
-  assert m.has_log_like_const_term(3)
-  assert abs(m.get_log_like_const_term(3) - compute_loglike_constterm(C_F,C_G,sigma,3)) < 1e-10
-
-  # Defines base machine
-  del m
-  m = PLDABase(C_dim_d, C_dim_f, C_dim_g)
-  # Sets the current mu, F, G and sigma
-  m.mu = mu
-  m.f = C_F
-  m.g = C_G
-  m.sigma = sigma
-  gamma3 = m.get_add_gamma(3).copy()
-  constTerm3 = m.get_add_log_like_const_term(3)
-
-  # Compares precomputed values to matlab reference
-  assert equals(m.__alpha__, alpha_ref, 1e-10)
-  assert equals(m.__beta__, beta_ref, 1e-10)
-  assert equals(gamma3, gamma3_ref, 1e-10)
-
-  # values before being saved
-  isigma = m.__isigma__.copy()
-  alpha = m.__alpha__.copy()
-  beta = m.__beta__.copy()
-  FtBeta = m.__ft_beta__.copy()
-  GtISigma = m.__gt_i_sigma__.copy()
-  logdetAlpha = m.__logdet_alpha__
-  logdetSigma = m.__logdet_sigma__
-
-  # Saves to file, loads and compares to original
-  filename = str(tempfile.mkstemp(".hdf5")[1])
-  m.save(bob.io.base.HDF5File(filename, 'w'))
-  m_loaded = PLDABase(bob.io.base.HDF5File(filename))
-
-  # Compares the values loaded with the former ones
-  assert m_loaded == m
-  assert (m_loaded != m) is False
-  assert equals(m_loaded.mu, mu, 1e-10)
-  assert equals(m_loaded.f, C_F, 1e-10)
-  assert equals(m_loaded.g, C_G, 1e-10)
-  assert equals(m_loaded.sigma, sigma, 1e-10)
-  assert equals(m_loaded.__isigma__, isigma, 1e-10)
-  assert equals(m_loaded.__alpha__, alpha, 1e-10)
-  assert equals(m_loaded.__beta__, beta, 1e-10)
-  assert equals(m_loaded.__ft_beta__, FtBeta, 1e-10)
-  assert equals(m_loaded.__gt_i_sigma__, GtISigma, 1e-10)
-  assert abs(m_loaded.__logdet_alpha__ - logdetAlpha) < 1e-10
-  assert abs(m_loaded.__logdet_sigma__ - logdetSigma) < 1e-10
-  assert m_loaded.has_gamma(3)
-  assert equals(m_loaded.get_gamma(3), gamma3_ref, 1e-10)
-  assert equals(m_loaded.get_add_gamma(3), gamma3_ref, 1e-10)
-  assert m_loaded.has_log_like_const_term(3)
-  assert abs(m_loaded.get_add_log_like_const_term(3) - constTerm3) < 1e-10
-
-  # Compares the values loaded with the former ones when copying
-  m_copy = PLDABase(m_loaded)
-  assert m_loaded == m_copy
-  assert (m_loaded != m_copy) is False
-  # Test clear_maps method
-  assert m_copy.has_gamma(3)
-  assert m_copy.has_log_like_const_term(3)
-  m_copy.clear_maps()
-  assert (m_copy.has_gamma(3)) is False
-  assert (m_copy.has_log_like_const_term(3)) is False
-
-  # Check variance flooring thresholds-related methods
-  v_zo = numpy.array([0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01])
-  v_zo_ = 0.01
-  v_zzo = numpy.array([0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001])
-  v_zzo_ = 0.001
-  m_copy.variance_threshold = v_zo_
-  assert (m_loaded == m_copy) is False
-  assert m_loaded != m_copy
-  m_copy.variance_threshold = v_zzo_
-  m_copy.sigma = v_zo
-  assert equals(m_copy.sigma, v_zo, 1e-10)
-  m_copy.variance_threshold = v_zo_
-  m_copy.sigma = v_zzo
-  assert equals(m_copy.sigma, v_zo, 1e-10)
-  m_copy.variance_threshold = v_zzo_
-  m_copy.sigma = v_zzo
-  assert equals(m_copy.sigma, v_zzo, 1e-10)
-  m_copy.variance_threshold = v_zo_
-  assert equals(m_copy.sigma, v_zo, 1e-10)
-
-  # Clean-up
-  os.unlink(filename)
-
-
-def test_plda_basemachine_loglikelihood_pointestimate():
-
-  # Data used for performing the tests
-  # Features and subspaces dimensionality
-  sigma = numpy.ndarray(C_dim_d, 'float64')
-  sigma.fill(0.01)
-  mu = numpy.ndarray(C_dim_d, 'float64')
-  mu.fill(0)
-  xij = numpy.array([0.7, 1.3, 2.5, 0.3, 1.3, 2.7, 0.9])
-  hi = numpy.array([-0.5, 0.5])
-  wij = numpy.array([-0.1, 0.2, 0.3])
-
-  m = PLDABase(C_dim_d, C_dim_f, C_dim_g)
-  # Sets the current mu, F, G and sigma
-  m.mu = mu
-  m.f = C_F
-  m.g = C_G
-  m.sigma = sigma
-
-  #assert equals(m.compute_log_likelihood_point_estimate(xij, hi, wij), compute_log_likelihood_point_estimate(xij, mu, C_F, C_G, sigma, hi, wij), 1e-6)
-  log_likelihood_point_estimate        = m.compute_log_likelihood_point_estimate(xij, hi, wij)
-  log_likelihood_point_estimate_python = compute_log_likelihood_point_estimate(xij,         mu, C_F, C_G, sigma, hi, wij)
-  assert equals(log_likelihood_point_estimate, log_likelihood_point_estimate_python, 1e-6)
-
-
-def test_plda_machine():
-
-  # Data used for performing the tests
-  # Features and subspaces dimensionality
-  sigma = numpy.ndarray(C_dim_d, 'float64')
-  sigma.fill(0.01)
-  mu = numpy.ndarray(C_dim_d, 'float64')
-  mu.fill(0)
-
-  # Defines base machine
-  mb = PLDABase(C_dim_d, C_dim_f, C_dim_g)
-  # Sets the current mu, F, G and sigma
-  mb.mu = mu
-  mb.f = C_F
-  mb.g = C_G
-  mb.sigma = sigma
-
-  # Test constructors and dim getters
-  m = PLDAMachine(mb)
-  assert m.shape[0] == C_dim_d
-  assert m.shape[1]== C_dim_f
-  assert m.shape[2] == C_dim_g
-
-  m0 = PLDAMachine(mb)
-  #m0.plda_base = mb
-  assert m0.shape[0]  == C_dim_d
-  assert m0.shape[1]  == C_dim_f
-  assert m0.shape[2]  == C_dim_g
-
-  # Defines machine
-  n_samples = 2
-  WSumXitBetaXi = 0.37
-  weightedSum = numpy.array([1.39,0.54], 'float64')
-  log_likelihood = -0.22
-
-  m.n_samples = n_samples
-  m.w_sum_xit_beta_xi = WSumXitBetaXi
-  m.weighted_sum = weightedSum
-  m.log_likelihood = log_likelihood
-
-  gamma3 = m.get_add_gamma(3).copy()
-  constTerm3 = m.get_add_log_like_const_term(3)
-
-  # Saves to file, loads and compares to original
-  filename = str(tempfile.mkstemp(".hdf5")[1])
-  m.save(bob.io.base.HDF5File(filename, 'w'))
-  m_loaded = PLDAMachine(bob.io.base.HDF5File(filename), mb)
-
-  # Compares the values loaded with the former ones
-  assert m_loaded == m
-  assert (m_loaded != m) is False
-  assert abs(m_loaded.n_samples - n_samples) < 1e-10
-  assert abs(m_loaded.w_sum_xit_beta_xi - WSumXitBetaXi) < 1e-10
-  assert equals(m_loaded.weighted_sum, weightedSum, 1e-10)
-  assert abs(m_loaded.log_likelihood - log_likelihood) < 1e-10
-  assert m_loaded.has_gamma(3)
-  assert equals(m_loaded.get_add_gamma(3), gamma3, 1e-10)
-  assert equals(m_loaded.get_gamma(3), gamma3, 1e-10)
-  assert m_loaded.has_log_like_const_term(3)
-  assert abs(m_loaded.get_add_log_like_const_term(3) - constTerm3) < 1e-10
-  assert abs(m_loaded.get_log_like_const_term(3) - constTerm3) < 1e-10
-
-  # Test clear_maps method
-  assert m_loaded.has_gamma(3)
-  assert m_loaded.has_log_like_const_term(3)
-  m_loaded.clear_maps()
-  assert (m_loaded.has_gamma(3)) is False
-  assert (m_loaded.has_log_like_const_term(3)) is False
-
-  # Check exceptions
-  #m_loaded2 = PLDAMachine(bob.io.base.HDF5File(filename))
-  #m_loaded2.load(bob.io.base.HDF5File(filename))
-  #nose.tools.assert_raises(RuntimeError, getattr, m_loaded2, 'shape')
-  #nose.tools.assert_raises(RuntimeError, getattr, m_loaded2, 'dim_f')
-  #nose.tools.assert_raises(RuntimeError, getattr, m_loaded2, 'dim_g')
-  #nose.tools.assert_raises(RuntimeError, m_loaded2.forward, [1.])
-  #nose.tools.assert_raises(RuntimeError, m_loaded2.compute_log_likelihood, [1.])
-
-  # Clean-up
-  os.unlink(filename)
-
-
-def test_plda_machine_log_likelihood_Python():
-
-  # Data used for performing the tests
-  # Features and subspaces dimensionality
-  sigma = numpy.ndarray(C_dim_d, 'float64')
-  sigma.fill(0.01)
-  mu = numpy.ndarray(C_dim_d, 'float64')
-  mu.fill(0)
-
-  # Defines base machine
-  mb = PLDABase(C_dim_d, C_dim_f, C_dim_g)
-  # Sets the current mu, F, G and sigma
-  mb.mu = mu
-  mb.f = C_F
-  mb.g = C_G
-  mb.sigma = sigma
-
-  # Defines machine
-  m = PLDAMachine(mb)
-
-  # Defines (random) samples and check compute_log_likelihood method
-  ar_e = numpy.random.randn(2,C_dim_d)
-  ar_p = numpy.random.randn(C_dim_d)
-  ar_s = numpy.vstack([ar_e, ar_p])
-  assert abs(m.compute_log_likelihood(ar_s, False) - compute_log_likelihood(ar_s, mu, C_F, C_G, sigma)) < 1e-10
-  ar_p2d = numpy.reshape(ar_p, (1,C_dim_d))
-  assert abs(m.compute_log_likelihood(ar_p, False) - compute_log_likelihood(ar_p2d, mu, C_F, C_G, sigma)) < 1e-10
-
-  # Defines (random) samples and check forward method
-  ar2_e = numpy.random.randn(4,C_dim_d)
-  ar2_p = numpy.random.randn(C_dim_d)
-  ar2_s = numpy.vstack([ar2_e, ar2_p])
-  m.log_likelihood = m.compute_log_likelihood(ar2_e, False)
-  llr = m.compute_log_likelihood(ar2_s, True) - (m.compute_log_likelihood(ar2_s, False) + m.log_likelihood)
-  assert abs(m(ar2_s) - llr) < 1e-10
-  ar2_p2d = numpy.random.randn(3,C_dim_d)
-  ar2_s2d = numpy.vstack([ar2_e, ar2_p2d])
-  llr2d = m.compute_log_likelihood(ar2_s2d, True) - (m.compute_log_likelihood(ar2_s2d, False) + m.log_likelihood)
-  assert abs(m(ar2_s2d) - llr2d) < 1e-10
-
-def test_plda_machine_log_likelihood_Prince():
-
-  # Data used for performing the tests
-  # Features and subspaces dimensionality
-  D = 7
-  nf = 2
-  ng = 3
-
-  # initial values for F, G and sigma
-  G_init=numpy.array([-1.1424, -0.5044, -0.1917,
-    -0.6249,  0.1021, -0.8658,
-    -1.1687,  1.1963,  0.1807,
-    0.3926,  0.1203,  1.2665,
-    1.3018, -1.0368, -0.2512,
-    -0.5936, -0.8571, -0.2046,
-    0.4364, -0.1699, -2.2015]).reshape(D,ng)
-  # F <-> PCA on G
-  F_init=numpy.array([-0.054222647972093, -0.000000000783146,
-    0.596449127693018,  0.000000006265167,
-    0.298224563846509,  0.000000003132583,
-    0.447336845769764,  0.000000009397750,
-    -0.108445295944185, -0.000000001566292,
-    -0.501559493741856, -0.000000006265167,
-    -0.298224563846509, -0.000000003132583]).reshape(D,nf)
-  sigma_init = 0.01 * numpy.ones((D,), 'float64')
-  mean_zero = numpy.zeros((D,), 'float64')
-
-  # base machine
-  mb = PLDABase(D,nf,ng)
-  mb.sigma = sigma_init
-  mb.g = G_init
-  mb.f = F_init
-  mb.mu = mean_zero
-
-  # Data for likelihood computation
-  x1 = numpy.array([0.8032, 0.3503, 0.4587, 0.9511, 0.1330, 0.0703, 0.7061])
-  x2 = numpy.array([0.9317, 0.1089, 0.6517, 0.1461, 0.6940, 0.6256, 0.0437])
-  x3 = numpy.array([0.7979, 0.9862, 0.4367, 0.3447, 0.0488, 0.2252, 0.5810])
-  X = numpy.ndarray((3,D), 'float64')
-  X[0,:] = x1
-  X[1,:] = x2
-  X[2,:] = x3
-  a = []
-  a.append(x1)
-  a.append(x2)
-  a.append(x3)
-  a = numpy.array(a)
-
-  # reference likelihood from Prince implementation
-  ll_ref = -182.8880743535197
-
-  # machine
-  m = PLDAMachine(mb)
-  ll = m.compute_log_likelihood(X)
-  assert abs(ll - ll_ref) < 1e-10
-
-  # log likelihood ratio
-  Y = numpy.ndarray((2,D), 'float64')
-  Y[0,:] = x1
-  Y[1,:] = x2
-  Z = numpy.ndarray((1,D), 'float64')
-  Z[0,:] = x3
-  llX = m.compute_log_likelihood(X)
-  llY = m.compute_log_likelihood(Y)
-  llZ = m.compute_log_likelihood(Z)
-  # reference obtained by computing the likelihood of [x1,x2,x3], [x1,x2]
-  # and [x3] separately
-  llr_ref = -4.43695386675
-  assert abs((llX - (llY + llZ)) - llr_ref) < 1e-10
diff --git a/bob/learn/misc/test_plda_trainer.py b/bob/learn/misc/test_plda_trainer.py
deleted file mode 100644
index 553ff4fba6db5c6de88d63288a0f6949dcc5fef4..0000000000000000000000000000000000000000
--- a/bob/learn/misc/test_plda_trainer.py
+++ /dev/null
@@ -1,741 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
-# Fri Oct 14 18:07:56 2011 +0200
-#
-# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
-
-"""Tests PLDA trainer
-"""
-
-import sys
-import numpy
-import numpy.linalg
-
-from . import PLDATrainer, PLDABase, PLDAMachine
-
-class PythonPLDATrainer():
-  """A simplified (and slower) version of the PLDATrainer"""
-
-  def __init__(self, convergence_threshold=0.001, max_iterations=10,
-      compute_likelihood=False, use_sum_second_order=True):
-    # Our state
-    self.m_convergence_threshold = convergence_threshold
-    self.m_max_iterations = max_iterations
-    self.m_compute_likelihood = compute_likelihood
-    self.m_dim_f = 0
-    self.m_dim_g = 0
-    self.m_B = numpy.ndarray(shape=(0,0), dtype=numpy.float64)
-    self.m_n_samples_per_id = numpy.ndarray(shape=(0,), dtype=numpy.float64)
-    self.m_z_first_order = []
-    self.m_z_second_order = []
-    self.m_sum_z_second_order = numpy.ndarray(shape=(0,0), dtype=numpy.float64)
-
-  def reset():
-    """Resets our internal state"""
-    self.m_convergence_threshold = 0.001
-    self.m_max_iterations = 10
-    self.m_compute_likelihood = False
-    self.m_dim_f = 0
-    self.m_dim_g = 0
-    self.m_n_samples_per_id = numpy.ndarray(shape=(0,), dtype=numpy.float64)
-    self.m_z_first_order = []
-    self.m_z_second_order = []
-    self.m_sum_z_second_order = numpy.ndarray(shape=(0,0), dtype=numpy.float64)
-
-  def __check_training_data__(self, data):
-    if len(data) == 0:
-      raise RuntimeError("Training data set is empty")
-    n_features = data[0].shape[1]
-    for v in data:
-      if(v.shape[1] != n_features):
-        raise RuntimeError("Inconsistent feature dimensionality in training data set")
-
-  def __init_members__(self, data):
-    n_features = data[0].shape[1]
-    self.m_z_first_order = []
-    df_dg = self.m_dim_f+self.m_dim_g
-    self.m_sum_z_second_order.resize(df_dg, df_dg)
-    self.m_n_samples_per_id.resize(len(data))
-    self.m_B.resize(n_features, df_dg)
-    for i in range(len(data)):
-      ns_i = data[i].shape[0]
-      self.m_n_samples_per_id[i] = ns_i
-      self.m_z_first_order.append(numpy.ndarray(shape=(ns_i, df_dg), dtype=numpy.float64))
-      self.m_z_second_order.append(numpy.ndarray(shape=(ns_i, df_dg, df_dg), dtype=numpy.float64))
-
-  def __init_mu__(self, machine, data):
-    mu = numpy.zeros(shape=machine.mu.shape[0], dtype=numpy.float64)
-    c = 0
-    # Computes the mean of the data
-    for v in data:
-      for i in range(v.shape[0]):
-        mu += v[i,:]
-        c +=1
-    mu /= c
-    machine.mu = mu
-
-  def __init_f__(self, machine, data):
-    n_ids = len(data)
-    S = numpy.zeros(shape=(machine.shape[0], n_ids), dtype=numpy.float64)
-    Si_sum = numpy.zeros(shape=(machine.shape[0],), dtype=numpy.float64)
-    for i in range(n_ids):
-      Si = S[:,i]
-      data_i = data[i]
-      for j in range(data_i.shape[0]):
-        Si += data_i[j,:]
-      Si /= data_i.shape[0]
-      Si_sum += Si
-    Si_sum /= n_ids
-
-    S = S - numpy.tile(Si_sum.reshape([machine.shape[0],1]), [1,n_ids])
-    U, sigma, S_ = numpy.linalg.svd(S, full_matrices=False)
-    U_slice = U[:,0:self.m_dim_f]
-    sigma_slice = sigma[0:self.m_dim_f]
-    sigma_slice_sqrt = numpy.sqrt(sigma_slice)
-    machine.f = U_slice / sigma_slice_sqrt
-
-  def __init_g__(self, machine, data):
-    n_samples = 0
-    for v in data:
-      n_samples += v.shape[0]
-    S = numpy.zeros(shape=(machine.shape[0], n_samples), dtype=numpy.float64)
-    Si_sum = numpy.zeros(shape=(machine.shape[0],), dtype=numpy.float64)
-    cache = numpy.zeros(shape=(machine.shape[0],), dtype=numpy.float64)
-    c = 0
-    for i in range(len(data)):
-      cache = 0
-      data_i = data[i]
-      for j in range(data_i.shape[0]):
-        cache += data_i[j,:]
-      cache /= data_i.shape[0]
-      for j in range(data_i.shape[0]):
-        S[:,c] = data_i[j,:] - cache
-        Si_sum += S[:,c]
-        c += 1
-    Si_sum /= n_samples
-
-    S = S - numpy.tile(Si_sum.reshape([machine.shape[0],1]), [1,n_samples])
-    U, sigma, S_ = numpy.linalg.svd(S, full_matrices=False)
-    U_slice = U[:,0:self.m_dim_g]
-    sigma_slice_sqrt = numpy.sqrt(sigma[0:self.m_dim_g])
-    machine.g = U_slice / sigma_slice_sqrt
-
-  def __init_sigma__(self, machine, data, factor = 1.):
-    """As a variance of the data"""
-    cache1 = numpy.zeros(shape=(machine.shape[0],), dtype=numpy.float64)
-    cache2 = numpy.zeros(shape=(machine.shape[0],), dtype=numpy.float64)
-    n_samples = 0
-    for v in data:
-      for j in range(v.shape[0]):
-        cache1 += v[j,:]
-      n_samples += v.shape[0]
-    cache1 /= n_samples
-    for v in data:
-      for j in range(v.shape[0]):
-        cache2 += numpy.square(v[j,:] - cache1)
-    machine.sigma = factor * cache2 / (n_samples - 1)
-
-  def __init_mu_f_g_sigma__(self, machine, data):
-    self.__init_mu__(machine, data)
-    self.__init_f__(machine, data)
-    self.__init_g__(machine, data)
-    self.__init_sigma__(machine, data)
-
-  def initialize(self, machine, data):
-    self.__check_training_data__(data)
-    n_features = data[0].shape[1]
-    if(machine.shape[0] != n_features):
-      raise RuntimeError("Inconsistent feature dimensionality between the machine and the training data set")
-    self.m_dim_f = machine.shape[1]
-    self.m_dim_g = machine.shape[2]
-    self.__init_members__(data)
-    # Warning: Default initialization of mu, F, G, sigma using scatters
-    self.__init_mu_f_g_sigma__(machine, data)
-    # Make sure that the precomputation has been performed
-    machine.__precompute__()
-
-  def __compute_sufficient_statistics_given_observations__(self, machine, observations):
-    """
-    We compute the expected values of the latent variables given the observations
-    and parameters of the model.
-
-    First order or the expected value of the latent variables.:
-      F = (I+A^{T}\Sigma'^{-1}A)^{-1} * A^{T}\Sigma^{-1} (\tilde{x}_{s}-\mu').
-    Second order stats:
-      S = (I+A^{T}\Sigma'^{-1}A)^{-1} + (F*F^{T}).
-    """
-
-    # Get the number of observations
-    J_i                       = observations.shape[0]            # An integer > 0
-    dim_d                     = observations.shape[1]            # A scalar
-    # Useful values
-    mu                        = machine.mu
-    F                         = machine.f
-    G                         = machine.g
-    sigma                     = machine.sigma
-    isigma                    = machine.__isigma__
-    alpha                     = machine.__alpha__
-    ft_beta                   = machine.__ft_beta__
-    gamma                     = machine.get_add_gamma(J_i)
-    # Normalise the observations
-    normalised_observations   = observations - numpy.tile(mu, [J_i,1]) # (dim_d, J_i)
-
-    ### Expected value of the latent variables using the scalable solution
-    # Identity part first
-    sum_ft_beta_part          = numpy.zeros(self.m_dim_f)     # (dim_f)
-    for j in range(0, J_i):
-      current_observation     = normalised_observations[j,:]  # (dim_d)
-      sum_ft_beta_part        = sum_ft_beta_part + numpy.dot(ft_beta, current_observation)  # (dim_f)
-    h_i                       = numpy.dot(gamma, sum_ft_beta_part)                          # (dim_f)
-    # Reproject the identity part to work out the session parts
-    Fh_i                      = numpy.dot(F, h_i)                                           # (dim_d)
-    z_first_order = numpy.zeros((J_i, self.m_dim_f+self.m_dim_g))
-    for j in range(0, J_i):
-      current_observation       = normalised_observations[j,:]                  # (dim_d)
-      w_ij                      = numpy.dot(alpha, G.transpose())               # (dim_g, dim_d)
-      w_ij                      = numpy.multiply(w_ij, isigma)                  # (dim_g, dim_d)
-      w_ij                      = numpy.dot(w_ij, (current_observation - Fh_i)) # (dim_g)
-      z_first_order[j,:]        = numpy.hstack([h_i,w_ij])                      # (dim_f+dim_g)
-
-    ### Calculate the expected value of the squared of the latent variables
-    # The constant matrix we use has the following parts: [top_left, top_right; bottom_left, bottom_right]
-    # P             = Inverse_I_plus_GTEG * G^T * Sigma^{-1} * F  (dim_g, dim_f)
-    # top_left      = gamma                                       (dim_f, dim_f)
-    # bottom_left   = top_right^T = P * gamma                     (dim_g, dim_f)
-    # bottom_right  = Inverse_I_plus_GTEG - bottom_left * P^T     (dim_g, dim_g)
-    top_left                 = gamma
-    P                        = numpy.dot(alpha, G.transpose())
-    P                        = numpy.dot(numpy.dot(P,numpy.diag(isigma)), F)
-    bottom_left              = -1 * numpy.dot(P, top_left)
-    top_right                = bottom_left.transpose()
-    bottom_right             = alpha -1 * numpy.dot(bottom_left, P.transpose())
-    constant_matrix          = numpy.bmat([[top_left,top_right],[bottom_left, bottom_right]])
-
-    # Now get the actual expected value
-    z_second_order = numpy.zeros((J_i, self.m_dim_f+self.m_dim_g, self.m_dim_f+self.m_dim_g))
-    for j in range(0, J_i):
-      z_second_order[j,:,:] = constant_matrix + numpy.outer(z_first_order[j,:],z_first_order[j,:])  # (dim_f+dim_g,dim_f+dim_g)
-
-    ### Return the first and second order statistics
-    return(z_first_order, z_second_order)
-
-  def e_step(self, machine, data):
-    self.m_sum_z_second_order.fill(0.)
-    for i in range(len(data)):
-      ### Get the observations for this label and the number of observations for this label.
-      observations_for_h_i      = data[i]
-      J_i                       = observations_for_h_i.shape[0]                           # An integer > 0
-
-      ### Gather the statistics for this identity and then separate them for each observation.
-      [z_first_order, z_second_order] = self.__compute_sufficient_statistics_given_observations__(machine, observations_for_h_i)
-      self.m_z_first_order[i]  = z_first_order
-      self.m_z_second_order[i] = z_second_order
-      J_i = len(z_second_order)
-      for j in range(0, J_i):
-        self.m_sum_z_second_order += z_second_order[j]
-
-  def __update_f_and_g__(self, machine, data):
-    ### Initialise the numerator and the denominator.
-    dim_d                          = machine.shape[0]
-    accumulated_B_numerator        = numpy.zeros((dim_d,self.m_dim_f+self.m_dim_g))
-    accumulated_B_denominator      = numpy.linalg.inv(self.m_sum_z_second_order)
-    mu                             = machine.mu
-
-    ### Go through and process on a per subjectid basis
-    for i in range(len(data)):
-      # Normalise the observations
-      J_i                       = data[i].shape[0]
-      normalised_observations   = data[i] - numpy.tile(mu, [J_i,1]) # (J_i, dim_d)
-
-      ### Gather the statistics for this label
-      z_first_order_i                    = self.m_z_first_order[i]  # List of (dim_f+dim_g) vectors
-
-      ### Accumulate for the B matrix for this identity (current_label).
-      for j in range(0, J_i):
-        current_observation_for_h_i   = normalised_observations[j,:]   # (dim_d)
-        accumulated_B_numerator       = accumulated_B_numerator + numpy.outer(current_observation_for_h_i, z_first_order_i[j,:])  # (dim_d, dim_f+dim_g);
-
-    ### Update the B matrix which we can then use this to update the F and G matrices.
-    B                                  = numpy.dot(accumulated_B_numerator,accumulated_B_denominator)
-    machine.f                          = B[:,0:self.m_dim_f].copy()
-    machine.g                          = B[:,self.m_dim_f:self.m_dim_f+self.m_dim_g].copy()
-
-  def __update_sigma__(self, machine, data):
-    ### Initialise the accumulated Sigma
-    dim_d                          = machine.shape[0]
-    mu                             = machine.mu
-    accumulated_sigma              = numpy.zeros(dim_d)   # An array (dim_d)
-    number_of_observations         = 0
-    B = numpy.hstack([machine.f, machine.g])
-
-    ### Go through and process on a per subjectid basis (based on the labels we were given.
-    for i in range(len(data)):
-      # Normalise the observations
-      J_i                       = data[i].shape[0]
-      normalised_observations   = data[i] - numpy.tile(mu, [J_i,1]) # (J_i, dim_d)
-
-      ### Gather the statistics for this identity and then separate them for each
-      ### observation.
-      z_first_order_i                    = self.m_z_first_order[i]  # List of (dim_f+dim_g) vectors
-
-      ### Accumulate for the sigma matrix, which will be diagonalised
-      for j in range(0, J_i):
-        current_observation_for_h_i   = normalised_observations[j,:]  # (dim_d)
-        left                          = current_observation_for_h_i * current_observation_for_h_i # (dim_d)
-        projected_direction           = numpy.dot(B, z_first_order_i[j,:])                        # (dim_d)
-        right                         = projected_direction * current_observation_for_h_i         # (dim_d)
-        accumulated_sigma             = accumulated_sigma + (left - right)                        # (dim_d)
-        number_of_observations        = number_of_observations + 1
-
-    ### Normalise by the number of observations (1/IJ)
-    machine.sigma                     = accumulated_sigma / number_of_observations;
-
-  def m_step(self, machine, data):
-    self.__update_f_and_g__(machine, data)
-    self.__update_sigma__(machine, data)
-    machine.__precompute__()
-
-  def finalize(self, machine, data):
-    machine.__precompute_log_like__()
-
-  def train(self, machine, data):
-    self.initialize(machine, data)
-    average_output_previous = -sys.maxsize
-    average_output = -sys.maxsize
-    self.e_step(machine, data)
-
-    i = 0
-    while True:
-      average_output_previous = average_output
-      self.m_step(machine, data)
-      self.e_step(machine, data)
-      if(self.m_max_iterations > 0 and i+1 >= self.m_max_iterations):
-        break
-      i += 1
-
-
-def test_plda_EM_vs_Python():
-
-  # Data used for performing the tests
-  # Features and subspaces dimensionality
-  D = 7
-  nf = 2
-  ng = 3
-
-  # first identity (4 samples)
-  a = numpy.array([
-    [1,2,3,4,5,6,7],
-    [7,8,3,3,1,8,2],
-    [3,2,1,4,5,1,7],
-    [9,0,3,2,1,4,6],
-    ], dtype='float64')
-
-  # second identity (3 samples)
-  b = numpy.array([
-    [5,6,3,4,2,0,2],
-    [1,7,8,9,4,4,8],
-    [8,7,2,5,1,1,1],
-    ], dtype='float64')
-
-  # list of arrays (training data)
-  l = [a,b]
-
-  # initial values for F, G and sigma
-  G_init=numpy.array([-1.1424, -0.5044, -0.1917,
-    -0.6249,  0.1021, -0.8658,
-    -1.1687,  1.1963,  0.1807,
-    0.3926,  0.1203,  1.2665,
-    1.3018, -1.0368, -0.2512,
-    -0.5936, -0.8571, -0.2046,
-    0.4364, -0.1699, -2.2015]).reshape(D,ng)
-
-  # F <-> PCA on G
-  F_init=numpy.array([-0.054222647972093, -0.000000000783146,
-    0.596449127693018,  0.000000006265167,
-    0.298224563846509,  0.000000003132583,
-    0.447336845769764,  0.000000009397750,
-    -0.108445295944185, -0.000000001566292,
-    -0.501559493741856, -0.000000006265167,
-    -0.298224563846509, -0.000000003132583]).reshape(D,nf)
-  sigma_init = 0.01 * numpy.ones(D, 'float64')
-
-  # Runs the PLDA trainer EM-steps (2 steps)
-  # Defines base trainer and machine
-  t = PLDATrainer(10)
-  t_py = PythonPLDATrainer(max_iterations=10)
-  m = PLDABase(D,nf,ng)
-  m_py = PLDABase(D,nf,ng)
-
-  # Sets the same initialization methods
-  t.init_f_method = 'BETWEEN_SCATTER'
-  t.init_g_method = 'WITHIN_SCATTER'
-  t.init_sigma_method = 'VARIANCE_DATA'
-
-  t.train(m, l)
-  t_py.train(m_py, l)
-  assert numpy.allclose(m.mu, m_py.mu)
-  assert numpy.allclose(m.f, m_py.f)
-  assert numpy.allclose(m.g, m_py.g)
-  assert numpy.allclose(m.sigma, m_py.sigma)
-
-
-def test_plda_EM_vs_Prince():
-  # Data used for performing the tests
-  # Features and subspaces dimensionality
-  dim_d = 7
-  dim_f = 2
-  dim_g = 3
-
-  # first identity (4 samples)
-  a = numpy.array([
-    [1,2,3,4,5,6,7],
-    [7,8,3,3,1,8,2],
-    [3,2,1,4,5,1,7],
-    [9,0,3,2,1,4,6],
-    ], dtype='float64')
-
-  # second identity (3 samples)
-  b = numpy.array([
-    [5,6,3,4,2,0,2],
-    [1,7,8,9,4,4,8],
-    [8,7,2,5,1,1,1],
-    ], dtype='float64')
-
-  # list of arrays (training data)
-  l = [a,b]
-
-  # initial values for F, G and sigma
-  G_init=numpy.array([-1.1424, -0.5044, -0.1917,
-    -0.6249,  0.1021, -0.8658,
-    -1.1687,  1.1963,  0.1807,
-    0.3926,  0.1203,  1.2665,
-    1.3018, -1.0368, -0.2512,
-    -0.5936, -0.8571, -0.2046,
-    0.4364, -0.1699, -2.2015]).reshape(dim_d,dim_g)
-
-  # F <-> PCA on G
-  F_init=numpy.array([-0.054222647972093, -0.000000000783146,
-    0.596449127693018,  0.000000006265167,
-    0.298224563846509,  0.000000003132583,
-    0.447336845769764,  0.000000009397750,
-    -0.108445295944185, -0.000000001566292,
-    -0.501559493741856, -0.000000006265167,
-    -0.298224563846509, -0.000000003132583]).reshape(dim_d,dim_f)
-  sigma_init = 0.01 * numpy.ones(dim_d, 'float64')
-
-  # Defines reference results based on Princes'matlab implementation
-  # After 1 iteration
-  z_first_order_a_1 = numpy.array(
-    [-2.624115900658397, -0.000000034277848,  1.554823055585319,  0.627476234024656, -0.264705934182394,
-     -2.624115900658397, -0.000000034277848, -2.703482671599357, -1.533283607433197,  0.553725774828231,
-     -2.624115900658397, -0.000000034277848,  2.311647528461115,  1.266362142140170, -0.317378177105131,
-     -2.624115900658397, -0.000000034277848, -1.163402640008200, -0.372604542926019,  0.025152800097991
-    ]).reshape(4, dim_f+dim_g)
-  z_first_order_b_1 = numpy.array(
-    [ 3.494168818797438,  0.000000045643026,  0.111295550530958, -0.029241422535725,  0.257045446451067,
-      3.494168818797438,  0.000000045643026,  1.102110715965762,  1.481232954001794, -0.970661225144399,
-      3.494168818797438,  0.000000045643026, -1.212854031699468, -1.435946529317718,  0.717884143973377
-    ]).reshape(3, dim_f+dim_g)
-
-  z_second_order_sum_1 = numpy.array(
-    [64.203518285366087,  0.000000747228248,  0.002703277337642,  0.078542842475345,  0.020894328259862,
-      0.000000747228248,  6.999999999999980, -0.000000003955962,  0.000000002017232, -0.000000003741593,
-      0.002703277337642, -0.000000003955962, 19.136889380923918, 11.860493771107487, -4.584339465366988,
-      0.078542842475345,  0.000000002017232, 11.860493771107487,  8.771502339750128, -3.905706024997424,
-      0.020894328259862, -0.000000003741593, -4.584339465366988, -3.905706024997424,  2.011924970338584
-    ]).reshape(dim_f+dim_g, dim_f+dim_g)
-
-  sigma_1 = numpy.array(
-      [2.193659969999207, 3.748361365521041, 0.237835235737085,
-        0.558546035892629, 0.209272700958400, 1.717782807724451,
-        0.248414618308223])
-
-  F_1 = numpy.array(
-      [-0.059083416465692,  0.000000000751007,
-        0.600133217253169,  0.000000006957266,
-        0.302789123922871,  0.000000000218947,
-        0.454540641429714,  0.000000003342540,
-        -0.106608957780613, -0.000000001641389,
-        -0.494267694269430, -0.000000011059552,
-        -0.295956102084270, -0.000000006718366]).reshape(dim_d,dim_f)
-
-  G_1 = numpy.array(
-      [-1.836166150865047,  2.491475145758734,  5.095958946372235,
-        -0.608732205531767, -0.618128420353493, -1.085423135463635,
-        -0.697390472635929, -1.047900122276840, -6.080211153116984,
-        0.769509301515319, -2.763610156675313, -5.972172587527176,
-        1.332474692714491, -1.368103875407414, -2.096382536513033,
-        0.304135903830416, -5.168096082564016, -9.604769461465978,
-        0.597445549865284, -1.347101803379971, -5.900246013340080]).reshape(dim_d,dim_g)
-
-  # After 2 iterations
-  z_first_order_a_2 = numpy.array(
-      [-2.144344161196005, -0.000000027851878,  1.217776189037369,  0.232492571855061, -0.212892893868819,
-        -2.144344161196005, -0.000000027851878, -2.382647766948079, -1.759951013670071,  0.587213207926731,
-        -2.144344161196005, -0.000000027851878,  2.143294830538722,  0.909307594408923, -0.183752098508072,
-        -2.144344161196005, -0.000000027851878, -0.662558006326892,  0.717992497547010, -0.202897892977004
-    ]).reshape(4, dim_f+dim_g)
-  z_first_order_b_2 = numpy.array(
-      [ 2.695117129662246,  0.000000035005543, -0.156173294945791, -0.123083763746364,  0.271123341933619,
-        2.695117129662246,  0.000000035005543,  0.690321563509753,  0.944473716646212, -0.850835940962492,
-        2.695117129662246,  0.000000035005543, -0.930970138998433, -0.949736472690315,  0.594216348861889
-    ]).reshape(3, dim_f+dim_g)
-
-  z_second_order_sum_2 = numpy.array(
-      [41.602421167226410,  0.000000449434708, -1.513391506933811, -0.477818674270533,  0.059260102368316,
-        0.000000449434708,  7.000000000000005, -0.000000023255959, -0.000000005157439, -0.000000003230262,
-        -1.513391506933810, -0.000000023255959, 14.399631061987494,  8.068678077509025, -3.227586434905497,
-        -0.477818674270533, -0.000000005157439,  8.068678077509025,  7.263248678863863, -3.060665688064639,
-        0.059260102368316, -0.000000003230262, -3.227586434905497, -3.060665688064639,  1.705174220723198
-    ]).reshape(dim_f+dim_g, dim_f+dim_g)
-
-  sigma_2 = numpy.array(
-    [1.120493935052524, 1.777598857891599, 0.197579528599150,
-      0.407657093211478, 0.166216300651473, 1.044336960403809,
-      0.287856936559308])
-
-  F_2 = numpy.array(
-    [-0.111956311978966,  0.000000000781025,
-      0.702502767389263,  0.000000007683917,
-      0.337823622542517,  0.000000000637302,
-      0.551363737526339,  0.000000004854293,
-     -0.096561040511417, -0.000000001716011,
-     -0.661587484803602, -0.000000012394362,
-     -0.346593051621620, -0.000000007134046]).reshape(dim_d,dim_f)
-
-  G_2 = numpy.array(
-    [-2.266404374274820,  4.089199685832099,  7.023039382876370,
-      0.094887459097613, -3.226829318470136, -3.452279917194724,
-     -0.498398131733141, -1.651712333649899, -6.548008210704172,
-      0.574932298590327, -2.198978667003715, -5.131253543126156,
-      1.415857426810629, -1.627795701160212, -2.509013676007012,
-     -0.543552834305580, -3.215063993186718, -7.006305082499653,
-      0.562108137758111, -0.785296641855087, -5.318335345720314]).reshape(dim_d,dim_g)
-
-  # Runs the PLDA trainer EM-steps (2 steps)
-
-  # Defines base trainer and machine
-  t = PLDATrainer()
-  t0 = PLDATrainer(t)
-  m = PLDABase(dim_d,dim_f,dim_g)
-  t.initialize(m,l)
-  m.sigma = sigma_init
-  m.g = G_init
-  m.f = F_init
-
-  # Defines base trainer and machine (for Python implementation
-  t_py = PythonPLDATrainer()
-  m_py = PLDABase(dim_d,dim_f,dim_g)
-  t_py.initialize(m_py,l)
-  m_py.sigma = sigma_init
-  m_py.g = G_init
-  m_py.f = F_init
-
-  # E-step 1
-  t.e_step(m,l)
-  t_py.e_step(m_py,l)
-  # Compares statistics to Prince matlab reference
-  assert numpy.allclose(t.z_first_order[0], z_first_order_a_1, 1e-10)
-  assert numpy.allclose(t.z_first_order[1], z_first_order_b_1, 1e-10)
-  assert numpy.allclose(t.z_second_order_sum, z_second_order_sum_1, 1e-10)
-  # Compares statistics against the ones of the python implementation
-  assert numpy.allclose(t.z_first_order[0], t_py.m_z_first_order[0], 1e-10)
-  assert numpy.allclose(t.z_first_order[1], t_py.m_z_first_order[1], 1e-10)
-  assert numpy.allclose(t.z_second_order_sum, t_py.m_sum_z_second_order, 1e-10)
-
-  # M-step 1
-  t.m_step(m,l)
-  t_py.m_step(m_py,l)
-  # Compares F, G and sigma to Prince matlab reference
-  assert numpy.allclose(m.f, F_1, 1e-10)
-  assert numpy.allclose(m.g, G_1, 1e-10)
-  assert numpy.allclose(m.sigma, sigma_1, 1e-10)
-  # Compares F, G and sigma to the ones of the python implementation
-  assert numpy.allclose(m.f, m_py.f, 1e-10)
-  assert numpy.allclose(m.g, m_py.g, 1e-10)
-  assert numpy.allclose(m.sigma, m_py.sigma, 1e-10)
-
-  # E-step 2
-  t.e_step(m,l)
-  t_py.e_step(m_py,l)
-  # Compares statistics to Prince matlab reference
-  assert numpy.allclose(t.z_first_order[0], z_first_order_a_2, 1e-10)
-  assert numpy.allclose(t.z_first_order[1], z_first_order_b_2, 1e-10)
-  assert numpy.allclose(t.z_second_order_sum, z_second_order_sum_2, 1e-10)
-  # Compares statistics against the ones of the python implementation
-  assert numpy.allclose(t.z_first_order[0], t_py.m_z_first_order[0], 1e-10)
-  assert numpy.allclose(t.z_first_order[1], t_py.m_z_first_order[1], 1e-10)
-  assert numpy.allclose(t.z_second_order_sum, t_py.m_sum_z_second_order, 1e-10)
-
-  # M-step 2
-  t.m_step(m,l)
-  t_py.m_step(m_py,l)
-  # Compares F, G and sigma to Prince matlab reference
-  assert numpy.allclose(m.f, F_2, 1e-10)
-  assert numpy.allclose(m.g, G_2, 1e-10)
-  assert numpy.allclose(m.sigma, sigma_2, 1e-10)
-  # Compares F, G and sigma to the ones of the python implementation
-  assert numpy.allclose(m.f, m_py.f, 1e-10)
-  assert numpy.allclose(m.g, m_py.g, 1e-10)
-  assert numpy.allclose(m.sigma, m_py.sigma, 1e-10)
-
-
-  # Test the second order statistics computation
-  # Calls the initialization methods and resets randomly initialized values
-  # to new reference ones (to make the tests deterministic)
-  t.use_sum_second_order = False
-  t.initialize(m,l)
-  m.sigma = sigma_init
-  m.g = G_init
-  m.f = F_init
-  t_py.initialize(m_py,l)
-  m_py.sigma = sigma_init
-  m_py.g = G_init
-  m_py.f = F_init
-
-  # E-step 1
-  t.e_step(m,l)
-  t_py.e_step(m_py,l)
-  # Compares statistics to Prince matlab reference
-  assert numpy.allclose(t.z_first_order[0], z_first_order_a_1, 1e-10)
-  assert numpy.allclose(t.z_first_order[1], z_first_order_b_1, 1e-10)
-  # Compares statistics against the ones of the python implementation
-  assert numpy.allclose(t.z_first_order[0], t_py.m_z_first_order[0], 1e-10)
-  assert numpy.allclose(t.z_first_order[1], t_py.m_z_first_order[1], 1e-10)
-  assert numpy.allclose(t.z_second_order[0], t_py.m_z_second_order[0], 1e-10)
-  assert numpy.allclose(t.z_second_order[1], t_py.m_z_second_order[1], 1e-10)
-  assert numpy.allclose(t.z_second_order_sum, t_py.m_sum_z_second_order, 1e-10)
-
-  # M-step 1
-  t.m_step(m,l)
-  t_py.m_step(m_py,l)
-  # Compares F, G and sigma to the ones of the python implementation
-  assert numpy.allclose(m.f, m_py.f, 1e-10)
-  assert numpy.allclose(m.g, m_py.g, 1e-10)
-  assert numpy.allclose(m.sigma, m_py.sigma, 1e-10)
-
-  # E-step 2
-  t.e_step(m,l)
-  t_py.e_step(m_py,l)
-  # Compares statistics to Prince matlab reference
-  assert numpy.allclose(t.z_first_order[0], z_first_order_a_2, 1e-10)
-  assert numpy.allclose(t.z_first_order[1], z_first_order_b_2, 1e-10)
-  # Compares statistics against the ones of the python implementation
-  assert numpy.allclose(t.z_first_order[0], t_py.m_z_first_order[0], 1e-10)
-  assert numpy.allclose(t.z_first_order[1], t_py.m_z_first_order[1], 1e-10)
-  assert numpy.allclose(t.z_second_order[0], t_py.m_z_second_order[0], 1e-10)
-  assert numpy.allclose(t.z_second_order[1], t_py.m_z_second_order[1], 1e-10)
-  assert numpy.allclose(t.z_second_order_sum, t_py.m_sum_z_second_order, 1e-10)
-
-  # M-step 2
-  t.m_step(m,l)
-  t_py.m_step(m_py,l)
-  # Compares F, G and sigma to the ones of the python implementation
-  assert numpy.allclose(m.f, m_py.f, 1e-10)
-  assert numpy.allclose(m.g, m_py.g, 1e-10)
-  assert numpy.allclose(m.sigma, m_py.sigma, 1e-10)
-
-
-def test_plda_enrollment():
-  # Data used for performing the tests
-  # Features and subspaces dimensionality
-  dim_d = 7
-  dim_f = 2
-  dim_g = 3
-
-  # initial values for F, G and sigma
-  G_init=numpy.array([-1.1424, -0.5044, -0.1917,
-    -0.6249,  0.1021, -0.8658,
-    -1.1687,  1.1963,  0.1807,
-    0.3926,  0.1203,  1.2665,
-    1.3018, -1.0368, -0.2512,
-    -0.5936, -0.8571, -0.2046,
-    0.4364, -0.1699, -2.2015]).reshape(dim_d,dim_g)
-  # F <-> PCA on G
-  F_init=numpy.array([-0.054222647972093, -0.000000000783146,
-    0.596449127693018,  0.000000006265167,
-    0.298224563846509,  0.000000003132583,
-    0.447336845769764,  0.000000009397750,
-    -0.108445295944185, -0.000000001566292,
-    -0.501559493741856, -0.000000006265167,
-    -0.298224563846509, -0.000000003132583]).reshape(dim_d,dim_f)
-  sigma_init = 0.01 * numpy.ones((dim_d,), 'float64')
-  mean_zero = numpy.zeros((dim_d,), 'float64')
-
-  # base machine
-  mb = PLDABase(dim_d,dim_f,dim_g)
-  mb.sigma = sigma_init
-  mb.g = G_init
-  mb.f = F_init
-  mb.mu = mean_zero
-
-  # Data for likelihood computation
-  x1 = numpy.array([0.8032, 0.3503, 0.4587, 0.9511, 0.1330, 0.0703, 0.7061])
-  x2 = numpy.array([0.9317, 0.1089, 0.6517, 0.1461, 0.6940, 0.6256, 0.0437])
-  x3 = numpy.array([0.7979, 0.9862, 0.4367, 0.3447, 0.0488, 0.2252, 0.5810])
-  a_enrol = []
-  a_enrol.append(x1)
-  a_enrol.append(x2)
-  a_enrol = numpy.array(a_enrol)
-
-  # reference likelihood from Prince implementation
-  ll_ref = -182.8880743535197
-
-  # Computes the likelihood using x1 and x2 as enrollment samples
-  # and x3 as a probe sample
-  m = PLDAMachine(mb)
-  t = PLDATrainer()
-  t.enrol(m, a_enrol)
-  ll = m.compute_log_likelihood(x3)
-  
-  assert abs(ll - ll_ref) < 1e-10
-
-  # reference obtained by computing the likelihood of [x1,x2,x3], [x1,x2]
-  # and [x3] separately
-  llr_ref = -4.43695386675
-  llr = m(x3)
-  assert abs(llr - llr_ref) < 1e-10
-  #
-  llr_separate = m.compute_log_likelihood(numpy.array([x1,x2,x3]), False) - \
-    (m.compute_log_likelihood(numpy.array([x1,x2]), False) + m.compute_log_likelihood(numpy.array([x3]), False))
-  assert abs(llr - llr_separate) < 1e-10
-
-
-
-def test_plda_comparisons():
-
-  t1 = PLDATrainer()
-  t2 = PLDATrainer()
-
-  #t2.rng = t1.rng
-
-  assert t1 == t2
-  assert (t1 != t2 ) is False
-  assert t1.is_similar_to(t2)
-
-  training_set = [numpy.array([[1,2,3,4]], numpy.float64), numpy.array([[3,4,3,4]], numpy.float64)]
-  m = PLDABase(4,1,1,1e-8)
-  t1.rng.seed(37)
-  t1.initialize(m, training_set)
-  t1.e_step(m, training_set)
-  t1.m_step(m, training_set)
-  assert (t1 == t2 ) is False
-  assert t1 != t2
-  assert (t1.is_similar_to(t2) ) is False
-  t2.rng.seed(37)
-  t2.initialize(m, training_set)
-  t2.e_step(m, training_set)
-  t2.m_step(m, training_set)
-  assert t1 == t2
-  assert (t1 != t2 ) is False
-  assert t1.is_similar_to(t2)
-  t2.rng.seed(77)
-  t2.initialize(m, training_set)
-  t2.e_step(m, training_set)
-  t2.m_step(m, training_set)
-  assert (t1 == t2 ) is False
-  assert t1 != t2
-  assert (t1.is_similar_to(t2) ) is False
-
-  
diff --git a/bob/learn/misc/test_ztnorm.py b/bob/learn/misc/test_ztnorm.py
deleted file mode 100644
index ee74a446998feded7ff624c5645db1eb4c240675..0000000000000000000000000000000000000000
--- a/bob/learn/misc/test_ztnorm.py
+++ /dev/null
@@ -1,122 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Francois Moulin <Francois.Moulin@idiap.ch>
-# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
-# Tue Jul 19 15:33:20 2011 +0200
-#
-# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
-
-"""Tests on the ZTNorm function
-"""
-
-import numpy
-
-from bob.io.base.test_utils import datafile
-import bob.io.base
-
-#from . import znorm, tnorm, ztnorm
-import bob.learn.misc
-
-def sameValue(vect_A, vect_B):
-  sameMatrix = numpy.zeros((vect_A.shape[0], vect_B.shape[0]), 'bool')
-
-  for j in range(vect_A.shape[0]):
-    for i in range(vect_B.shape[0]):
-      sameMatrix[j, i] = (vect_A[j] == vect_B[i])
-
-  return sameMatrix
-
-def tnorm(A, C):
-  Cmean = numpy.mean(C, axis=0)
-  if C.shape[1] > 1:
-    Cstd = numpy.sqrt(numpy.sum((C - numpy.tile(Cmean.reshape(1,C.shape[1]), (C.shape[0],1))) ** 2, axis=0) / (C.shape[0]-1))
-  else:
-    Cstd = numpy.ones(shape=(C.shape[1],), dtype=numpy.float64)
-  return (A - numpy.tile(Cmean.reshape(1,C.shape[1]), (A.shape[0],1))) / numpy.tile(Cstd.reshape(1,C.shape[1]), (A.shape[0],1))
-
-def znorm(A, B):
-  Bmean = numpy.mean(B, axis=1)
-  if B.shape[1] > 1:
-    Bstd = numpy.sqrt(numpy.sum((B - numpy.tile(Bmean.reshape(B.shape[0],1), (1,B.shape[1]))) ** 2, axis=1) / (B.shape[1]-1))
-  else:
-    Bstd = numpy.ones(shape=(B.shape[0],), dtype=numpy.float64)
-
-  return (A - numpy.tile(Bmean.reshape(B.shape[0],1), (1,A.shape[1]))) / numpy.tile(Bstd.reshape(B.shape[0],1), (1,A.shape[1]))
-
-
-def test_ztnorm_simple():
-  # 3x5
-  my_A = numpy.array([[1, 2, 3, 4, 5],
-                      [6, 7, 8, 9, 8],
-                      [7, 6, 5, 4, 3]],'float64')
-  # 3x4
-  my_B = numpy.array([[5, 4, 7, 8],[9, 8, 7, 4],[5, 6, 3, 2]],'float64')
-  # 2x5
-  my_C = numpy.array([[5, 4, 3, 2, 1],[2, 1, 2, 3, 4]],'float64')
-  # 2x4
-  my_D = numpy.array([[8, 6, 4, 2],[0, 2, 4, 6]],'float64')
-
-  # 4x1
-  znorm_id = numpy.array([1, 2, 3, 4],'uint32')
-  # 2x1
-  tnorm_id = numpy.array([1, 5],'uint32')
-  
-  scores = bob.learn.misc.ztnorm(my_A, my_B, my_C, my_D,
-      sameValue(tnorm_id, znorm_id))
-
-  ref_scores = numpy.array([[-4.45473107e+00, -3.29289322e+00, -1.50519101e+01, -8.42086557e-01, 6.46544511e-03], [-8.27619927e-01,  7.07106781e-01,  1.13757710e+01,  2.01641412e+00, 7.63765080e-01], [ 2.52913570e+00,  2.70710678e+00,  1.24400233e+01,  7.07106781e-01, 6.46544511e-03]], 'float64')
-
-  assert (abs(scores - ref_scores) < 1e-7).all()
-
-def test_ztnorm_big():
-  my_A = bob.io.base.load(datafile("ztnorm_eval_eval.hdf5", __name__))
-  my_B = bob.io.base.load(datafile("ztnorm_znorm_eval.hdf5", __name__))
-  my_C = bob.io.base.load(datafile("ztnorm_eval_tnorm.hdf5", __name__))
-  my_D = bob.io.base.load(datafile("ztnorm_znorm_tnorm.hdf5", __name__))
-
-  # ZT-Norm
-  ref_scores = bob.io.base.load(datafile("ztnorm_result.hdf5", __name__))
-  scores = bob.learn.misc.ztnorm(my_A, my_B, my_C, my_D)
-  assert (abs(scores - ref_scores) < 1e-7).all()
-
-  # T-Norm
-  scores = tnorm(my_A, my_C)
-  scores_py = tnorm(my_A, my_C)
-  assert (abs(scores - scores_py) < 1e-7).all()
-
-  # Z-Norm
-  scores = znorm(my_A, my_B)
-  scores_py = znorm(my_A, my_B)
-  assert (abs(scores - scores_py) < 1e-7).all()
-
-def test_tnorm_simple():
-  # 3x5
-  my_A = numpy.array([[1, 2, 3, 4, 5],
-                      [6, 7, 8, 9, 8],
-                      [7, 6, 5, 4, 3]],'float64')
-  # 2x5
-  my_C = numpy.array([[5, 4, 3, 2, 1],[2, 1, 2, 3, 4]],'float64')
-
-  zC = tnorm(my_A, my_C)
-  zC_py = tnorm(my_A, my_C)
-  assert (abs(zC - zC_py) < 1e-7).all()
-
-  empty = numpy.zeros(shape=(0,0), dtype=numpy.float64)
-  zC = bob.learn.misc.ztnorm(my_A, empty, my_C, empty)
-  assert (abs(zC - zC_py) < 1e-7).all()
-
-def test_znorm_simple():
-  # 3x5
-  my_A = numpy.array([[1, 2, 3, 4, 5],
-                      [6, 7, 8, 9, 8],
-                      [7, 6, 5, 4, 3]], numpy.float64)
-  # 3x4
-  my_B = numpy.array([[5, 4, 7, 8],[9, 8, 7, 4],[5, 6, 3, 2]], numpy.float64)
-
-  zA = znorm(my_A, my_B)
-  zA_py = znorm(my_A, my_B)
-  assert (abs(zA - zA_py) < 1e-7).all()
-
-  empty = numpy.zeros(shape=(0,0), dtype=numpy.float64)
-  zA = bob.learn.misc.ztnorm(my_A, my_B, empty, empty)
-  assert (abs(zA - zA_py) < 1e-7).all()
diff --git a/bob/learn/misc/version.cpp b/bob/learn/misc/version.cpp
deleted file mode 100644
index d3c36a9a80865f123deb2a1b21b7fb70116749bb..0000000000000000000000000000000000000000
--- a/bob/learn/misc/version.cpp
+++ /dev/null
@@ -1,224 +0,0 @@
-/**
- * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
- * @date Mon Apr 14 20:43:48 CEST 2014
- *
- * @brief Binds configuration information available from bob
- */
-
-#ifdef NO_IMPORT_ARRAY
-#undef NO_IMPORT_ARRAY
-#endif
-#include <bob.blitz/capi.h>
-#include <bob.blitz/cleanup.h>
-
-#include <bob.core/config.h>
-#include <bob.io.base/config.h>
-#include <bob.sp/config.h>
-#include <bob.math/config.h>
-#include <bob.learn.activation/config.h>
-#include <bob.learn.linear/config.h>
-// TODO: add other dependencies
-
-#include <string>
-#include <cstdlib>
-#include <blitz/blitz.h>
-#include <boost/preprocessor/stringize.hpp>
-#include <boost/version.hpp>
-#include <boost/format.hpp>
-
-
-static int dict_set(PyObject* d, const char* key, const char* value) {
-  PyObject* v = Py_BuildValue("s", value);
-  if (!v) return 0;
-  int retval = PyDict_SetItemString(d, key, v);
-  Py_DECREF(v);
-  if (retval == 0) return 1; //all good
-  return 0; //a problem occurred
-}
-
-static int dict_steal(PyObject* d, const char* key, PyObject* value) {
-  if (!value) return 0;
-  int retval = PyDict_SetItemString(d, key, value);
-  Py_DECREF(value);
-  if (retval == 0) return 1; //all good
-  return 0; //a problem occurred
-}
-
-/**
- * Describes the version of Boost libraries installed
- */
-static PyObject* boost_version() {
-  boost::format f("%d.%d.%d");
-  f % (BOOST_VERSION / 100000);
-  f % (BOOST_VERSION / 100 % 1000);
-  f % (BOOST_VERSION % 100);
-  return Py_BuildValue("s", f.str().c_str());
-}
-
-/**
- * Describes the compiler version
- */
-static PyObject* compiler_version() {
-# if defined(__GNUC__) && !defined(__llvm__)
-  boost::format f("%s.%s.%s");
-  f % BOOST_PP_STRINGIZE(__GNUC__);
-  f % BOOST_PP_STRINGIZE(__GNUC_MINOR__);
-  f % BOOST_PP_STRINGIZE(__GNUC_PATCHLEVEL__);
-  return Py_BuildValue("ss", "gcc", f.str().c_str());
-# elif defined(__llvm__) && !defined(__clang__)
-  return Py_BuildValue("ss", "llvm-gcc", __VERSION__);
-# elif defined(__clang__)
-  return Py_BuildValue("ss", "clang", __clang_version__);
-# else
-  return Py_BuildValue("s", "unsupported");
-# endif
-}
-
-/**
- * Python version with which we compiled the extensions
- */
-static PyObject* python_version() {
-  boost::format f("%s.%s.%s");
-  f % BOOST_PP_STRINGIZE(PY_MAJOR_VERSION);
-  f % BOOST_PP_STRINGIZE(PY_MINOR_VERSION);
-  f % BOOST_PP_STRINGIZE(PY_MICRO_VERSION);
-  return Py_BuildValue("s", f.str().c_str());
-}
-
-/**
- * Numpy version
- */
-static PyObject* numpy_version() {
-  return Py_BuildValue("{ssss}", "abi", BOOST_PP_STRINGIZE(NPY_VERSION),
-      "api", BOOST_PP_STRINGIZE(NPY_API_VERSION));
-}
-
-/**
- * bob.blitz c/c++ api version
- */
-static PyObject* bob_blitz_version() {
-  return Py_BuildValue("{ss}", "api", BOOST_PP_STRINGIZE(BOB_BLITZ_API_VERSION));
-}
-
-/**
- * bob.core c/c++ api version
- */
-static PyObject* bob_core_version() {
-  return Py_BuildValue("{ss}", "api", BOOST_PP_STRINGIZE(BOB_CORE_API_VERSION));
-}
-
-/**
- * bob.io.base c/c++ api version
- */
-static PyObject* bob_io_base_version() {
-  return Py_BuildValue("{ss}", "api", BOOST_PP_STRINGIZE(BOB_IO_BASE_API_VERSION));
-}
-
-/**
- * bob.sp c/c++ api version
- */
-static PyObject* bob_sp_version() {
-  return Py_BuildValue("{ss}", "api", BOOST_PP_STRINGIZE(BOB_SP_API_VERSION));
-}
-
-/**
- * bob.math c/c++ api version
- */
-static PyObject* bob_math_version() {
-  return Py_BuildValue("{ss}", "api", BOOST_PP_STRINGIZE(BOB_MATH_API_VERSION));
-}
-
-/**
- * bob.learn.activation c/c++ api version
- */
-static PyObject* bob_learn_activation_version() {
-  return Py_BuildValue("{ss}", "api", BOOST_PP_STRINGIZE(BOB_LEARN_ACTIVATION_API_VERSION));
-}
-
-/**
- * bob.learn.linear c/c++ api version
- */
-static PyObject* bob_learn_linear_version() {
-  return Py_BuildValue("{ss}", "api", BOOST_PP_STRINGIZE(BOB_LEARN_LINEAR_API_VERSION));
-}
-
-
-static PyObject* build_version_dictionary() {
-
-  PyObject* retval = PyDict_New();
-  if (!retval) return 0;
-  auto retval_ = make_safe(retval);
-
-  if (!dict_set(retval, "Blitz++", BZ_VERSION)) return 0;
-  if (!dict_steal(retval, "Boost", boost_version())) return 0;
-  if (!dict_steal(retval, "Compiler", compiler_version())) return 0;
-  if (!dict_steal(retval, "Python", python_version())) return 0;
-  if (!dict_steal(retval, "NumPy", numpy_version())) return 0;
-  if (!dict_steal(retval, "bob.blitz", bob_blitz_version())) return 0;
-  if (!dict_steal(retval, "bob.core", bob_core_version())) return 0;
-  if (!dict_steal(retval, "bob.io.base", bob_io_base_version())) return 0;
-  if (!dict_steal(retval, "bob.sp", bob_sp_version())) return 0;
-  if (!dict_steal(retval, "bob.math", bob_math_version())) return 0;
-  if (!dict_steal(retval, "bob.learn.activation", bob_learn_activation_version())) return 0;
-  if (!dict_steal(retval, "bob.learn.linear", bob_learn_linear_version())) return 0;
-  if (!dict_steal(retval, "Bob", bob_core_version())) return 0;
-
-  Py_INCREF(retval);
-  return retval;
-}
-
-static PyMethodDef module_methods[] = {
-    {0}  /* Sentinel */
-};
-
-PyDoc_STRVAR(module_docstr,
-"Information about software used to compile the C++ Bob API"
-);
-
-#if PY_VERSION_HEX >= 0x03000000
-static PyModuleDef module_definition = {
-  PyModuleDef_HEAD_INIT,
-  BOB_EXT_MODULE_NAME,
-  module_docstr,
-  -1,
-  module_methods,
-  0, 0, 0, 0
-};
-#endif
-
-static PyObject* create_module (void) {
-
-# if PY_VERSION_HEX >= 0x03000000
-  PyObject* m = PyModule_Create(&module_definition);
-# else
-  PyObject* m = Py_InitModule3(BOB_EXT_MODULE_NAME, module_methods, module_docstr);
-# endif
-  if (!m) return 0;
-  auto m_ = make_safe(m); ///< protects against early returns
-
-  /* register version numbers and constants */
-  if (PyModule_AddStringConstant(m, "module", BOB_EXT_MODULE_VERSION) < 0)
-    return 0;
-
-  PyObject* externals = build_version_dictionary();
-  if (!externals) return 0;
-  if (PyModule_AddObject(m, "externals", externals) < 0) return 0;
-
-  /* imports dependencies */
-  if (import_bob_blitz() < 0) {
-    PyErr_Print();
-    PyErr_Format(PyExc_ImportError, "cannot import `%s'", BOB_EXT_MODULE_NAME);
-    return 0;
-  }
-
-  Py_INCREF(m);
-  return m;
-
-}
-
-PyMODINIT_FUNC BOB_EXT_ENTRY_NAME (void) {
-# if PY_VERSION_HEX >= 0x03000000
-  return
-# endif
-    create_module();
-}
diff --git a/bob/learn/misc/ztnorm.cpp b/bob/learn/misc/ztnorm.cpp
deleted file mode 100644
index 9e2c6ea61ef01dcd1407353fa0fdba22f54904f9..0000000000000000000000000000000000000000
--- a/bob/learn/misc/ztnorm.cpp
+++ /dev/null
@@ -1,146 +0,0 @@
-/**
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- * @date Sat 31 Jan 02:46:48 2015
- *
- * @brief Python API for bob::learn::em
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "main.h"
-
-/*** zt_norm ***/
-static auto zt_norm = bob::extension::FunctionDoc(
-  "ztnorm",
-  "",
-  0,
-  true
-)
-.add_prototype("rawscores_probes_vs_models,rawscores_zprobes_vs_models,rawscores_probes_vs_tmodels,rawscores_zprobes_vs_tmodels,mask_zprobes_vs_tmodels_istruetrial", "output")
-.add_parameter("rawscores_probes_vs_models", "array_like <float, 2D>", "")
-.add_parameter("rawscores_zprobes_vs_models", "array_like <float, 2D>", "")
-.add_parameter("rawscores_probes_vs_tmodels", "array_like <float, 2D>", "")
-.add_parameter("rawscores_zprobes_vs_tmodels", "array_like <float, 2D>", "")
-.add_parameter("mask_zprobes_vs_tmodels_istruetrial", "array_like <float, 2D>", "")
-.add_return("output","array_like <float, 2D>","");
-static PyObject* PyBobLearnMisc_ztNorm(PyObject*, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = zt_norm.kwlist(0);
-  
-  PyBlitzArrayObject *rawscores_probes_vs_models_o, *rawscores_zprobes_vs_models_o, *rawscores_probes_vs_tmodels_o, 
-  *rawscores_zprobes_vs_tmodels_o, *mask_zprobes_vs_tmodels_istruetrial_o;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&O&O&|O&", kwlist, &PyBlitzArray_Converter, &rawscores_probes_vs_models_o,
-                                                                       &PyBlitzArray_Converter, &rawscores_zprobes_vs_models_o,
-                                                                       &PyBlitzArray_Converter, &rawscores_probes_vs_tmodels_o,
-                                                                       &PyBlitzArray_Converter, &rawscores_zprobes_vs_tmodels_o,
-                                                                       &PyBlitzArray_Converter, &mask_zprobes_vs_tmodels_istruetrial_o)){
-    zt_norm.print_usage();
-    Py_RETURN_NONE;
-  }
-
-  // get the number of command line arguments
-  auto rawscores_probes_vs_models_          = make_safe(rawscores_probes_vs_models_o);
-  auto rawscores_zprobes_vs_models_         = make_safe(rawscores_zprobes_vs_models_o);
-  auto rawscores_probes_vs_tmodels_         = make_safe(rawscores_probes_vs_tmodels_o);
-  auto rawscores_zprobes_vs_tmodels_        = make_safe(rawscores_zprobes_vs_tmodels_o);
-  //auto mask_zprobes_vs_tmodels_istruetrial_ = make_safe(mask_zprobes_vs_tmodels_istruetrial_o);
-
-  blitz::Array<double,2>  rawscores_probes_vs_models = *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_models_o);
-  blitz::Array<double,2> normalized_scores = blitz::Array<double,2>(rawscores_probes_vs_models.extent(0), rawscores_probes_vs_models.extent(1));
-
-  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-
-  if(nargs==4)
-    bob::learn::misc::ztNorm(*PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_models_o),
-                             *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_zprobes_vs_models_o),
-                             *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_tmodels_o),
-                             *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_zprobes_vs_tmodels_o),
-                             normalized_scores);
-  else
-    bob::learn::misc::ztNorm(*PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_models_o), 
-                             *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_zprobes_vs_models_o), 
-                             *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_tmodels_o), 
-                             *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_zprobes_vs_tmodels_o), 
-                             *PyBlitzArrayCxx_AsBlitz<bool,2>(mask_zprobes_vs_tmodels_istruetrial_o),
-                             normalized_scores);
-
-  return PyBlitzArrayCxx_AsConstNumpy(normalized_scores);
-}
-
-
-
-/*** t_norm ***/
-static auto t_norm = bob::extension::FunctionDoc(
-  "tnorm",
-  "",
-  0,
-  true
-)
-.add_prototype("rawscores_probes_vs_models,rawscores_probes_vs_tmodels", "output")
-.add_parameter("rawscores_probes_vs_models", "array_like <float, 2D>", "")
-.add_parameter("rawscores_probes_vs_tmodels", "array_like <float, 2D>", "")
-.add_return("output","array_like <float, 2D>","");
-static PyObject* PyBobLearnMisc_tNorm(PyObject*, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = zt_norm.kwlist(0);
-  
-  PyBlitzArrayObject *rawscores_probes_vs_models_o, *rawscores_probes_vs_tmodels_o;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&", kwlist, &PyBlitzArray_Converter, &rawscores_probes_vs_models_o,
-                                                                       &PyBlitzArray_Converter, &rawscores_probes_vs_tmodels_o)){
-    zt_norm.print_usage();
-    Py_RETURN_NONE;
-  }
-  
-  auto rawscores_probes_vs_models_          = make_safe(rawscores_probes_vs_models_o);
-  auto rawscores_probes_vs_tmodels_         = make_safe(rawscores_probes_vs_tmodels_o);
-
-  blitz::Array<double,2>  rawscores_probes_vs_models = *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_models_o);
-  blitz::Array<double,2> normalized_scores = blitz::Array<double,2>(rawscores_probes_vs_models.extent(0), rawscores_probes_vs_models.extent(1));
-
-  bob::learn::misc::tNorm(*PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_models_o), 
-                           *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_tmodels_o),
-                           normalized_scores);
-
-  return PyBlitzArrayCxx_AsConstNumpy(normalized_scores);
-}
-
-
-/*** z_norm ***/
-static auto z_norm = bob::extension::FunctionDoc(
-  "znorm",
-  "",
-  0,
-  true
-)
-.add_prototype("rawscores_probes_vs_models,rawscores_zprobes_vs_models", "output")
-.add_parameter("rawscores_probes_vs_models", "array_like <float, 2D>", "")
-.add_parameter("rawscores_zprobes_vs_models", "array_like <float, 2D>", "")
-.add_return("output","array_like <float, 2D>","");
-static PyObject* PyBobLearnMisc_zNorm(PyObject*, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = zt_norm.kwlist(0);
-  
-  PyBlitzArrayObject *rawscores_probes_vs_models_o, *rawscores_zprobes_vs_models_o;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&", kwlist, &PyBlitzArray_Converter, &rawscores_probes_vs_models_o,
-                                                                       &PyBlitzArray_Converter, &rawscores_zprobes_vs_models_o)){
-    zt_norm.print_usage();
-    Py_RETURN_NONE;
-  }
-  
-  auto rawscores_probes_vs_models_          = make_safe(rawscores_probes_vs_models_o);
-  auto rawscores_zprobes_vs_models_         = make_safe(rawscores_zprobes_vs_models_o);
-
-  blitz::Array<double,2> rawscores_probes_vs_models = *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_models_o);
-  blitz::Array<double,2> normalized_scores          = blitz::Array<double,2>(rawscores_probes_vs_models.extent(0), rawscores_probes_vs_models.extent(1));
-
-
-  bob::learn::misc::zNorm(*PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_models_o), 
-                           *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_zprobes_vs_models_o),
-                           normalized_scores);
-
-  return PyBlitzArrayCxx_AsConstNumpy(normalized_scores);
-}
-
diff --git a/buildout.cfg b/buildout.cfg
index f8fe5d99356b987f948868209ca33b3be0804182..be276cd9e30873b0671c777477d2826f0a951f07 100644
--- a/buildout.cfg
+++ b/buildout.cfg
@@ -4,7 +4,7 @@
 
 [buildout]
 parts = scripts
-eggs = bob.learn.misc
+eggs = bob.learn.em
 extensions = bob.buildout
 ;             mr.developer
 
diff --git a/doc/conf.py b/doc/conf.py
index 1324990089d0a68a336aba6b813e23928abdfb9f..0317ae8fbae1d3e243e65ae0f899d32c72e96ca0 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -58,12 +58,12 @@ source_suffix = '.rst'
 master_doc = 'index'
 
 # General information about the project.
-project = u'bob.learn.misc'
+project = u'bob.learn.em'
 import time
 copyright = u'%s, Idiap Research Institute' % time.strftime('%Y')
 
 # Grab the setup entry
-distribution = pkg_resources.require('bob.learn.misc')[0]
+distribution = pkg_resources.require('bob.learn.em')[0]
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
@@ -129,7 +129,7 @@ if sphinx.__version__ >= "1.0":
 #html_title = None
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = 'bob_learn_misc'
+#html_short_title = 'bob_learn_em'
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
@@ -187,7 +187,7 @@ html_favicon = 'img/favicon.ico'
 #html_file_suffix = None
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'bob_learn_misc_doc'
+htmlhelp_basename = 'bob_learn_em_doc'
 
 
 # -- Options for LaTeX output --------------------------------------------------
@@ -201,7 +201,7 @@ latex_font_size = '10pt'
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
-  ('index', 'bob_learn_misc.tex', u'Bob Miscellaneous Machine Learning Tools',
+  ('index', 'bob_learn_em.tex', u'Bob Miscellaneous Machine Learning Tools',
    u'Biometrics Group, Idiap Research Institute', 'manual'),
 ]
 
@@ -241,7 +241,7 @@ rst_epilog = """
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
 man_pages = [
-    ('index', 'bob_learn_misc', u'Bob Miscellaneous Machine Learning Tools', [u'Idiap Research Institute'], 1)
+    ('index', 'bob_learn_em', u'Bob Miscellaneous Machine Learning Tools', [u'Idiap Research Institute'], 1)
 ]
 
 # Default processing flags for sphinx
diff --git a/doc/guide.rst b/doc/guide.rst
index 7010dd6576bc35eed8b6e6acb32a71551dcec347..b549affb7177b568a890c3fa058cb95aade2e5c6 100644
--- a/doc/guide.rst
+++ b/doc/guide.rst
@@ -9,7 +9,7 @@
    import numpy
    numpy.set_printoptions(precision=3, suppress=True)
 
-   import bob.learn.misc
+   import bob.learn.em
 
    import os
    import tempfile
@@ -38,12 +38,12 @@ K-means machines
 method which aims to partition a set of observations into :math:`k` clusters.
 The `training` procedure is described further below. Here, we explain only how
 to use the resulting machine. For the sake of example, we create a new
-:py:class:`bob.learn.misc.KMeansMachine` as follows:
+:py:class:`bob.learn.em.KMeansMachine` as follows:
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> machine = bob.learn.misc.KMeansMachine(2,3) # Two clusters with a feature dimensionality of 3
+   >>> machine = bob.learn.em.KMeansMachine(2,3) # Two clusters with a feature dimensionality of 3
    >>> machine.means = numpy.array([[1,0,0],[0,0,1]], 'float64') # Defines the two clusters
 
 Then, given some input data, it is possible to determine to which cluster the
@@ -60,7 +60,7 @@ data is the closest as well as the min distance.
 Gaussian machines
 =================
 
-The :py:class:`bob.learn.misc.Gaussian` represents a `multivariate diagonal
+The :py:class:`bob.learn.em.Gaussian` represents a `multivariate diagonal
 Gaussian (or normal) distribution
 <http://en.wikipedia.org/wiki/Multivariate_normal_distribution>`_. In this
 context, a *diagonal* Gaussian refers to the covariance matrix of the
@@ -68,13 +68,13 @@ distribution being diagonal. When the covariance matrix is diagonal, each
 variable in the distribution is independent of the others.
 
 Objects of this class are normally used as building blocks for more complex
-:py:class:`bob.learn.misc.GMMMachine` or GMM objects, but can also be used
+:py:class:`bob.learn.em.GMMMachine` or GMM objects, but can also be used
 individually. Here is how to create one multivariate diagonal Gaussian
 distribution:
 
 .. doctest::
 
-  >>> g = bob.learn.misc.Gaussian(2) #bi-variate diagonal normal distribution
+  >>> g = bob.learn.em.Gaussian(2) #bi-variate diagonal normal distribution
   >>> g.mean = numpy.array([0.3, 0.7], 'float64')
   >>> g.mean
   array([ 0.3,  0.7])
@@ -82,7 +82,7 @@ distribution:
   >>> g.variance
   array([ 0.2,  0.1])
 
-Once the :py:class:`bob.learn.misc.Gaussian` has been set, you can use it to
+Once the :py:class:`bob.learn.em.Gaussian` has been set, you can use it to
 estimate the log-likelihood of an input feature vector with a matching number
 of dimensions:
 
@@ -91,25 +91,25 @@ of dimensions:
   >>> log_likelihood = g(numpy.array([0.4, 0.4], 'float64'))
 
 As with other machines you can save and re-load machines of this type using
-:py:meth:`bob.learn.misc.Gaussian.save` and the class constructor
+:py:meth:`bob.learn.em.Gaussian.save` and the class constructor
 respectively.
 
 Gaussian mixture models
 =======================
 
-The :py:class:`bob.learn.misc.GMMMachine` represents a Gaussian `mixture model
+The :py:class:`bob.learn.em.GMMMachine` represents a Gaussian `mixture model
 <http://en.wikipedia.org/wiki/Mixture_model>`_ (GMM), which consists of a
-mixture of weighted :py:class:`bob.learn.misc.Gaussian`\s.
+mixture of weighted :py:class:`bob.learn.em.Gaussian`\s.
 
 .. doctest::
 
-  >>> gmm = bob.learn.misc.GMMMachine(2,3) # Mixture of two diagonal Gaussian of dimension 3
+  >>> gmm = bob.learn.em.GMMMachine(2,3) # Mixture of two diagonal Gaussian of dimension 3
 
 By default, the diagonal Gaussian distributions of the GMM are initialized with
 zero mean and unit variance, and the weights are identical. This can be updated
-using the :py:attr:`bob.learn.misc.GMMMachine.means`,
-:py:attr:`bob.learn.misc.GMMMachine.variances` or
-:py:attr:`bob.learn.misc.GMMMachine.weights`.
+using the :py:attr:`bob.learn.em.GMMMachine.means`,
+:py:attr:`bob.learn.em.GMMMachine.variances` or
+:py:attr:`bob.learn.em.GMMMachine.weights`.
 
 .. doctest::
   :options: +NORMALIZE_WHITESPACE
@@ -121,7 +121,7 @@ using the :py:attr:`bob.learn.misc.GMMMachine.means`,
   array([[ 1.,  6.,  2.],
        [ 4.,  3.,  2.]])
 
-Once the :py:class:`bob.learn.misc.GMMMachine` has been set, you can use it to
+Once the :py:class:`bob.learn.em.GMMMachine` has been set, you can use it to
 estimate the log-likelihood of an input feature vector with a matching number
 of dimensions:
 
@@ -130,12 +130,12 @@ of dimensions:
   >>> log_likelihood = gmm(numpy.array([5.1, 4.7, -4.9], 'float64'))
 
 As with other machines you can save and re-load machines of this type using
-:py:meth:`bob.learn.misc.GMMMachine.save` and the class constructor respectively.
+:py:meth:`bob.learn.em.GMMMachine.save` and the class constructor respectively.
 
 Gaussian mixture models Statistics
 ==================================
 
-The :py:class:`bob.learn.misc.GMMStats` is a container for the sufficient
+The :py:class:`bob.learn.em.GMMStats` is a container for the sufficient
 statistics of a GMM distribution.
 
 Given a GMM, the sufficient statistics of a sample can be computed as
@@ -144,7 +144,7 @@ follows:
 .. doctest::
   :options: +NORMALIZE_WHITESPACE
 
-  >>> gs = bob.learn.misc.GMMStats(2,3)
+  >>> gs = bob.learn.em.GMMStats(2,3)
   >>> sample = numpy.array([0.5, 4.5, 1.5])
   >>> gmm.acc_statistics(sample, gs)
   >>> print(gs) # doctest: +SKIP
@@ -155,7 +155,7 @@ considering the following attributes.
 .. doctest::
   :options: +NORMALIZE_WHITESPACE
 
-  >>> gs = bob.learn.misc.GMMStats(2,3)
+  >>> gs = bob.learn.em.GMMStats(2,3)
   >>> log_likelihood = -3. # log-likelihood of the accumulated samples
   >>> T = 1 # Number of samples used to accumulate statistics
   >>> n = numpy.array([0.4, 0.6], 'float64') # zeroth order stats
@@ -176,19 +176,19 @@ a within-class subspace :math:`U`, a between-class subspace :math:`V`, and a
 subspace for the residuals :math:`D` to capture and suppress a significant
 portion of between-class variation.
 
-An instance of :py:class:`bob.learn.misc.JFABase` carries information about
+An instance of :py:class:`bob.learn.em.JFABase` carries information about
 the matrices :math:`U`, :math:`V` and :math:`D`, which can be shared between
 several classes.  In contrast, after the enrolment phase, an instance of
-:py:class:`bob.learn.misc.JFAMachine` carries class-specific information about
+:py:class:`bob.learn.em.JFAMachine` carries class-specific information about
 the latent variables :math:`y` and :math:`z`.
 
-An instance of :py:class:`bob.learn.misc.JFABase` can be initialized as
+An instance of :py:class:`bob.learn.em.JFABase` can be initialized as
 follows, given an existing GMM:
 
 .. doctest::
   :options: +NORMALIZE_WHITESPACE
 
-  >>> jfa_base = bob.learn.misc.JFABase(gmm,2,2) # dimensions of U and V are both equal to 2
+  >>> jfa_base = bob.learn.em.JFABase(gmm,2,2) # dimensions of U and V are both equal to 2
   >>> U = numpy.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]], 'float64')
   >>> V = numpy.array([[6, 5], [4, 3], [2, 1], [1, 2], [3, 4], [5, 6]], 'float64')
   >>> d = numpy.array([0, 1, 0, 1, 0, 1], 'float64')
@@ -196,33 +196,33 @@ follows, given an existing GMM:
   >>> jfa_base.v = V
   >>> jfa_base.d = d
 
-Next, this :py:class:`bob.learn.misc.JFABase` can be shared by several
-instances of :py:class:`bob.learn.misc.JFAMachine`, the initialization being
+Next, this :py:class:`bob.learn.em.JFABase` can be shared by several
+instances of :py:class:`bob.learn.em.JFAMachine`, the initialization being
 as follows:
 
 .. doctest::
   :options: +NORMALIZE_WHITESPACE
 
-  >>> m = bob.learn.misc.JFAMachine(jfa_base)
+  >>> m = bob.learn.em.JFAMachine(jfa_base)
   >>> m.y = numpy.array([1,2], 'float64')
   >>> m.z = numpy.array([3,4,1,2,0,1], 'float64')
 
 
-Once the :py:class:`bob.learn.misc.JFAMachine` has been configured for a
+Once the :py:class:`bob.learn.em.JFAMachine` has been configured for a
 specific class, the log-likelihood (score) that an input sample belongs to the
 enrolled class, can be estimated, by first computing the GMM sufficient
 statistics of this input sample, and then calling the
-:py:meth:`bob.learn.misc.JFAMachine.forward` on the sufficient statistics.
+:py:meth:`bob.learn.em.JFAMachine.forward` on the sufficient statistics.
 
 .. doctest::
   :options: +NORMALIZE_WHITESPACE
 
-  >>> gs = bob.learn.misc.GMMStats(2,3)
+  >>> gs = bob.learn.em.GMMStats(2,3)
   >>> gmm.acc_statistics(sample, gs)
   >>> score = m.forward(gs)
 
 As with other machines you can save and re-load machines of this type using
-:py:meth:`bob.learn.misc.JFAMachine.save` and the class constructor
+:py:meth:`bob.learn.em.JFAMachine.save` and the class constructor
 respectively.
 
 
@@ -236,47 +236,47 @@ the Gaussian mixture modelling approach. It utilises a within-class subspace
 significant portion of between-class variation. The main difference compared to
 JFA is the absence of the between-class subspace :math:`V`.
 
-Similarly to JFA, an instance of :py:class:`bob.learn.misc.JFABase` carries
+Similarly to JFA, an instance of :py:class:`bob.learn.em.JFABase` carries
 information about the matrices :math:`U` and :math:`D`, which can be shared
 between several classes, whereas an instance of
-:py:class:`bob.learn.misc.JFAMachine` carries class-specific information about
+:py:class:`bob.learn.em.JFAMachine` carries class-specific information about
 the latent variable :math:`z`.
 
-An instance of :py:class:`bob.learn.misc.ISVBase` can be initialized as
+An instance of :py:class:`bob.learn.em.ISVBase` can be initialized as
 follows, given an existing GMM:
 
 .. doctest::
   :options: +NORMALIZE_WHITESPACE
 
-  >>> isv_base = bob.learn.misc.ISVBase(gmm,2) # dimension of U is equal to 2
+  >>> isv_base = bob.learn.em.ISVBase(gmm,2) # dimension of U is equal to 2
   >>> isv_base.u = U
   >>> isv_base.d = d
 
-Next, this :py:class:`bob.learn.misc.ISVBase` can be shared by several
-instances of :py:class:`bob.learn.misc.ISVMachine`, the initialization being
+Next, this :py:class:`bob.learn.em.ISVBase` can be shared by several
+instances of :py:class:`bob.learn.em.ISVMachine`, the initialization being
 as follows:
 
 .. doctest::
   :options: +NORMALIZE_WHITESPACE
 
-  >>> m = bob.learn.misc.ISVMachine(isv_base)
+  >>> m = bob.learn.em.ISVMachine(isv_base)
   >>> m.z = numpy.array([3,4,1,2,0,1], 'float64')
 
-Once the :py:class:`bob.learn.misc.ISVMachine` has been configured for a
+Once the :py:class:`bob.learn.em.ISVMachine` has been configured for a
 specific class, the log-likelihood (score) that an input sample belongs to the
 enrolled class, can be estimated, by first computing the GMM sufficient
 statistics of this input sample, and then calling the
-:py:meth:`bob.learn.misc.ISVMachine.forward` on the sufficient statistics.
+:py:meth:`bob.learn.em.ISVMachine.forward` on the sufficient statistics.
 
 .. doctest::
   :options: +NORMALIZE_WHITESPACE
 
-  >>> gs = bob.learn.misc.GMMStats(2,3)
+  >>> gs = bob.learn.em.GMMStats(2,3)
   >>> gmm.acc_statistics(sample, gs)
   >>> score = m.forward(gs)
 
 As with other machines you can save and re-load machines of this type using
-:py:meth:`bob.learn.misc.ISVMachine.save` and the class constructor
+:py:meth:`bob.learn.em.ISVMachine.save` and the class constructor
 respectively.
 
 
@@ -289,30 +289,30 @@ dimensionality called ``i-vectors``. The model consists of a subspace :math:`T`
 and a residual diagonal covariance matrix :math:`\Sigma`, that are then used to
 extract i-vectors, and is built upon the GMM approach.
 
-An instance of the class :py:class:`bob.learn.misc.IVectorMachine` carries
+An instance of the class :py:class:`bob.learn.em.IVectorMachine` carries
 information about these two matrices. This can be initialized as follows:
 
 .. doctest::
   :options: +NORMALIZE_WHITESPACE
 
-  >>> m = bob.learn.misc.IVectorMachine(gmm, 2)
+  >>> m = bob.learn.em.IVectorMachine(gmm, 2)
   >>> m.t = numpy.array([[1.,2],[4,1],[0,3],[5,8],[7,10],[11,1]])
   >>> m.sigma = numpy.array([1.,2.,1.,3.,2.,4.])
 
 
-Once the :py:class:`bob.learn.misc.IVectorMachine` has been set, the
+Once the :py:class:`bob.learn.em.IVectorMachine` has been set, the
 extraction of an i-vector :math:`w_{ij}` can be done in two steps, by first
 extracting the GMM sufficient statistics, and then estimating the i-vector:
 
 .. doctest::
   :options: +NORMALIZE_WHITESPACE
 
-  >>> gs = bob.learn.misc.GMMStats(2,3)
+  >>> gs = bob.learn.em.GMMStats(2,3)
   >>> gmm.acc_statistics(sample, gs)
   >>> w_ij = m.forward(gs)
 
 As with other machines you can save and re-load machines of this type using
-:py:meth:`bob.learn.misc.IVectorMachine.save` and the class constructor
+:py:meth:`bob.learn.em.IVectorMachine.save` and the class constructor
 respectively.
 
 
@@ -332,22 +332,22 @@ diagonal covariance matrix :math:`\Sigma`, the model assumes that a sample
 
 Information about a PLDA model (:math:`\mu`, :math:`F`, :math:`G` and
 :math:`\Sigma`) are carried out by an instance of the class
-:py:class:`bob.learn.misc.PLDABase`.
+:py:class:`bob.learn.em.PLDABase`.
 
 .. doctest::
 
    >>> ### This creates a PLDABase container for input feature of dimensionality 3,
    >>> ### and with subspaces F and G of rank 1 and 2 respectively.
-   >>> pldabase = bob.learn.misc.PLDABase(3,1,2)
+   >>> pldabase = bob.learn.em.PLDABase(3,1,2)
 
 Class-specific information (usually from enrollment samples) are contained in
-an instance of :py:class:`bob.learn.misc.PLDAMachine`, that must be attached
-to a given :py:class:`bob.learn.misc.PLDABase`. Once done, log-likelihood
+an instance of :py:class:`bob.learn.em.PLDAMachine`, that must be attached
+to a given :py:class:`bob.learn.em.PLDABase`. Once done, log-likelihood
 computations can be performed.
 
 .. doctest::
 
-   >>> plda = bob.learn.misc.PLDAMachine(pldabase)
+   >>> plda = bob.learn.em.PLDAMachine(pldabase)
    >>> samples = numpy.array([[3.5,-3.4,102], [4.5,-4.3,56]], dtype=numpy.float64)
    >>> loglike = plda.compute_log_likelihood(samples)
 
@@ -373,13 +373,13 @@ class, the training data is passed in a 2D :py:class:`numpy.ndarray` container.
    >>> data = numpy.array([[3,-3,100], [4,-4,98], [3.5,-3.5,99], [-7,7,-100], [-5,5,-101]], dtype='float64')
 
 The training procedure will learn the `means` for the
-:py:class:`bob.learn.misc.KMeansMachine`. The number :math:`k` of `means` is given
+:py:class:`bob.learn.em.KMeansMachine`. The number :math:`k` of `means` is given
 when creating the `machine`, as well as the dimensionality of the features.
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> kmeans = bob.learn.misc.KMeansMachine(2, 3) # Create a machine with k=2 clusters with a dimensionality equal to 3
+   >>> kmeans = bob.learn.em.KMeansMachine(2, 3) # Create a machine with k=2 clusters with a dimensionality equal to 3
 
 Then training procedure for `k-means` is an **Expectation-Maximization**-based
 [8]_ algorithm. There are several options that can be set such as the maximum
@@ -390,7 +390,7 @@ be called.
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> kmeansTrainer = bob.learn.misc.KMeansTrainer()
+   >>> kmeansTrainer = bob.learn.em.KMeansTrainer()
    >>> kmeansTrainer.max_iterations = 200
    >>> kmeansTrainer.convergence_threshold = 1e-5
 
@@ -407,18 +407,18 @@ A Gaussian **mixture model** (GMM) [9]_ is a common probabilistic model. In
 order to train the parameters of such a model it is common to use a
 **maximum-likelihood** (ML) approach [10]_. To do this we use an
 **Expectation-Maximization** (EM) algorithm [8]_. Let's first start by creating
-a :py:class:`bob.learn.misc.GMMMachine`. By default, all of the Gaussian's have
+a :py:class:`bob.learn.em.GMMMachine`. By default, all of the Gaussian's have
 zero-mean and unit variance, and all the weights are equal. As a starting
 point, we could set the mean to the one obtained with **k-means** [7]_.
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> gmm = bob.learn.misc.GMMMachine(2,3) # Create a machine with 2 Gaussian and feature dimensionality 3
+   >>> gmm = bob.learn.em.GMMMachine(2,3) # Create a machine with 2 Gaussian and feature dimensionality 3
    >>> gmm.means = kmeans.means # Set the means to the one obtained with k-means
 
 The |project| class to learn the parameters of a GMM [9]_ using ML [10]_ is
-:py:class:`bob.learn.misc.ML_GMMTrainer`. It uses an **EM**-based [8]_ algorithm
+:py:class:`bob.learn.em.ML_GMMTrainer`. It uses an **EM**-based [8]_ algorithm
 and requires the user to specify which parameters of the GMM are updated at
 each iteration (means, variances and/or weights). In addition, and as for
 **k-means** [7]_, it has parameters such as the maximum number of iterations
@@ -427,7 +427,7 @@ and the criterion used to determine if the parameters have converged.
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> trainer = bob.learn.misc.ML_GMMTrainer(True, True, True) # update means/variances/weights at each iteration
+   >>> trainer = bob.learn.em.ML_GMMTrainer(True, True, True) # update means/variances/weights at each iteration
    >>> trainer.convergence_threshold = 1e-5
    >>> trainer.max_iterations = 200
    >>> trainer.train(gmm, data)
@@ -459,7 +459,7 @@ The training data used to compute the MAP estimate [11]_ is again stored in a
    >>> dataMAP = numpy.array([[7,-7,102], [6,-6,103], [-3.5,3.5,-97]], dtype='float64')
 
 The |project| class used to perform MAP adaptation training [11]_ is
-:py:class:`bob.learn.misc.MAP_GMMTrainer`. As with the ML estimate [10]_, it uses
+:py:class:`bob.learn.em.MAP_GMMTrainer`. As with the ML estimate [10]_, it uses
 an **EM**-based [8]_ algorithm and requires the user to specify which parts of
 the GMM are adapted at each iteration (means, variances and/or weights). In
 addition, it also has parameters such as the maximum number of iterations and
@@ -472,12 +472,12 @@ set.
    :options: +NORMALIZE_WHITESPACE
 
    >>> relevance_factor = 4.
-   >>> trainer = bob.learn.misc.MAP_GMMTrainer(relevance_factor, True, False, False) # mean adaptation only
+   >>> trainer = bob.learn.em.MAP_GMMTrainer(relevance_factor, True, False, False) # mean adaptation only
    >>> trainer.convergence_threshold = 1e-5
    >>> trainer.max_iterations = 200
    >>> trainer.set_prior_gmm(gmm)
    True
-   >>> gmmAdapted = bob.learn.misc.GMMMachine(2,3) # Create a new machine for the MAP estimate
+   >>> gmmAdapted = bob.learn.em.GMMMachine(2,3) # Create a new machine for the MAP estimate
    >>> trainer.train(gmmAdapted, dataMAP)
    >>> print(gmmAdapted) # doctest: +SKIP
 
@@ -501,40 +501,40 @@ done, we get a training set of GMM statistics:
    >>> N2 = numpy.array([0.1069, 0.9397, 0.6164, 0.3545]).reshape((2,2))
    >>> N=[N1, N2]
 
-   >>> gs11 = bob.learn.misc.GMMStats(2,3)
+   >>> gs11 = bob.learn.em.GMMStats(2,3)
    >>> gs11.n = N1[:,0]
    >>> gs11.sum_px = F1[:,0].reshape(2,3)
-   >>> gs12 = bob.learn.misc.GMMStats(2,3)
+   >>> gs12 = bob.learn.em.GMMStats(2,3)
    >>> gs12.n = N1[:,1]
    >>> gs12.sum_px = F1[:,1].reshape(2,3)
 
-   >>> gs21 = bob.learn.misc.GMMStats(2,3)
+   >>> gs21 = bob.learn.em.GMMStats(2,3)
    >>> gs21.n = N2[:,0]
    >>> gs21.sum_px = F2[:,0].reshape(2,3)
-   >>> gs22 = bob.learn.misc.GMMStats(2,3)
+   >>> gs22 = bob.learn.em.GMMStats(2,3)
    >>> gs22.n = N2[:,1]
    >>> gs22.sum_px = F2[:,1].reshape(2,3)
 
    >>> TRAINING_STATS = [[gs11, gs12], [gs21, gs22]]
 
-In the following, we will allocate a :py:class:`bob.learn.misc.JFABase` machine,
+In the following, we will allocate a :py:class:`bob.learn.em.JFABase` machine,
 that will then be trained.
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-    >>> jfa_base = bob.learn.misc.JFABase(gmm, 2, 2) # the dimensions of U and V are both equal to 2
+    >>> jfa_base = bob.learn.em.JFABase(gmm, 2, 2) # the dimensions of U and V are both equal to 2
 
 Next, we initialize a trainer, which is an instance of
-:py:class:`bob.learn.misc.JFATrainer`, as follows:
+:py:class:`bob.learn.em.JFATrainer`, as follows:
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> jfa_trainer = bob.learn.misc.JFATrainer(10) # 10 is the number of iterations
+   >>> jfa_trainer = bob.learn.em.JFATrainer(10) # 10 is the number of iterations
 
 The training process is started by calling the
-:py:meth:`bob.learn.misc.JFATrainer.train`.
+:py:meth:`bob.learn.em.JFATrainer.train`.
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
@@ -552,10 +552,10 @@ the following.
 
    >>> Ne = numpy.array([0.1579, 0.9245, 0.1323, 0.2458]).reshape((2,2))
    >>> Fe = numpy.array([0.1579, 0.1925, 0.3242, 0.1234, 0.2354, 0.2734, 0.2514, 0.5874, 0.3345, 0.2463, 0.4789, 0.5236]).reshape((6,2))
-   >>> gse1 = bob.learn.misc.GMMStats(2,3)
+   >>> gse1 = bob.learn.em.GMMStats(2,3)
    >>> gse1.n = Ne[:,0]
    >>> gse1.sum_px = Fe[:,0].reshape(2,3)
-   >>> gse2 = bob.learn.misc.GMMStats(2,3)
+   >>> gse2 = bob.learn.em.GMMStats(2,3)
    >>> gse2.n = Ne[:,1]
    >>> gse2.sum_px = Fe[:,1].reshape(2,3)
    >>> gse = [gse1, gse2]
@@ -566,7 +566,7 @@ the class-specific latent variables :math:`y` and :math:`z`:
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> m = bob.learn.misc.JFAMachine(jfa_base)
+   >>> m = bob.learn.em.JFAMachine(jfa_base)
    >>> jfa_trainer.enrol(m, gse, 5) # where 5 is the number of enrollment iterations
 
 More information about the training process can be found in [12]_ and [13]_.
@@ -579,23 +579,23 @@ The training of the subspace :math:`U` and :math:`D` of an Inter-Session
 Variability model, is performed in two steps. As for JFA, GMM sufficient
 statistics of the training samples should be computed against the UBM GMM. Once
 done, we get a training set of GMM statistics.  Next, we will allocate an
-:py:class:`bob.learn.misc.ISVBase` machine, that will then be trained.
+:py:class:`bob.learn.em.ISVBase` machine, that will then be trained.
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-    >>> isv_base = bob.learn.misc.ISVBase(gmm, 2) # the dimensions of U is equal to 2
+    >>> isv_base = bob.learn.em.ISVBase(gmm, 2) # the dimensions of U is equal to 2
 
 Next, we initialize a trainer, which is an instance of
-:py:class:`bob.learn.misc.ISVTrainer`, as follows:
+:py:class:`bob.learn.em.ISVTrainer`, as follows:
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> isv_trainer = bob.learn.misc.ISVTrainer(10, 4.) # 10 is the number of iterations, and 4 is the relevance factor
+   >>> isv_trainer = bob.learn.em.ISVTrainer(10, 4.) # 10 is the number of iterations, and 4 is the relevance factor
 
 The training process is started by calling the
-:py:meth:`bob.learn.misc.ISVTrainer.train`.
+:py:meth:`bob.learn.em.ISVTrainer.train`.
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
@@ -611,7 +611,7 @@ estimate the class-specific latent variable :math:`z`:
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> m = bob.learn.misc.ISVMachine(isv_base)
+   >>> m = bob.learn.em.ISVMachine(isv_base)
    >>> isv_trainer.enrol(m, gse, 5) # where 5 is the number of iterations
 
 More information about the training process can be found in [14]_ and [13]_.
@@ -624,27 +624,27 @@ The training of the subspace :math:`T` and :math:`\Sigma` of a Total
 Variability model, is performed in two steps. As for JFA and ISV, GMM
 sufficient statistics of the training samples should be computed against the
 UBM GMM. Once done, we get a training set of GMM statistics.  Next, we will
-allocate an instance of :py:class:`bob.learn.misc.IVectorMachine`, that will
+allocate an instance of :py:class:`bob.learn.em.IVectorMachine`, that will
 then be trained.
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-    >>> m = bob.learn.misc.IVectorMachine(gmm, 2)
+    >>> m = bob.learn.em.IVectorMachine(gmm, 2)
     >>> m.variance_threshold = 1e-5
 
 
 Next, we initialize a trainer, which is an instance of
-:py:class:`bob.learn.misc.IVectorTrainer`, as follows:
+:py:class:`bob.learn.em.IVectorTrainer`, as follows:
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> ivec_trainer = bob.learn.misc.IVectorTrainer(update_sigma=True, max_iterations=10)
+   >>> ivec_trainer = bob.learn.em.IVectorTrainer(update_sigma=True, max_iterations=10)
    >>> TRAINING_STATS_flatten = [gs11, gs12, gs21, gs22]
 
 The training process is started by calling the
-:py:meth:`bob.learn.misc.IVectorTrainer.train`.
+:py:meth:`bob.learn.em.IVectorTrainer.train`.
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
@@ -671,7 +671,7 @@ diagonal covariance matrix :math:`\Sigma`, the model assumes that a sample
 An Expectaction-Maximization algorithm can be used to learn the parameters of
 this model :math:`\mu`, :math:`F` :math:`G` and :math:`\Sigma`. As these
 parameters can be shared between classes, there is a specific container class
-for this purpose, which is :py:class:`bob.learn.misc.PLDABase`. The process is
+for this purpose, which is :py:class:`bob.learn.em.PLDABase`. The process is
 described in detail in [17]_.
 
 Let us consider a training set of two classes, each with 3 samples of
@@ -685,43 +685,43 @@ dimensionality 3.
    >>> data = [data1,data2]
 
 Learning a PLDA model can be performed by instantiating the class
-:py:class:`bob.learn.misc.PLDATrainer`, and calling the
-:py:meth:`bob.learn.misc.PLDATrainer.train()` method.
+:py:class:`bob.learn.em.PLDATrainer`, and calling the
+:py:meth:`bob.learn.em.PLDATrainer.train()` method.
 
 .. doctest::
 
    >>> ### This creates a PLDABase container for input feature of dimensionality 3,
    >>> ### and with subspaces F and G of rank 1 and 2 respectively.
-   >>> pldabase = bob.learn.misc.PLDABase(3,1,2)
+   >>> pldabase = bob.learn.em.PLDABase(3,1,2)
 
-   >>> trainer = bob.learn.misc.PLDATrainer()
+   >>> trainer = bob.learn.em.PLDATrainer()
    >>> trainer.train(pldabase, data)
 
 Once trained, this PLDA model can be used to compute the log-likelihood of a
 set of samples given some hypothesis. For this purpose, a
-:py:class:`bob.learn.misc.PLDAMachine` should be instantiated. Then, the
+:py:class:`bob.learn.em.PLDAMachine` should be instantiated. Then, the
 log-likelihood that a set of samples share the same latent identity variable
 :math:`h_{i}` (i.e. the samples are coming from the same identity/class) is
 obtained by calling the
-:py:meth:`bob.learn.misc.PLDAMachine.compute_log_likelihood()` method.
+:py:meth:`bob.learn.em.PLDAMachine.compute_log_likelihood()` method.
 
 .. doctest::
 
-   >>> plda = bob.learn.misc.PLDAMachine(pldabase)
+   >>> plda = bob.learn.em.PLDAMachine(pldabase)
    >>> samples = numpy.array([[3.5,-3.4,102], [4.5,-4.3,56]], dtype=numpy.float64)
    >>> loglike = plda.compute_log_likelihood(samples)
 
 If separate models for different classes need to be enrolled, each of them with
 a set of enrolment samples, then, several instances of
-:py:class:`bob.learn.misc.PLDAMachine` need to be created and enroled using
-the :py:meth:`bob.learn.misc.PLDATrainer.enrol()` method as follows.
+:py:class:`bob.learn.em.PLDAMachine` need to be created and enrolled using
+the :py:meth:`bob.learn.em.PLDATrainer.enrol()` method as follows.
 
 .. doctest::
 
-   >>> plda1 = bob.learn.misc.PLDAMachine(pldabase)
+   >>> plda1 = bob.learn.em.PLDAMachine(pldabase)
    >>> samples1 = numpy.array([[3.5,-3.4,102], [4.5,-4.3,56]], dtype=numpy.float64)
    >>> trainer.enrol(plda1, samples1)
-   >>> plda2 = bob.learn.misc.PLDAMachine(pldabase)
+   >>> plda2 = bob.learn.em.PLDAMachine(pldabase)
    >>> samples2 = numpy.array([[3.5,7,-49], [4.5,8.9,-99]], dtype=numpy.float64)
    >>> trainer.enrol(plda2, samples2)
 
@@ -738,8 +738,8 @@ separately for each model.
 In a verification scenario, there are two possible hypotheses: 1.
 :math:`x_{test}` and :math:`x_{enrol}` share the same class.  2.
 :math:`x_{test}` and :math:`x_{enrol}` are from different classes.  Using the
-methods :py:meth:`bob.learn.misc.PLDAMachine.forward` or
-:py:meth:`bob.learn.misc.PLDAMachine.__call__` function, the corresponding
+methods :py:meth:`bob.learn.em.PLDAMachine.forward` or
+:py:meth:`bob.learn.em.PLDAMachine.__call__` function, the corresponding
 log-likelihood ratio will be computed, which is defined in more formal way by:
 :math:`s = \ln(P(x_{test},x_{enrol})) - \ln(P(x_{test})P(x_{enrol}))`
 
diff --git a/doc/index.rst b/doc/index.rst
index cc40c8d2f3949e95a77f4b48ba74a727a670c74e..a160f0a417d0df4a919803fd358dc854c06bfb70 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -4,7 +4,7 @@
 ..
 .. Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
 
-.. _bob.learn.misc:
+.. _bob.learn.em:
 
 ======================================
  Miscellaneous Machine Learning Tools
diff --git a/doc/py_api.rst b/doc/py_api.rst
index c1c954148c7fd2a5f050d0840a2f857ebc12bcfe..e5f18c34582916dd6cd3d304701336336440dedb 100644
--- a/doc/py_api.rst
+++ b/doc/py_api.rst
@@ -7,8 +7,8 @@
 ============
 
 This section includes information for using the pure Python API of
-``bob.learn.misc``.
+``bob.learn.em``.
 
 
-.. automodule:: bob.learn.misc
+.. automodule:: bob.learn.em
 
diff --git a/setup.py b/setup.py
index 875085d7a75b9194b1a5b1734f3ec38ca120a552..3f89ff52018f14f373ba3a4206ca5e33946942e9 100644
--- a/setup.py
+++ b/setup.py
@@ -20,10 +20,10 @@ boost_modules = ['system', 'python']
 
 setup(
 
-    name='bob.learn.misc',
+    name='bob.learn.em',
     version=version,
-    description='Bindings for miscelaneous machines and trainers of Bob',
-    url='http://github.com/bioidiap/bob.learn.misc',
+    description='Bindings for miscellaneous machines and trainers of Bob',
+    url='http://github.com/bioidiap/bob.learn.em',
     license='BSD',
     author='Andre Anjos',
     author_email='andre.anjos@idiap.ch',
@@ -42,9 +42,9 @@ setup(
     ],
 
     ext_modules = [
-      Extension("bob.learn.misc.version",
+      Extension("bob.learn.em.version",
         [
-          "bob/learn/misc/version.cpp",
+          "bob/learn/em/version.cpp",
         ],
         bob_packages = bob_packages,
         packages = packages,
@@ -52,34 +52,34 @@ setup(
         version = version,
       ),
 
-      Library("bob.learn.misc.bob_learn_misc",
+      Library("bob.learn.em.bob_learn_em",
         [
-          "bob/learn/misc/cpp/Gaussian.cpp",
-          "bob/learn/misc/cpp/GMMMachine.cpp",
-          "bob/learn/misc/cpp/GMMStats.cpp",
-          "bob/learn/misc/cpp/IVectorMachine.cpp",
-          "bob/learn/misc/cpp/KMeansMachine.cpp",
-          "bob/learn/misc/cpp/LinearScoring.cpp",
-          "bob/learn/misc/cpp/PLDAMachine.cpp",
-          "bob/learn/misc/cpp/ZTNorm.cpp",
-
-          "bob/learn/misc/cpp/FABase.cpp",
-          "bob/learn/misc/cpp/JFABase.cpp",
-          "bob/learn/misc/cpp/ISVBase.cpp",
-          "bob/learn/misc/cpp/JFAMachine.cpp",
-          "bob/learn/misc/cpp/ISVMachine.cpp",
-
-          "bob/learn/misc/cpp/FABaseTrainer.cpp",
-          "bob/learn/misc/cpp/JFATrainer.cpp",
-          "bob/learn/misc/cpp/ISVTrainer.cpp",
-
-          "bob/learn/misc/cpp/EMPCATrainer.cpp",
-          "bob/learn/misc/cpp/GMMBaseTrainer.cpp",
-          "bob/learn/misc/cpp/IVectorTrainer.cpp",
-          "bob/learn/misc/cpp/KMeansTrainer.cpp",
-          "bob/learn/misc/cpp/MAP_GMMTrainer.cpp",
-          "bob/learn/misc/cpp/ML_GMMTrainer.cpp",
-          "bob/learn/misc/cpp/PLDATrainer.cpp",
+          "bob/learn/em/cpp/Gaussian.cpp",
+          "bob/learn/em/cpp/GMMMachine.cpp",
+          "bob/learn/em/cpp/GMMStats.cpp",
+          "bob/learn/em/cpp/IVectorMachine.cpp",
+          "bob/learn/em/cpp/KMeansMachine.cpp",
+          "bob/learn/em/cpp/LinearScoring.cpp",
+          "bob/learn/em/cpp/PLDAMachine.cpp",
+          "bob/learn/em/cpp/ZTNorm.cpp",
+
+          "bob/learn/em/cpp/FABase.cpp",
+          "bob/learn/em/cpp/JFABase.cpp",
+          "bob/learn/em/cpp/ISVBase.cpp",
+          "bob/learn/em/cpp/JFAMachine.cpp",
+          "bob/learn/em/cpp/ISVMachine.cpp",
+
+          "bob/learn/em/cpp/FABaseTrainer.cpp",
+          "bob/learn/em/cpp/JFATrainer.cpp",
+          "bob/learn/em/cpp/ISVTrainer.cpp",
+
+          "bob/learn/em/cpp/EMPCATrainer.cpp",
+          "bob/learn/em/cpp/GMMBaseTrainer.cpp",
+          "bob/learn/em/cpp/IVectorTrainer.cpp",
+          "bob/learn/em/cpp/KMeansTrainer.cpp",
+          "bob/learn/em/cpp/MAP_GMMTrainer.cpp",
+          "bob/learn/em/cpp/ML_GMMTrainer.cpp",
+          "bob/learn/em/cpp/PLDATrainer.cpp",
         ],
         bob_packages = bob_packages,
         packages = packages,
@@ -87,18 +87,18 @@ setup(
         version = version,
       ),
 
-#      Extension("bob.learn.misc._library",
+#      Extension("bob.learn.em._library",
 #        [
-#          "bob/learn/misc/old/bic.cc",
+#          "bob/learn/em/old/bic.cc",
 #
 #          # external requirements as boost::python bindings
-#          "bob/learn/misc/old/blitz_numpy.cc",
-#          "bob/learn/misc/old/ndarray.cc",
-#          "bob/learn/misc/old/ndarray_numpy.cc",
-#          "bob/learn/misc/old/tinyvector.cc",
-#          "bob/learn/misc/old/random.cc",
+#          "bob/learn/em/old/blitz_numpy.cc",
+#          "bob/learn/em/old/ndarray.cc",
+#          "bob/learn/em/old/ndarray_numpy.cc",
+#          "bob/learn/em/old/tinyvector.cc",
+#          "bob/learn/em/old/random.cc",
 #
-#          "bob/learn/misc/old/main.cc",
+#          "bob/learn/em/old/main.cc",
 #        ],
 #        bob_packages = bob_packages,
 #        packages = packages,
@@ -106,40 +106,40 @@ setup(
 #        version = version,
 #      ),
 
-      Extension("bob.learn.misc._library",
+      Extension("bob.learn.em._library",
         [
-          "bob/learn/misc/gaussian.cpp",
-          "bob/learn/misc/gmm_stats.cpp",
-          "bob/learn/misc/gmm_machine.cpp",
-          "bob/learn/misc/kmeans_machine.cpp",
-          "bob/learn/misc/kmeans_trainer.cpp",
-
-          "bob/learn/misc/ML_gmm_trainer.cpp",
-          "bob/learn/misc/MAP_gmm_trainer.cpp",
-
-          "bob/learn/misc/jfa_base.cpp",
-          "bob/learn/misc/jfa_machine.cpp",
-          "bob/learn/misc/jfa_trainer.cpp",
-
-          "bob/learn/misc/isv_base.cpp",
-          "bob/learn/misc/isv_machine.cpp",
-          "bob/learn/misc/isv_trainer.cpp",
+          "bob/learn/em/gaussian.cpp",
+          "bob/learn/em/gmm_stats.cpp",
+          "bob/learn/em/gmm_machine.cpp",
+          "bob/learn/em/kmeans_machine.cpp",
+          "bob/learn/em/kmeans_trainer.cpp",
+
+          "bob/learn/em/ML_gmm_trainer.cpp",
+          "bob/learn/em/MAP_gmm_trainer.cpp",
+
+          "bob/learn/em/jfa_base.cpp",
+          "bob/learn/em/jfa_machine.cpp",
+          "bob/learn/em/jfa_trainer.cpp",
+
+          "bob/learn/em/isv_base.cpp",
+          "bob/learn/em/isv_machine.cpp",
+          "bob/learn/em/isv_trainer.cpp",
           
-          "bob/learn/misc/ivector_machine.cpp",
-          "bob/learn/misc/ivector_trainer.cpp",
+          "bob/learn/em/ivector_machine.cpp",
+          "bob/learn/em/ivector_trainer.cpp",
           
-          "bob/learn/misc/plda_base.cpp",
-          "bob/learn/misc/plda_machine.cpp",
+          "bob/learn/em/plda_base.cpp",
+          "bob/learn/em/plda_machine.cpp",
 
-          "bob/learn/misc/empca_trainer.cpp",
+          "bob/learn/em/empca_trainer.cpp",
 
-          "bob/learn/misc/plda_trainer.cpp",
+          "bob/learn/em/plda_trainer.cpp",
 
-          "bob/learn/misc/ztnorm.cpp",
+          "bob/learn/em/ztnorm.cpp",
 
-          "bob/learn/misc/linear_scoring.cpp",
+          "bob/learn/em/linear_scoring.cpp",
 
-          "bob/learn/misc/main.cpp",
+          "bob/learn/em/main.cpp",
         ],
         bob_packages = bob_packages,
         packages = packages,