diff --git a/bob/learn/em/.DS_Store b/bob/learn/em/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..47a80a664e3fd38c9114f0953bbebcd70e7d0146
Binary files /dev/null and b/bob/learn/em/.DS_Store differ
diff --git a/bob/learn/em/MAP_gmm_trainer.cpp b/bob/learn/em/MAP_gmm_trainer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..570ed8dd7d57a52c250f7187a09b99ec2469241e
--- /dev/null
+++ b/bob/learn/em/MAP_gmm_trainer.cpp
@@ -0,0 +1,431 @@
+/**
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ * @date Wed 23 Jan 16:42:00 2015
+ *
+ * @brief Python API for bob::learn::em
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "main.h"
+
+/******************************************************************/
+/************ Constructor Section *********************************/
+/******************************************************************/
+
+static inline bool f(PyObject* o){return o != 0 && PyObject_IsTrue(o) > 0;}  /* converts PyObject to bool and returns false if object is NULL */
+
+static auto MAP_GMMTrainer_doc = bob::extension::ClassDoc(
+  BOB_EXT_MODULE_PREFIX ".MAP_GMMTrainer",
+  "This class implements the maximum a posteriori M-step of the expectation-maximisation algorithm for a GMM Machine. The prior parameters are encoded in the form of a GMM (e.g. a universal background model). The EM algorithm thus performs GMM adaptation."
+).add_constructor(
+  bob::extension::FunctionDoc(
+    "__init__",
+    "Creates a MAP_GMMTrainer",
+    "",
+    true
+  )
+
+  .add_prototype("prior_gmm,relevance_factor, update_means, [update_variances], [update_weights], [mean_var_update_responsibilities_threshold]","")
+  .add_prototype("prior_gmm,alpha, update_means, [update_variances], [update_weights], [mean_var_update_responsibilities_threshold]","")
+  .add_prototype("other","")
+  .add_prototype("","")
+
+  .add_parameter("prior_gmm", ":py:class:`bob.learn.em.GMMMachine`", "The prior GMM to be adapted (Universal Backgroud Model UBM).")
+  .add_parameter("reynolds_adaptation", "bool", "Will use the Reynolds adaptation procedure? See Eq (14) from [Reynolds2000]_")
+  .add_parameter("relevance_factor", "double", "If set the reynolds_adaptation parameters, will apply the Reynolds Adaptation procedure. See Eq (14) from [Reynolds2000]_")
+  .add_parameter("alpha", "double", "Set directly the alpha parameter (Eq (14) from [Reynolds2000]_), ignoring zeroth order statistics as a weighting factor.")
+
+  .add_parameter("update_means", "bool", "Update means on each iteration")
+  .add_parameter("update_variances", "bool", "Update variances on each iteration")
+  .add_parameter("update_weights", "bool", "Update weights on each iteration")
+  .add_parameter("mean_var_update_responsibilities_threshold", "float", "Threshold over the responsibilities of the Gaussians Equations 9.24, 9.25 of Bishop, `Pattern recognition and machine learning`, 2006 require a division by the responsibilities, which might be equal to zero because of numerical issue. This threshold is used to avoid such divisions.")
+
+  .add_parameter("other", ":py:class:`bob.learn.em.MAP_GMMTrainer`", "A MAP_GMMTrainer object to be copied.")
+);
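+
+// A hedged usage sketch of the two Python-side constructor forms documented
+// above (`prior` stands for an already created bob.learn.em.GMMMachine):
+//
+//   trainer = bob.learn.em.MAP_GMMTrainer(prior, relevance_factor=4., update_means=True)  # Reynolds adaptation
+//   trainer = bob.learn.em.MAP_GMMTrainer(prior, alpha=0.5, update_means=True)            # direct alpha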
+
+
+static int PyBobLearnEMMAPGMMTrainer_init_copy(PyBobLearnEMMAPGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = MAP_GMMTrainer_doc.kwlist(2);
+  PyBobLearnEMMAPGMMTrainerObject* o;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMMAPGMMTrainer_Type, &o)){
+    MAP_GMMTrainer_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::MAP_GMMTrainer(*o->cxx));
+  return 0;
+}
+
+
+static int PyBobLearnEMMAPGMMTrainer_init_base_trainer(PyBobLearnEMMAPGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist1 = MAP_GMMTrainer_doc.kwlist(0);
+  char** kwlist2 = MAP_GMMTrainer_doc.kwlist(1);
+  
+  PyBobLearnEMGMMMachineObject* gmm_machine;
+  bool reynolds_adaptation   = false;
+  double alpha = 0.5;
+  double relevance_factor = 4.0;
+  double aux = 0;
+
+  PyObject* update_means     = 0;
+  PyObject* update_variances = 0;
+  PyObject* update_weights   = 0;
+  double mean_var_update_responsibilities_threshold = std::numeric_limits<double>::epsilon();
+
+  PyObject* keyword_relevance_factor = Py_BuildValue("s", kwlist1[1]);
+  PyObject* keyword_alpha            = Py_BuildValue("s", kwlist2[1]);
+  auto keyword_relevance_factor_ = make_safe(keyword_relevance_factor);  // avoids leaking the temporary strings
+  auto keyword_alpha_            = make_safe(keyword_alpha);
+
+  // select which keyword argument to read (relevance_factor triggers the Reynolds adaptation; alpha disables it)
+  if (kwargs && PyDict_Contains(kwargs, keyword_relevance_factor) && (PyArg_ParseTupleAndKeywords(args, kwargs, "O!dO!|O!O!d", kwlist1, 
+                                                                      &PyBobLearnEMGMMMachine_Type, &gmm_machine,
+                                                                      &aux,
+                                                                      &PyBool_Type, &update_means, 
+                                                                      &PyBool_Type, &update_variances, 
+                                                                      &PyBool_Type, &update_weights, 
+                                                                      &mean_var_update_responsibilities_threshold)))
+    reynolds_adaptation = true;    
+  else if (kwargs && PyDict_Contains(kwargs, keyword_alpha) && (PyArg_ParseTupleAndKeywords(args, kwargs, "O!dO!|O!O!d", kwlist2, 
+                                                                 &PyBobLearnEMGMMMachine_Type, &gmm_machine,
+                                                                 &aux,
+                                                                 &PyBool_Type, &update_means, 
+                                                                 &PyBool_Type, &update_variances, 
+                                                                 &PyBool_Type, &update_weights, 
+                                                                 &mean_var_update_responsibilities_threshold)))
+    reynolds_adaptation = false;
+  else{
+    PyErr_Format(PyExc_RuntimeError, "%s. The second argument must be a keyword argument.", Py_TYPE(self)->tp_name);
+    MAP_GMMTrainer_doc.print_usage();
+    return -1;
+  }
+
+  if (reynolds_adaptation)
+    relevance_factor = aux;
+  else
+    alpha = aux;
+  
+  
+  self->cxx.reset(new bob::learn::em::MAP_GMMTrainer(f(update_means), f(update_variances), f(update_weights), 
+                                                       mean_var_update_responsibilities_threshold, 
+                                                       reynolds_adaptation,relevance_factor, alpha, gmm_machine->cxx));
+  return 0;
+
+}
+
+
+
+static int PyBobLearnEMMAPGMMTrainer_init(PyBobLearnEMMAPGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // get the first input argument, if any
+  PyObject* arg = 0;
+  PyObject* tmp = 0;
+  if (PyTuple_Size(args))
+    arg = PyTuple_GET_ITEM(args, 0);
+  else if (kwargs && PyDict_Size(kwargs)) {
+    tmp = PyDict_Values(kwargs);
+    arg = PyList_GET_ITEM(tmp, 0);
+  }
+  // keep the temporary list of keyword values alive while `arg` is in use
+  auto tmp_ = make_xsafe(tmp);
+
+  // if the constructor input is another MAP_GMMTrainer object, make a copy of it
+  if (arg && PyBobLearnEMMAPGMMTrainer_Check(arg))
+    return PyBobLearnEMMAPGMMTrainer_init_copy(self, args, kwargs);
+  else
+    return PyBobLearnEMMAPGMMTrainer_init_base_trainer(self, args, kwargs);
+
+  BOB_CATCH_MEMBER("cannot create MAP_GMMTrainer", -1)
+  return 0;
+}
+
+
+static void PyBobLearnEMMAPGMMTrainer_delete(PyBobLearnEMMAPGMMTrainerObject* self) {
+  self->cxx.reset();
+  Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+
+int PyBobLearnEMMAPGMMTrainer_Check(PyObject* o) {
+  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnEMMAPGMMTrainer_Type));
+}
+
+
+static PyObject* PyBobLearnEMMAPGMMTrainer_RichCompare(PyBobLearnEMMAPGMMTrainerObject* self, PyObject* other, int op) {
+  BOB_TRY
+
+  if (!PyBobLearnEMMAPGMMTrainer_Check(other)) {
+    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
+    return 0;
+  }
+  auto other_ = reinterpret_cast<PyBobLearnEMMAPGMMTrainerObject*>(other);
+  switch (op) {
+    case Py_EQ:
+      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+    case Py_NE:
+      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
+    default:
+      Py_INCREF(Py_NotImplemented);
+      return Py_NotImplemented;
+  }
+  BOB_CATCH_MEMBER("cannot compare MAP_GMMTrainer objects", 0)
+}
+
+
+/******************************************************************/
+/************ Variables Section ***********************************/
+/******************************************************************/
+
+/***** relevance_factor *****/
+static auto relevance_factor = bob::extension::VariableDoc(
+  "relevance_factor",
+  "double",
+  "If set the reynolds_adaptation parameters, will apply the Reynolds Adaptation Factor. See Eq (14) from [Reynolds2000]_",
+  ""
+);
+PyObject* PyBobLearnEMMAPGMMTrainer_getRelevanceFactor(PyBobLearnEMMAPGMMTrainerObject* self, void*){
+  BOB_TRY
+  return Py_BuildValue("d",self->cxx->getRelevanceFactor());
+  BOB_CATCH_MEMBER("relevance_factor could not be read", 0)
+}
+int PyBobLearnEMMAPGMMTrainer_setRelevanceFactor(PyBobLearnEMMAPGMMTrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+  
+  if(!PyNumber_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a double", Py_TYPE(self)->tp_name, relevance_factor.name());
+    return -1;
+  }
+  
+  self->cxx->setRelevanceFactor(PyFloat_AsDouble(value));
+  return 0;
+  BOB_CATCH_MEMBER("relevance_factor could not be set", 0)
+}
+
+
+/***** alpha *****/
+static auto alpha = bob::extension::VariableDoc(
+  "alpha",
+  "double",
+  "Set directly the alpha parameter (Eq (14) from [Reynolds2000]_), ignoring zeroth order statistics as a weighting factor.",
+  ""
+);
+PyObject* PyBobLearnEMMAPGMMTrainer_getAlpha(PyBobLearnEMMAPGMMTrainerObject* self, void*){
+  BOB_TRY
+  return Py_BuildValue("d",self->cxx->getAlpha());
+  BOB_CATCH_MEMBER("alpha could not be read", 0)
+}
+int PyBobLearnEMMAPGMMTrainer_setAlpha(PyBobLearnEMMAPGMMTrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+  
+  if(!PyNumber_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a double", Py_TYPE(self)->tp_name, alpha.name());
+    return -1;
+  }
+  
+  self->cxx->setAlpha(PyFloat_AsDouble(value));
+  return 0;
+  BOB_CATCH_MEMBER("alpha could not be set", 0)
+}
+
+
+
+static PyGetSetDef PyBobLearnEMMAPGMMTrainer_getseters[] = { 
+  {
+    alpha.name(),
+    (getter)PyBobLearnEMMAPGMMTrainer_getAlpha,
+    (setter)PyBobLearnEMMAPGMMTrainer_setAlpha,
+    alpha.doc(),
+    0
+  },
+  {
+    relevance_factor.name(),
+    (getter)PyBobLearnEMMAPGMMTrainer_getRelevanceFactor,
+    (setter)PyBobLearnEMMAPGMMTrainer_setRelevanceFactor,
+    relevance_factor.doc(),
+    0
+  },
+
+  {0}  // Sentinel
+};
+
+
+/******************************************************************/
+/************ Functions Section ***********************************/
+/******************************************************************/
+
+/*** initialize ***/
+static auto initialize = bob::extension::FunctionDoc(
+  "initialize",
+  "Initialization before the EM steps",
+  "",
+  true
+)
+.add_prototype("gmm_machine")
+.add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object");
+static PyObject* PyBobLearnEMMAPGMMTrainer_initialize(PyBobLearnEMMAPGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = initialize.kwlist(0);
+
+  PyBobLearnEMGMMMachineObject* gmm_machine = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine)){
+    PyErr_Format(PyExc_RuntimeError, "%s.%s. Could not read the input :py:class:`bob.learn.em.GMMMachine`", Py_TYPE(self)->tp_name, initialize.name());
+    return 0;
+  }
+  self->cxx->initialize(*gmm_machine->cxx);
+  BOB_CATCH_MEMBER("cannot perform the initialize method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** eStep ***/
+static auto eStep = bob::extension::FunctionDoc(
+  "eStep",
+  "Calculates and saves statistics across the dataset,"
+  "and saves these as m_ss. ",
+
+  "Calculates the average log likelihood of the observations given the GMM,"
+  "and returns this in average_log_likelihood."
+  "The statistics, m_ss, will be used in the mStep() that follows.",
+
+  true
+)
+.add_prototype("gmm_machine,data")
+.add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object")
+.add_parameter("data", "array_like <float, 2D>", "Input data");
+static PyObject* PyBobLearnEMMAPGMMTrainer_eStep(PyBobLearnEMMAPGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = eStep.kwlist(0);
+
+  PyBobLearnEMGMMMachineObject* gmm_machine;
+  PyBlitzArrayObject* data = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine,
+                                                                 &PyBlitzArray_Converter, &data)) return 0;
+  auto data_ = make_safe(data);
+
+  self->cxx->eStep(*gmm_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
+
+  BOB_CATCH_MEMBER("cannot perform the eStep method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** mStep ***/
+static auto mStep = bob::extension::FunctionDoc(
+  "mStep",
+
+   "Performs a maximum a posteriori (MAP) update of the GMM:"  
+   "* parameters using the accumulated statistics in :py:class:`bob.learn.em.GMMBaseTrainer.m_ss` and the" 
+   "* parameters of the prior model",
+  "",
+  true
+)
+.add_prototype("gmm_machine")
+.add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object");
+static PyObject* PyBobLearnEMMAPGMMTrainer_mStep(PyBobLearnEMMAPGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = mStep.kwlist(0);
+
+  PyBobLearnEMGMMMachineObject* gmm_machine;
+  
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine)) return 0;
+
+  self->cxx->mStep(*gmm_machine->cxx);
+
+  BOB_CATCH_MEMBER("cannot perform the mStep method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** computeLikelihood ***/
+static auto compute_likelihood = bob::extension::FunctionDoc(
+  "compute_likelihood",
+  "This functions returns the average min (Square Euclidean) distance (average distance to the closest mean)",
+  0,
+  true
+)
+.add_prototype("gmm_machine")
+.add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object");
+static PyObject* PyBobLearnEMMAPGMMTrainer_compute_likelihood(PyBobLearnEMMAPGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = compute_likelihood.kwlist(0);
+
+  PyBobLearnEMGMMMachineObject* gmm_machine;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine)) return 0;
+
+  double value = self->cxx->computeLikelihood(*gmm_machine->cxx);
+  return Py_BuildValue("d", value);
+
+  BOB_CATCH_MEMBER("cannot perform the computeLikelihood method", 0)
+}
+
+
+
+static PyMethodDef PyBobLearnEMMAPGMMTrainer_methods[] = {
+  {
+    initialize.name(),
+    (PyCFunction)PyBobLearnEMMAPGMMTrainer_initialize,
+    METH_VARARGS|METH_KEYWORDS,
+    initialize.doc()
+  },
+  {
+    eStep.name(),
+    (PyCFunction)PyBobLearnEMMAPGMMTrainer_eStep,
+    METH_VARARGS|METH_KEYWORDS,
+    eStep.doc()
+  },
+  {
+    mStep.name(),
+    (PyCFunction)PyBobLearnEMMAPGMMTrainer_mStep,
+    METH_VARARGS|METH_KEYWORDS,
+    mStep.doc()
+  },
+  {
+    compute_likelihood.name(),
+    (PyCFunction)PyBobLearnEMMAPGMMTrainer_compute_likelihood,
+    METH_VARARGS|METH_KEYWORDS,
+    compute_likelihood.doc()
+  },
+
+  {0} /* Sentinel */
+};
+
+
+/******************************************************************/
+/************ Module Section **************************************/
+/******************************************************************/
+
+// Define the MAP_GMMTrainer type struct; will be initialized later
+PyTypeObject PyBobLearnEMMAPGMMTrainer_Type = {
+  PyVarObject_HEAD_INIT(0,0)
+  0
+};
+
+bool init_BobLearnEMMAPGMMTrainer(PyObject* module)
+{
+  // initialize the type struct
+  PyBobLearnEMMAPGMMTrainer_Type.tp_name      = MAP_GMMTrainer_doc.name();
+  PyBobLearnEMMAPGMMTrainer_Type.tp_basicsize = sizeof(PyBobLearnEMMAPGMMTrainerObject);
+  PyBobLearnEMMAPGMMTrainer_Type.tp_flags     = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;//Enable the class inheritance
+  PyBobLearnEMMAPGMMTrainer_Type.tp_doc       = MAP_GMMTrainer_doc.doc();
+
+  // set the functions
+  PyBobLearnEMMAPGMMTrainer_Type.tp_new          = PyType_GenericNew;
+  PyBobLearnEMMAPGMMTrainer_Type.tp_init         = reinterpret_cast<initproc>(PyBobLearnEMMAPGMMTrainer_init);
+  PyBobLearnEMMAPGMMTrainer_Type.tp_dealloc      = reinterpret_cast<destructor>(PyBobLearnEMMAPGMMTrainer_delete);
+  PyBobLearnEMMAPGMMTrainer_Type.tp_richcompare  = reinterpret_cast<richcmpfunc>(PyBobLearnEMMAPGMMTrainer_RichCompare);
+  PyBobLearnEMMAPGMMTrainer_Type.tp_methods      = PyBobLearnEMMAPGMMTrainer_methods;
+  PyBobLearnEMMAPGMMTrainer_Type.tp_getset       = PyBobLearnEMMAPGMMTrainer_getseters;
+  PyBobLearnEMMAPGMMTrainer_Type.tp_call         = reinterpret_cast<ternaryfunc>(PyBobLearnEMMAPGMMTrainer_compute_likelihood);
+
+
+  // check that everything is fine
+  if (PyType_Ready(&PyBobLearnEMMAPGMMTrainer_Type) < 0) return false;
+
+  // add the type to the module
+  Py_INCREF(&PyBobLearnEMMAPGMMTrainer_Type);
+  return PyModule_AddObject(module, "_MAP_GMMTrainer", (PyObject*)&PyBobLearnEMMAPGMMTrainer_Type) >= 0;
+}
+
diff --git a/bob/learn/em/ML_gmm_trainer.cpp b/bob/learn/em/ML_gmm_trainer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..51d6ce315462fef115b8568051326ef420f953cd
--- /dev/null
+++ b/bob/learn/em/ML_gmm_trainer.cpp
@@ -0,0 +1,335 @@
+/**
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ * @date Wed 22 Jan 16:45:00 2015
+ *
+ * @brief Python API for bob::learn::em
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "main.h"
+
+/******************************************************************/
+/************ Constructor Section *********************************/
+/******************************************************************/
+
+static inline bool f(PyObject* o){return o != 0 && PyObject_IsTrue(o) > 0;}  /* converts PyObject to bool and returns false if object is NULL */
+
+static auto ML_GMMTrainer_doc = bob::extension::ClassDoc(
+  BOB_EXT_MODULE_PREFIX ".ML_GMMTrainer",
+  "This class implements the maximum likelihood M-step of the expectation-maximisation algorithm for a GMM Machine."
+).add_constructor(
+  bob::extension::FunctionDoc(
+    "__init__",
+    "Creates a ML_GMMTrainer",
+    "",
+    true
+  )
+  .add_prototype("update_means, [update_variances], [update_weights], [mean_var_update_responsibilities_threshold]","")
+  .add_prototype("other","")
+  .add_prototype("","")
+
+  .add_parameter("update_means", "bool", "Update means on each iteration")
+  .add_parameter("update_variances", "bool", "Update variances on each iteration")
+  .add_parameter("update_weights", "bool", "Update weights on each iteration")
+  .add_parameter("mean_var_update_responsibilities_threshold", "float", "Threshold over the responsibilities of the Gaussians Equations 9.24, 9.25 of Bishop, `Pattern recognition and machine learning`, 2006 require a division by the responsibilities, which might be equal to zero because of numerical issue. This threshold is used to avoid such divisions.")
+
+
+  .add_parameter("other", ":py:class:`bob.learn.em.ML_GMMTrainer`", "A ML_GMMTrainer object to be copied.")
+);
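+
+// A hedged usage sketch of the Python-side constructor call documented above:
+//
+//   trainer = bob.learn.em.ML_GMMTrainer(update_means=True, update_variances=True, update_weights=True)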
+
+
+static int PyBobLearnEMMLGMMTrainer_init_copy(PyBobLearnEMMLGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = ML_GMMTrainer_doc.kwlist(1);
+  PyBobLearnEMMLGMMTrainerObject* o;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMMLGMMTrainer_Type, &o)){
+    ML_GMMTrainer_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::ML_GMMTrainer(*o->cxx));
+  return 0;
+}
+
+
+static int PyBobLearnEMMLGMMTrainer_init_base_trainer(PyBobLearnEMMLGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = ML_GMMTrainer_doc.kwlist(0);
+  
+  PyObject* update_means     = 0;
+  PyObject* update_variances = 0;
+  PyObject* update_weights   = 0;
+  double mean_var_update_responsibilities_threshold = std::numeric_limits<double>::epsilon();
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!|O!O!d", kwlist, 
+                                   &PyBool_Type, &update_means, 
+                                   &PyBool_Type, &update_variances, 
+                                   &PyBool_Type, &update_weights, 
+                                   &mean_var_update_responsibilities_threshold)){
+    ML_GMMTrainer_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::ML_GMMTrainer(f(update_means), f(update_variances), f(update_weights), 
+                                                       mean_var_update_responsibilities_threshold));
+  return 0;
+}
+
+
+
+static int PyBobLearnEMMLGMMTrainer_init(PyBobLearnEMMLGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // get the first input argument, if any
+  PyObject* arg = 0;
+  PyObject* tmp = 0;
+  if (PyTuple_Size(args))
+    arg = PyTuple_GET_ITEM(args, 0);
+  else if (kwargs && PyDict_Size(kwargs)) {
+    tmp = PyDict_Values(kwargs);
+    arg = PyList_GET_ITEM(tmp, 0);
+  }
+  // keep the temporary list of keyword values alive while `arg` is in use
+  auto tmp_ = make_xsafe(tmp);
+
+  // if the constructor input is another ML_GMMTrainer object, make a copy of it
+  if (arg && PyBobLearnEMMLGMMTrainer_Check(arg))
+    return PyBobLearnEMMLGMMTrainer_init_copy(self, args, kwargs);
+  else
+    return PyBobLearnEMMLGMMTrainer_init_base_trainer(self, args, kwargs);
+
+  BOB_CATCH_MEMBER("cannot create ML_GMMTrainer", -1)
+  return 0;
+}
+
+
+static void PyBobLearnEMMLGMMTrainer_delete(PyBobLearnEMMLGMMTrainerObject* self) {
+  self->cxx.reset();
+  Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+
+int PyBobLearnEMMLGMMTrainer_Check(PyObject* o) {
+  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnEMMLGMMTrainer_Type));
+}
+
+
+static PyObject* PyBobLearnEMMLGMMTrainer_RichCompare(PyBobLearnEMMLGMMTrainerObject* self, PyObject* other, int op) {
+  BOB_TRY
+
+  if (!PyBobLearnEMMLGMMTrainer_Check(other)) {
+    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
+    return 0;
+  }
+  auto other_ = reinterpret_cast<PyBobLearnEMMLGMMTrainerObject*>(other);
+  switch (op) {
+    case Py_EQ:
+      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+    case Py_NE:
+      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
+    default:
+      Py_INCREF(Py_NotImplemented);
+      return Py_NotImplemented;
+  }
+  BOB_CATCH_MEMBER("cannot compare ML_GMMTrainer objects", 0)
+}
+
+
+/******************************************************************/
+/************ Variables Section ***********************************/
+/******************************************************************/
+
+static PyGetSetDef PyBobLearnEMMLGMMTrainer_getseters[] = { 
+  {0}  // Sentinel
+};
+
+
+/******************************************************************/
+/************ Functions Section ***********************************/
+/******************************************************************/
+
+/*** initialize ***/
+static auto initialize = bob::extension::FunctionDoc(
+  "initialize",
+  "Initialization before the EM steps",
+  "",
+  true
+)
+.add_prototype("gmm_machine")
+.add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object");
+static PyObject* PyBobLearnEMMLGMMTrainer_initialize(PyBobLearnEMMLGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = initialize.kwlist(0);
+
+  PyBobLearnEMGMMMachineObject* gmm_machine = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine)){
+    PyErr_Format(PyExc_RuntimeError, "%s.%s. Could not read the input :py:class:`bob.learn.em.GMMMachine`", Py_TYPE(self)->tp_name, initialize.name());
+    return 0;
+  }
+  self->cxx->initialize(*gmm_machine->cxx);
+  BOB_CATCH_MEMBER("cannot perform the initialize method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** eStep ***/
+static auto eStep = bob::extension::FunctionDoc(
+  "eStep",
+  "Calculates and saves statistics across the dataset,"
+  "and saves these as m_ss. ",
+
+  "Calculates the average log likelihood of the observations given the GMM,"
+  "and returns this in average_log_likelihood."
+  "The statistics, m_ss, will be used in the mStep() that follows.",
+
+  true
+)
+.add_prototype("gmm_machine,data")
+.add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object")
+.add_parameter("data", "array_like <float, 2D>", "Input data");
+static PyObject* PyBobLearnEMMLGMMTrainer_eStep(PyBobLearnEMMLGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = eStep.kwlist(0);
+
+  PyBobLearnEMGMMMachineObject* gmm_machine;
+  PyBlitzArrayObject* data = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine,
+                                                                 &PyBlitzArray_Converter, &data)) return 0;
+  auto data_ = make_safe(data);
+
+  self->cxx->eStep(*gmm_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
+
+  BOB_CATCH_MEMBER("cannot perform the eStep method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** mStep ***/
+static auto mStep = bob::extension::FunctionDoc(
+  "mStep",
+  "Performs a maximum likelihood (ML) update of the GMM parameters "
+  "using the accumulated statistics in :py:class:`bob.learn.em.GMMBaseTrainer.m_ss`",
+
+  "See Section 9.2.2 of Bishop, \"Pattern recognition and machine learning\", 2006",
+
+  true
+)
+.add_prototype("gmm_machine")
+.add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object");
+static PyObject* PyBobLearnEMMLGMMTrainer_mStep(PyBobLearnEMMLGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = mStep.kwlist(0);
+
+  PyBobLearnEMGMMMachineObject* gmm_machine;
+  
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine)) return 0;
+
+  self->cxx->mStep(*gmm_machine->cxx);
+
+  BOB_CATCH_MEMBER("cannot perform the mStep method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** computeLikelihood ***/
+static auto compute_likelihood = bob::extension::FunctionDoc(
+  "compute_likelihood",
+  "This functions returns the average min (Square Euclidean) distance (average distance to the closest mean)",
+  0,
+  true
+)
+.add_prototype("gmm_machine")
+.add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object");
+static PyObject* PyBobLearnEMMLGMMTrainer_compute_likelihood(PyBobLearnEMMLGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = compute_likelihood.kwlist(0);
+
+  PyBobLearnEMGMMMachineObject* gmm_machine;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine)) return 0;
+
+  double value = self->cxx->computeLikelihood(*gmm_machine->cxx);
+  return Py_BuildValue("d", value);
+
+  BOB_CATCH_MEMBER("cannot perform the computeLikelihood method", 0)
+}
+
+
+
+static PyMethodDef PyBobLearnEMMLGMMTrainer_methods[] = {
+  {
+    initialize.name(),
+    (PyCFunction)PyBobLearnEMMLGMMTrainer_initialize,
+    METH_VARARGS|METH_KEYWORDS,
+    initialize.doc()
+  },
+  {
+    eStep.name(),
+    (PyCFunction)PyBobLearnEMMLGMMTrainer_eStep,
+    METH_VARARGS|METH_KEYWORDS,
+    eStep.doc()
+  },
+  {
+    mStep.name(),
+    (PyCFunction)PyBobLearnEMMLGMMTrainer_mStep,
+    METH_VARARGS|METH_KEYWORDS,
+    mStep.doc()
+  },
+  {
+    compute_likelihood.name(),
+    (PyCFunction)PyBobLearnEMMLGMMTrainer_compute_likelihood,
+    METH_VARARGS|METH_KEYWORDS,
+    compute_likelihood.doc()
+  },
+  {0} /* Sentinel */
+};
+
+
+/******************************************************************/
+/************ Module Section **************************************/
+/******************************************************************/
+
+// Define the ML_GMMTrainer type struct; will be initialized later
+PyTypeObject PyBobLearnEMMLGMMTrainer_Type = {
+  PyVarObject_HEAD_INIT(0,0)
+  0
+};
+
+bool init_BobLearnEMMLGMMTrainer(PyObject* module)
+{
+  // initialize the type struct
+  PyBobLearnEMMLGMMTrainer_Type.tp_name      = ML_GMMTrainer_doc.name();
+  PyBobLearnEMMLGMMTrainer_Type.tp_basicsize = sizeof(PyBobLearnEMMLGMMTrainerObject);
+  PyBobLearnEMMLGMMTrainer_Type.tp_flags     = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;//Enable the class inheritance
+  PyBobLearnEMMLGMMTrainer_Type.tp_doc       = ML_GMMTrainer_doc.doc();
+
+  // set the functions
+  PyBobLearnEMMLGMMTrainer_Type.tp_new          = PyType_GenericNew;
+  PyBobLearnEMMLGMMTrainer_Type.tp_init         = reinterpret_cast<initproc>(PyBobLearnEMMLGMMTrainer_init);
+  PyBobLearnEMMLGMMTrainer_Type.tp_dealloc      = reinterpret_cast<destructor>(PyBobLearnEMMLGMMTrainer_delete);
+  PyBobLearnEMMLGMMTrainer_Type.tp_richcompare  = reinterpret_cast<richcmpfunc>(PyBobLearnEMMLGMMTrainer_RichCompare);
+  PyBobLearnEMMLGMMTrainer_Type.tp_methods      = PyBobLearnEMMLGMMTrainer_methods;
+  PyBobLearnEMMLGMMTrainer_Type.tp_getset       = PyBobLearnEMMLGMMTrainer_getseters;
+  PyBobLearnEMMLGMMTrainer_Type.tp_call         = reinterpret_cast<ternaryfunc>(PyBobLearnEMMLGMMTrainer_compute_likelihood);
+
+
+  // check that everything is fine
+  if (PyType_Ready(&PyBobLearnEMMLGMMTrainer_Type) < 0) return false;
+
+  // add the type to the module
+  Py_INCREF(&PyBobLearnEMMLGMMTrainer_Type);
+  return PyModule_AddObject(module, "_ML_GMMTrainer", (PyObject*)&PyBobLearnEMMLGMMTrainer_Type) >= 0;
+}
+
diff --git a/bob/learn/em/__MAP_gmm_trainer__.py b/bob/learn/em/__MAP_gmm_trainer__.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f4af38590a26a498a639444c1dc7dd801099840
--- /dev/null
+++ b/bob/learn/em/__MAP_gmm_trainer__.py
@@ -0,0 +1,99 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+# Mon Jan 23 18:31:10 2015
+#
+# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
+
+from ._library import _MAP_GMMTrainer
+import numpy
+
+# define the class
+class MAP_GMMTrainer(_MAP_GMMTrainer):
+
+  def __init__(self, prior_gmm, update_means=True, update_variances=False, update_weights=False, convergence_threshold=0.001, max_iterations=10, converge_by_likelihood=True, **kwargs):
+    """
+    :py:class:`bob.learn.em.MAP_GMMTrainer` constructor
+
+    Keyword Parameters:
+      prior_gmm
+        A :py:class:`bob.learn.em.GMMMachine` to be adapted
+      update_means
+        Update the means on each iteration
+      update_variances
+        Update the variances on each iteration
+      update_weights
+        Update the weights on each iteration
+      convergence_threshold
+        Convergence threshold
+      max_iterations
+        Maximum number of iterations
+      converge_by_likelihood
+        Tells whether we compute the log likelihood as a convergence criterion, or not
+      alpha
+        Sets the alpha parameter directly (Eq (14) from [Reynolds2000]_), ignoring the zeroth order statistics as a weighting factor.
+      relevance_factor
+        If set, the Reynolds adaptation procedure will be applied, using this relevance factor. See Eq (14) from [Reynolds2000]_
+    """
+
+    if kwargs.get('alpha') is not None:
+      alpha = kwargs.get('alpha')
+      _MAP_GMMTrainer.__init__(self, prior_gmm, alpha=alpha, update_means=update_means, update_variances=update_variances, update_weights=update_weights)
+    else:
+      # defaults to the Reynolds adaptation procedure with relevance_factor=4., as in the C++ bindings
+      relevance_factor = kwargs.get('relevance_factor', 4.)
+      _MAP_GMMTrainer.__init__(self, prior_gmm, relevance_factor=relevance_factor, update_means=update_means, update_variances=update_variances, update_weights=update_weights)
+
+    self.convergence_threshold  = convergence_threshold
+    self.max_iterations         = max_iterations
+    self.converge_by_likelihood = converge_by_likelihood
+
+
+  def train(self, gmm_machine, data):
+    """
+    Train the :py:class:`bob.learn.em.GMMMachine` using data
+
+    Keyword Parameters:
+      gmm_machine
+        The :py:class:`bob.learn.em.GMMMachine` class
+      data
+        The data to be trained
+    """
+
+    # initialization
+    self.initialize(gmm_machine)
+
+    # do the Expectation-Maximization algorithm
+    average_output_previous = 0
+    average_output = -numpy.inf
+
+    # eStep
+    self.eStep(gmm_machine, data)
+
+    if self.converge_by_likelihood:
+      average_output = self.compute_likelihood(gmm_machine)
+
+    for i in range(self.max_iterations):
+      # saves average output from last iteration
+      average_output_previous = average_output
+
+      # mStep
+      self.mStep(gmm_machine)
+
+      # eStep
+      self.eStep(gmm_machine, data)
+
+      # computes log likelihood if required
+      if self.converge_by_likelihood:
+        average_output = self.compute_likelihood(gmm_machine)
+
+        # terminates if converged (and likelihood computation is set)
+        if abs((average_output_previous - average_output) / average_output_previous) <= self.convergence_threshold:
+          break
+
+
+# copy the documentation from the base class
+__doc__ = _MAP_GMMTrainer.__doc__
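+
+# A minimal usage sketch from client code (hedged: the GMMMachine constructor
+# signature (n_gaussians, n_inputs) is an assumption about the machine
+# bindings, which are not defined in this file):
+#
+#   import numpy
+#   import bob.learn.em
+#   prior   = bob.learn.em.GMMMachine(2, 3)   # UBM with 2 Gaussians in 3 dimensions
+#   adapted = bob.learn.em.GMMMachine(2, 3)   # machine to be MAP-adapted
+#   data    = numpy.random.randn(100, 3)
+#   trainer = bob.learn.em.MAP_GMMTrainer(prior, relevance_factor=4., update_means=True)
+#   trainer.train(adapted, data)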
diff --git a/bob/learn/em/__ML_gmm_trainer__.py b/bob/learn/em/__ML_gmm_trainer__.py
new file mode 100644
index 0000000000000000000000000000000000000000..35ac30c4dfbf104d3607f2260a5a2bbd828ba2b4
--- /dev/null
+++ b/bob/learn/em/__ML_gmm_trainer__.py
@@ -0,0 +1,85 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+# Mon Jan 22 18:29:10 2015
+#
+# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
+
+from ._library import _ML_GMMTrainer
+import numpy
+
+# define the class
+class ML_GMMTrainer(_ML_GMMTrainer):
+
+  def __init__(self, update_means=True, update_variances=False, update_weights=False, convergence_threshold=0.001, max_iterations=10, converge_by_likelihood=True):
+    """
+    :py:class:`bob.learn.em.ML_GMMTrainer` constructor
+
+    Keyword Parameters:
+      update_means
+        Update the means on each iteration
+      update_variances
+        Update the variances on each iteration
+      update_weights
+        Update the weights on each iteration
+      convergence_threshold
+        Convergence threshold
+      max_iterations
+        Maximum number of iterations
+      converge_by_likelihood
+        Tells whether we compute the log likelihood as a convergence criterion, or not
+    """
+
+    _ML_GMMTrainer.__init__(self, update_means=update_means, update_variances=update_variances, update_weights=update_weights)
+    self.convergence_threshold  = convergence_threshold
+    self.max_iterations         = max_iterations
+    self.converge_by_likelihood = converge_by_likelihood
+
+
+  def train(self, gmm_machine, data):
+    """
+    Train the :py:class:`bob.learn.em.GMMMachine` using data
+
+    Keyword Parameters:
+      gmm_machine
+        The :py:class:`bob.learn.em.GMMMachine` class
+      data
+        The data to be trained
+    """
+
+    # initialization
+    self.initialize(gmm_machine)
+
+    # do the Expectation-Maximization algorithm
+    average_output_previous = 0
+    average_output = -numpy.inf
+
+    # eStep
+    self.eStep(gmm_machine, data)
+
+    if self.converge_by_likelihood:
+      average_output = self.compute_likelihood(gmm_machine)
+
+    for i in range(self.max_iterations):
+      # saves average output from last iteration
+      average_output_previous = average_output
+
+      # mStep
+      self.mStep(gmm_machine)
+
+      # eStep
+      self.eStep(gmm_machine, data)
+
+      # computes log likelihood if required
+      if self.converge_by_likelihood:
+        average_output = self.compute_likelihood(gmm_machine)
+
+        # terminates if converged (and likelihood computation is set)
+        if abs((average_output_previous - average_output) / average_output_previous) <= self.convergence_threshold:
+          break
+
+
+# copy the documentation from the base class
+__doc__ = _ML_GMMTrainer.__doc__
diff --git a/bob/learn/em/__empca_trainer__.py b/bob/learn/em/__empca_trainer__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d710dc8d4f0e8a067eca6f2ba9109a4c1d56ebb5
--- /dev/null
+++ b/bob/learn/em/__empca_trainer__.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+# Wed Feb 04 13:35:10 2015 +0200
+#
+# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
+
+from ._library import _EMPCATrainer
+import numpy
+
+# define the class
+class EMPCATrainer (_EMPCATrainer):
+
+  def __init__(self, convergence_threshold=0.001, max_iterations=10, compute_likelihood=True):
+    """
+    :py:class:`bob.learn.em.EMPCATrainer` constructor
+
+    Keyword Parameters:
+      convergence_threshold
+        Convergence threshold
+      max_iterations
+        Maximum number of iterations
+      compute_likelihood
+        Tells whether we compute the log likelihood as a convergence criterion, or not
+    """
+
+    _EMPCATrainer.__init__(self, convergence_threshold)
+    self._convergence_threshold = convergence_threshold
+    self._max_iterations        = max_iterations
+    self._compute_likelihood    = compute_likelihood
+
+
+  def train(self, linear_machine, data):
+    """
+    Train the :py:class:`bob.learn.linear.Machine` using data
+
+    Keyword Parameters:
+      linear_machine
+        The :py:class:`bob.learn.linear.Machine` class
+      data
+        The data to be trained
+    """
+
+    # initialization
+    self.initialize(linear_machine, data)
+
+    # do the Expectation-Maximization algorithm
+    average_output_previous = 0
+    average_output = -numpy.inf
+
+    # eStep
+    self.eStep(linear_machine, data)
+
+    if self._compute_likelihood:
+      average_output = self.compute_likelihood(linear_machine)
+
+    for i in range(self._max_iterations):
+
+      # saves average output from last iteration
+      average_output_previous = average_output
+
+      # mStep
+      self.mStep(linear_machine)
+
+      # eStep
+      self.eStep(linear_machine, data)
+
+      # computes log likelihood if required
+      if self._compute_likelihood:
+        average_output = self.compute_likelihood(linear_machine)
+
+        # terminates if converged (and likelihood computation is set)
+        if abs((average_output_previous - average_output) / average_output_previous) <= self._convergence_threshold:
+          break
+
+
+# copy the documentation from the base class
+__doc__ = _EMPCATrainer.__doc__
diff --git a/bob/learn/em/__init__.py b/bob/learn/em/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..dbe487be5ec210c322fce55e9e014ca96e82840a
--- /dev/null
+++ b/bob/learn/em/__init__.py
@@ -0,0 +1,58 @@
+# import Libraries of other lib packages
+import bob.io.base
+import bob.math
+import bob.learn.linear
+
+# import our own Library
+import bob.extension
+bob.extension.load_bob_library('bob.learn.em', __file__)
+
+from ._library import *
+from . import version
+from .version import module as __version__
+from .__kmeans_trainer__ import *
+from .__ML_gmm_trainer__ import *
+from .__MAP_gmm_trainer__ import *
+from .__jfa_trainer__ import *
+from .__isv_trainer__ import *
+from .__ivector_trainer__ import *
+from .__plda_trainer__ import *
+
+
+def ztnorm_same_value(vect_a, vect_b):
+  """Computes the matrix of boolean D for the ZT-norm, which indicates where
+     the client ids of the T-Norm models and Z-Norm samples match.
+
+     vect_a An (ordered) list of client_id corresponding to the T-Norm models
+     vect_b An (ordered) list of client_id corresponding to the Z-Norm impostor samples
+  """
+  import numpy
+  sameMatrix = numpy.ndarray((len(vect_a), len(vect_b)), 'bool')
+  for j in range(len(vect_a)):
+    for i in range(len(vect_b)):
+      sameMatrix[j, i] = (vect_a[j] == vect_b[i])
+  return sameMatrix
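+
+# Note (hedged): when the client ids are plain numbers or strings, the double
+# loop in ztnorm_same_value is equivalent to the numpy broadcast
+#   numpy.array(vect_a)[:, None] == numpy.array(vect_b)[None, :]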
+
+
+def get_config():
+  """Returns a string containing the configuration information.
+  """
+
+  import pkg_resources
+  from .version import externals
+
+  packages = pkg_resources.require(__name__)
+  this = packages[0]
+  deps = packages[1:]
+
+  retval =  "%s: %s (%s)\n" % (this.key, this.version, this.location)
+  retval += "  - c/c++ dependencies:\n"
+  for k in sorted(externals): retval += "    - %s: %s\n" % (k, externals[k])
+  retval += "  - python dependencies:\n"
+  for d in deps: retval += "    - %s: %s (%s)\n" % (d.key, d.version, d.location)
+
+  return retval.strip()
+
+# gets sphinx autodoc done right - don't remove it
+__all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/bob/learn/em/__isv_trainer__.py b/bob/learn/em/__isv_trainer__.py
new file mode 100644
index 0000000000000000000000000000000000000000..9f4b84fa5498b2dd029ec743038cf112514a95fd
--- /dev/null
+++ b/bob/learn/em/__isv_trainer__.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+# Mon Feb 02 21:40:10 2015 +0200
+#
+# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
+
+from ._library import _ISVTrainer
+import numpy
+
+# define the class
+class ISVTrainer (_ISVTrainer):
+
+  def __init__(self, max_iterations=10, relevance_factor=4.):
+    """
+    :py:class:`bob.learn.em.ISVTrainer` constructor
+
+    Keyword Parameters:
+      max_iterations
+        Maximum number of iterations
+      relevance_factor
+        The relevance factor used by the ISV training procedure
+    """
+    _ISVTrainer.__init__(self, relevance_factor)
+    self._max_iterations         = max_iterations
+
+
+  def train(self, isv_base, data):
+    """
+    Train the :py:class:`bob.learn.em.ISVBase` using data
+
+    Keyword Parameters:
+      isv_base
+        The :py:class:`bob.learn.em.ISVBase` class
+      data
+        The data to be trained
+    """
+
+    # initialization
+    self.initialize(isv_base, data)
+
+    for i in range(self._max_iterations):
+      # eStep
+      self.eStep(isv_base, data)
+      # mStep
+      self.mStep(isv_base)
+
+
+
+# copy the documentation from the base class
+__doc__ = _ISVTrainer.__doc__
diff --git a/bob/learn/em/__ivector_trainer__.py b/bob/learn/em/__ivector_trainer__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b31e0ed8c431deccdd8610faaf45cd5252786766
--- /dev/null
+++ b/bob/learn/em/__ivector_trainer__.py
@@ -0,0 +1,51 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+# Tue Feb 03 13:20:10 2015 +0200
+#
+# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
+
+from ._library import _IVectorTrainer
+import numpy
+
+# define the class
+class IVectorTrainer (_IVectorTrainer):
+
+  def __init__(self, max_iterations=10, update_sigma=False):
+    """
+    :py:class:`bob.learn.em.IVectorTrainer` constructor
+
+    Keyword Parameters:
+      max_iterations
+        Maximum number of iterations
+      update_sigma
+        Tells whether the residual covariance matrix sigma is updated during training, or not
+    """
+    _IVectorTrainer.__init__(self, update_sigma)
+    self._max_iterations = max_iterations
+
+
+  def train(self, ivector_machine, data):
+    """
+    Train the :py:class:`bob.learn.em.IVectorMachine` using data
+
+    Keyword Parameters:
+      ivector_machine
+        The :py:class:`bob.learn.em.IVectorMachine` class
+      data
+        The data to be trained
+    """
+
+    # initialization
+    self.initialize(ivector_machine, data)
+
+    for i in range(self._max_iterations):
+      # eStep
+      self.eStep(ivector_machine, data)
+      # mStep
+      self.mStep(ivector_machine)
+
+
+
+# copy the documentation from the base class
+__doc__ = _IVectorTrainer.__doc__
diff --git a/bob/learn/em/__jfa_trainer__.py b/bob/learn/em/__jfa_trainer__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2d8ccec0605c124e3bc0922179543014773d7255
--- /dev/null
+++ b/bob/learn/em/__jfa_trainer__.py
@@ -0,0 +1,71 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+# Sun Feb 01 21:10:10 2015 +0200
+#
+# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
+
+from ._library import _JFATrainer
+import numpy
+
+# define the class
+class JFATrainer (_JFATrainer):
+
+  def __init__(self, max_iterations=10):
+    """
+    :py:class:`bob.learn.em.JFATrainer` constructor
+
+    Keyword Parameters:
+      max_iterations
+        Maximum number of iterations
+    """
+
+    _JFATrainer.__init__(self)
+    self._max_iterations         = max_iterations
+
+
+  def train_loop(self, jfa_base, data):
+    """
+    Run the training loop of the :py:class:`bob.learn.em.JFABase` using data (without initialization)
+
+    Keyword Parameters:
+      jfa_base
+        The :py:class:`bob.learn.em.JFABase` class
+      data
+        The data to be trained
+    """
+    #V Subspace
+    for i in range(self._max_iterations):
+      self.e_step1(jfa_base, data)
+      self.m_step1(jfa_base, data)
+    self.finalize1(jfa_base, data)
+
+    #U subspace
+    for i in range(self._max_iterations):
+      self.e_step2(jfa_base, data)
+      self.m_step2(jfa_base, data)
+    self.finalize2(jfa_base, data)
+
+    # d subspace
+    for i in range(self._max_iterations):
+      self.e_step3(jfa_base, data)
+      self.m_step3(jfa_base, data)
+    self.finalize3(jfa_base, data)
+
+
+  def train(self, jfa_base, data):
+    """
+    Train the :py:class:`bob.learn.em.JFABase` using data
+
+    Keyword Parameters:
+      jfa_base
+        The :py:class:`bob.learn.em.JFABase` class
+      data
+        The data to be trained
+    """
+    self.initialize(jfa_base, data)
+    self.train_loop(jfa_base, data)
+
+
+# copy the documentation from the base class
+__doc__ = _JFATrainer.__doc__
diff --git a/bob/learn/em/__kmeans_trainer__.py b/bob/learn/em/__kmeans_trainer__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b7ef48e28452e3b79208a64782e252d1856f495f
--- /dev/null
+++ b/bob/learn/em/__kmeans_trainer__.py
@@ -0,0 +1,81 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+# Mon Jan 19 11:35:10 2015 +0200
+#
+# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
+
+from ._library import _KMeansTrainer
+import numpy
+
+# define the class
+class KMeansTrainer (_KMeansTrainer):
+
+  def __init__(self, initialization_method="RANDOM", convergence_threshold=0.001, max_iterations=10, converge_by_average_min_distance=True):
+    """
+    :py:class:`bob.learn.em.KMeansTrainer` constructor
+
+    Keyword Parameters:
+      initialization_method
+        The initialization method to generate the initial means
+      convergence_threshold
+        Convergence threshold
+      max_iterations
+        Maximum number of iterations
+      converge_by_average_min_distance
+        Tells whether we compute the average min (square Euclidean) distance as a convergence criterion, or not
+    """
+
+    _KMeansTrainer.__init__(self, initialization_method=initialization_method)
+    self._convergence_threshold            = convergence_threshold
+    self._max_iterations                   = max_iterations
+    self._converge_by_average_min_distance = converge_by_average_min_distance
+
+
+  def train(self, kmeans_machine, data):
+    """
+    Train the :py:class:`bob.learn.em.KMeansMachine` using data
+
+    Keyword Parameters:
+      kmeans_machine
+        The :py:class:`bob.learn.em.KMeansMachine` class
+      data
+        The data to be trained
+    """
+
+    # initialization
+    self.initialize(kmeans_machine, data)
+
+    # do the Expectation-Maximization algorithm
+    average_output_previous = 0
+    average_output = -numpy.inf
+
+    # eStep
+    self.eStep(kmeans_machine, data)
+
+    if self._converge_by_average_min_distance:
+      average_output = self.compute_likelihood(kmeans_machine)
+
+    for i in range(self._max_iterations):
+
+      # saves average output from last iteration
+      average_output_previous = average_output
+
+      # mStep
+      self.mStep(kmeans_machine)
+
+      # eStep
+      self.eStep(kmeans_machine, data)
+
+      # computes the average min distance if required
+      if self._converge_by_average_min_distance:
+        average_output = self.compute_likelihood(kmeans_machine)
+
+        # terminates if converged (and distance computation is set)
+        if abs((average_output_previous - average_output) / average_output_previous) <= self._convergence_threshold:
+          break
+
+
+# copy the documentation from the base class
+__doc__ = _KMeansTrainer.__doc__
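+
+# A minimal usage sketch from client code (hedged: the KMeansMachine
+# constructor signature (n_means, n_inputs) is an assumption about the machine
+# bindings, which are not defined in this file):
+#
+#   import numpy
+#   import bob.learn.em
+#   machine = bob.learn.em.KMeansMachine(2, 3)   # 2 means in 3 dimensions
+#   data    = numpy.vstack((numpy.random.randn(50, 3), numpy.random.randn(50, 3) + 5.))
+#   trainer = bob.learn.em.KMeansTrainer("RANDOM", convergence_threshold=1e-5)
+#   trainer.train(machine, data)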
diff --git a/bob/learn/em/__plda_trainer__.py b/bob/learn/em/__plda_trainer__.py
new file mode 100644
index 0000000000000000000000000000000000000000..3989b8d23a7071f70892a514852d76ad68922510
--- /dev/null
+++ b/bob/learn/em/__plda_trainer__.py
@@ -0,0 +1,50 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+# Mon Feb 02 21:40:10 2015 +0200
+#
+# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
+
+from ._library import _PLDATrainer
+import numpy
+
+# define the class
+class PLDATrainer (_PLDATrainer):
+
+  def __init__(self, max_iterations=10, use_sum_second_order=False):
+    """
+    :py:class:`bob.learn.em.PLDATrainer` constructor
+
+    Keyword Parameters:
+      max_iterations
+        Maximum number of iterations
+      use_sum_second_order
+        Tells whether the sum of the second order statistics is used during training, or not
+    """
+    _PLDATrainer.__init__(self, use_sum_second_order)
+    self._max_iterations = max_iterations
+
+
+  def train(self, plda_base, data):
+    """
+    Train the :py:class:`bob.learn.em.PLDABase` using data
+
+    Keyword Parameters:
+      plda_base
+        The :py:class:`bob.learn.em.PLDABase` class
+      data
+        The data to be trained
+    """
+
+    # initialization
+    self.initialize(plda_base, data)
+
+    for i in range(self._max_iterations):
+      # eStep
+      self.e_step(plda_base, data)
+      # mStep
+      self.m_step(plda_base, data)
+    self.finalize(plda_base, data)
+
+
+
+# copy the documentation from the base class
+__doc__ = _PLDATrainer.__doc__
diff --git a/bob/learn/em/cpp/BICMachine.cpp b/bob/learn/em/cpp/BICMachine.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..4abe72429279a6f6a71fe88f8ac7cd858c79ec0a
--- /dev/null
+++ b/bob/learn/em/cpp/BICMachine.cpp
@@ -0,0 +1,348 @@
+/**
+ * @date Tue Jun  5 16:54:27 CEST 2012
+ * @author Manuel Guenther <Manuel.Guenther@idiap.ch>
+ *
+ * A machine that implements the Bayesian Intrapersonal/Extrapersonal
+ * Classifier (BIC). It computes scores for the intrapersonal and the
+ * extrapersonal class, either based on a PCA projection of the input
+ * (the BIC model of Teixeira) or directly on the mean and variance
+ * statistics of the training data (the IEC model of Guenther and Wuertz).
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include <bob.learn.em/BICMachine.h>
+#include <bob.math/linear.h>
+#include <bob.core/assert.h>
+#include <bob.core/check.h>
+
+/**
+ * Initializes an empty BIC Machine
+ *
+ * @param use_DFFS  Whether to add the Distance From Feature Space (DFFS) term during score computation
+ */
+bob::learn::em::BICMachine::BICMachine(bool use_DFFS)
+:
+  m_project_data(use_DFFS),
+  m_use_DFFS(use_DFFS)
+{}
+
+/**
+ * Copy constructor, i.e., makes a deep copy of the given machine.
+ *
+ * @param  other  The other BICMachine to copy
+ */
+bob::learn::em::BICMachine::BICMachine(const BICMachine& other)
+:
+  m_project_data(other.m_project_data),
+  m_use_DFFS(other.m_use_DFFS)
+{
+  if (m_project_data){
+    setBIC(false, other.m_mu_I, other.m_lambda_I, other.m_Phi_I, other.m_rho_I, true);
+    setBIC(true , other.m_mu_E, other.m_lambda_E, other.m_Phi_E, other.m_rho_E, true);
+  } else {
+    setIEC(false, other.m_mu_I, other.m_lambda_I, true);
+    setIEC(true , other.m_mu_E, other.m_lambda_E, true);
+  }
+}
+
+/**
+ * Assigns the other BICMachine to this, i.e., makes a deep copy of the given BICMachine
+ *
+ * @param  other  The other BICMachine to get a deep copy of
+ * @return a reference to *this
+ */
+bob::learn::em::BICMachine& bob::learn::em::BICMachine::operator=(const BICMachine& other)
+{
+  if (this != &other)
+  {
+    if (other.m_project_data){
+      m_use_DFFS = other.m_use_DFFS;
+      setBIC(false, other.m_mu_I, other.m_lambda_I, other.m_Phi_I, other.m_rho_I, true);
+      setBIC(true , other.m_mu_E, other.m_lambda_E, other.m_Phi_E, other.m_rho_E, true);
+    } else {
+      m_use_DFFS = false;
+      setIEC(false, other.m_mu_I, other.m_lambda_I, true);
+      setIEC(true , other.m_mu_E, other.m_lambda_E, true);
+    }
+  }
+  return *this;
+}
+
+/**
+ * Compares if this machine and the given one are identical
+ *
+ * @param  other  The BICMachine to compare with
+ * @return true if both machines are identical, i.e., have exactly the same parameters, otherwise false
+ */
+bool bob::learn::em::BICMachine::operator==(const BICMachine& other) const
+{
+  return (m_project_data == other.m_project_data &&
+          (!m_project_data || m_use_DFFS == other.m_use_DFFS) &&
+          bob::core::array::isEqual(m_mu_I, other.m_mu_I) &&
+          bob::core::array::isEqual(m_mu_E, other.m_mu_E) &&
+          bob::core::array::isEqual(m_lambda_I, other.m_lambda_I) &&
+          bob::core::array::isEqual(m_lambda_E, other.m_lambda_E) &&
+          (!m_project_data ||
+              (bob::core::array::isEqual(m_Phi_I, other.m_Phi_I) &&
+               bob::core::array::isEqual(m_Phi_E, other.m_Phi_E) &&
+               (!m_use_DFFS || (m_rho_I == other.m_rho_I && m_rho_E == other.m_rho_E)))));
+}
+
+/**
+ * Checks if this machine and the given one are different
+ *
+ * @param  other  The BICMachine to compare with
+ * @return false if both machines are identical, i.e., have exactly the same parameters, otherwise true
+ */
+bool bob::learn::em::BICMachine::operator!=(const BICMachine& other) const
+{
+  return !(this->operator==(other));
+}
+
+/**
+ * Compares the given machine with this for similarity
+ *
+ * @param  other  The BICMachine to compare with
+ * @param  r_epsilon  The largest value any parameter might relatively differ between the two machines
+ * @param  a_epsilon  The largest value any parameter might absolutely differ between the two machines
+
+ * @return true if both machines are approximately equal, otherwise false
+ */
+bool bob::learn::em::BICMachine::is_similar_to(const BICMachine& other,
+  const double r_epsilon, const double a_epsilon) const
+{
+  if (m_project_data){
+    // compare data
+    if (not bob::core::array::hasSameShape(m_Phi_I, other.m_Phi_I)) return false;
+    if (not bob::core::array::hasSameShape(m_Phi_E, other.m_Phi_E)) return false;
+    // check that the projection matrices are close,
+    // but allow that eigen vectors might have opposite directions
+    // (i.e., they are either identical -> difference is 0, or opposite -> sum is zero)
+    for (int i = m_Phi_I.extent(1); i--;){
+      const blitz::Array<double,1>& sub1 = m_Phi_I(blitz::Range::all(), i);
+      const blitz::Array<double,1>& sub2 = other.m_Phi_I(blitz::Range::all(), i);
+      blitz::Array<double,1> sub2_negative(-sub2);
+      if (!bob::core::array::isClose(sub1, sub2, r_epsilon, a_epsilon) && !bob::core::array::isClose(sub1, sub2_negative, r_epsilon, a_epsilon)) return false;
+    }
+    for (int i = m_Phi_E.extent(1); i--;){
+      const blitz::Array<double,1>& sub1 = m_Phi_E(blitz::Range::all(), i);
+      const blitz::Array<double,1>& sub2 = other.m_Phi_E(blitz::Range::all(), i);
+      blitz::Array<double,1> sub2_negative(-sub2);
+      if (!bob::core::array::isClose(sub1, sub2, r_epsilon, a_epsilon) && !bob::core::array::isClose(sub1, sub2_negative, r_epsilon, a_epsilon)) return false;
+    }
+  }
+
+  return (m_project_data == other.m_project_data &&
+          (!m_project_data || m_use_DFFS == other.m_use_DFFS) &&
+          bob::core::array::isClose(m_mu_I, other.m_mu_I, r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(m_mu_E, other.m_mu_E, r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(m_lambda_I, other.m_lambda_I, r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(m_lambda_E, other.m_lambda_E, r_epsilon, a_epsilon) &&
+          (!m_project_data ||
+               (!m_use_DFFS || (bob::core::isClose(m_rho_I, other.m_rho_I, r_epsilon, a_epsilon) &&
+                                bob::core::isClose(m_rho_E, other.m_rho_E, r_epsilon, a_epsilon)))));
+}
+
+
+
+void bob::learn::em::BICMachine::initialize(bool clazz, int input_length, int projected_length){
+  blitz::Array<double,1>& diff = clazz ? m_diff_E : m_diff_I;
+  blitz::Array<double,1>& proj = clazz ? m_proj_E : m_proj_I;
+  diff.resize(input_length);
+  proj.resize(projected_length);
+}
+
+/**
+ * Sets the parameters of the given class that are required for computing the IEC scores (Guenther, Wuertz)
+ *
+ * @param  clazz   false for the intrapersonal class, true for the extrapersonal one.
+ * @param  mean    The mean vector of the training data
+ * @param  variances  The variances of the training data
+ * @param  copy_data  If true, makes a deep copy of the given arrays; otherwise just references them (the default)
+ */
+void bob::learn::em::BICMachine::setIEC(
+    bool clazz,
+    const blitz::Array<double,1>& mean,
+    const blitz::Array<double,1>& variances,
+    bool copy_data
+){
+  m_project_data = false;
+  // select the right matrices to write
+  blitz::Array<double,1>& mu = clazz ? m_mu_E : m_mu_I;
+  blitz::Array<double,1>& lambda = clazz ? m_lambda_E : m_lambda_I;
+
+  // copy mean and variances
+  if (copy_data){
+    mu.resize(mean.shape());
+    mu = mean;
+    lambda.resize(variances.shape());
+    lambda = variances;
+  } else {
+    mu.reference(mean);
+    lambda.reference(variances);
+  }
+}
+
+/**
+ * Sets the parameters of the given class that are required for computing the BIC scores (Teixeira)
+ *
+ * @param  clazz   false for the intrapersonal class, true for the extrapersonal one.
+ * @param  mean    The mean vector of the training data
+ * @param  variances  The eigenvalues of the training data
+ * @param  projection  The PCA projection matrix
+ * @param  rho     The residual eigenvalues, used for DFFS calculation
+ * @param  copy_data  If true, makes a deep copy of the given arrays; otherwise just references them (the default)
+ */
+void bob::learn::em::BICMachine::setBIC(
+    bool clazz,
+    const blitz::Array<double,1>& mean,
+    const blitz::Array<double,1>& variances,
+    const blitz::Array<double,2>& projection,
+    const double rho,
+    bool copy_data
+){
+  m_project_data = true;
+  // select the right matrices to write
+  blitz::Array<double,1>& mu = clazz ? m_mu_E : m_mu_I;
+  blitz::Array<double,1>& lambda = clazz ? m_lambda_E : m_lambda_I;
+  blitz::Array<double,2>& Phi = clazz ? m_Phi_E : m_Phi_I;
+  double& rho_ = clazz ? m_rho_E : m_rho_I;
+
+  // copy information
+  if (copy_data){
+    mu.resize(mean.shape());
+    mu = mean;
+    lambda.resize(variances.shape());
+    lambda = variances;
+    Phi.resize(projection.shape());
+    Phi = projection;
+  } else {
+    mu.reference(mean);
+    lambda.reference(variances);
+    Phi.reference(projection);
+  }
+  rho_ = rho;
+
+  // check that rho has a reasonable value (if it is used)
+  if (m_use_DFFS && rho_ < 1e-12) throw std::runtime_error("The given average eigenvalue (rho) is too close to zero");
+
+  // initialize temporaries
+  initialize(clazz, Phi.shape()[0], Phi.shape()[1]);
+}
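+
+// For a concrete producer of these parameters, see BICTrainer::train_single()
+// in bob/learn/em/cpp/BICTrainer.cpp (added later in this patch): it runs PCA
+// on difference vectors and passes the resulting mean, eigenvalues, projection
+// matrix and residual eigenvalue average (rho) to this function.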
+
+/**
+ * Set or unset the usage of the Distance From Feature Space
+ *
+ * @param use_DFFS The new value of use_DFFS
+ */
+void bob::learn::em::BICMachine::use_DFFS(bool use_DFFS){
+  m_use_DFFS = use_DFFS;
+  if (m_project_data && m_use_DFFS && (m_rho_E < 1e-12 || m_rho_I < 1e-12)) throw std::runtime_error("The average eigenvalue (rho) is too close to zero, so using DFFS will not work");
+}
+
+/**
+ * Loads the BICMachine from the given hdf5 file.
+ *
+ * @param  config  The hdf5 file containing the required information.
+ */
+void bob::learn::em::BICMachine::load(bob::io::base::HDF5File& config){
+  //reads all data directly into the member variables
+  m_project_data = config.read<bool>("project_data");
+  m_mu_I.reference(config.readArray<double,1>("intra_mean"));
+  m_lambda_I.reference(config.readArray<double,1>("intra_variance"));
+  if (m_project_data){
+    m_use_DFFS = config.read<bool>("use_DFFS");
+    m_Phi_I.reference(config.readArray<double,2>("intra_subspace"));
+    initialize(false, m_Phi_I.shape()[0], m_Phi_I.shape()[1]);
+    m_rho_I = config.read<double>("intra_rho");
+  }
+
+  m_mu_E.reference(config.readArray<double,1>("extra_mean"));
+  m_lambda_E.reference(config.readArray<double,1>("extra_variance"));
+  if (m_project_data){
+    m_Phi_E.reference(config.readArray<double,2>("extra_subspace"));
+    initialize(true, m_Phi_E.shape()[0], m_Phi_E.shape()[1]);
+    m_rho_E = config.read<double>("extra_rho");
+  }
+  // check that rho has reasonable values
+  if (m_project_data && m_use_DFFS && (m_rho_E < 1e-12 || m_rho_I < 1e-12)) throw std::runtime_error("The loaded average eigenvalue (rho) is too close to zero");
+
+}
+
+/**
+ * Saves the parameters of the BICMachine to the given hdf5 file.
+ *
+ * @param  config  The hdf5 file to write the configuration into.
+ */
+void bob::learn::em::BICMachine::save(bob::io::base::HDF5File& config) const{
+  config.set("project_data", m_project_data);
+  config.setArray("intra_mean", m_mu_I);
+  config.setArray("intra_variance", m_lambda_I);
+  if (m_project_data){
+    config.set("use_DFFS", m_use_DFFS);
+    config.setArray("intra_subspace", m_Phi_I);
+    config.set("intra_rho", m_rho_I);
+  }
+
+  config.setArray("extra_mean", m_mu_E);
+  config.setArray("extra_variance", m_lambda_E);
+  if (m_project_data){
+    config.setArray("extra_subspace", m_Phi_E);
+    config.set("extra_rho", m_rho_E);
+  }
+}
+
+/**
+ * Computes the BIC or IEC score for the given input vector.
+ * The score itself is the log-likelihood score of the given input vector belonging to the intrapersonal class.
+ * No sanity checks of input and output are performed.
+ *
+ * @param  input  A vector (of difference values) to compute the BIC or IEC score for.
+ * @param  output The one-element array that will contain the score afterwards.
+ */
+void bob::learn::em::BICMachine::forward_(const blitz::Array<double,1>& input, double& output) const{
+  if (m_project_data){
+    // subtract mean
+    m_diff_I = input - m_mu_I;
+    m_diff_E = input - m_mu_E;
+    // project data to intrapersonal and extrapersonal subspace
+    bob::math::prod(m_diff_I, m_Phi_I, m_proj_I);
+    bob::math::prod(m_diff_E, m_Phi_E, m_proj_E);
+
+    // compute Mahalanobis distance
+    output = blitz::sum(blitz::pow2(m_proj_E) / m_lambda_E) - blitz::sum(blitz::pow2(m_proj_I) / m_lambda_I);
+
+    // add the DFFS?
+    if (m_use_DFFS){
+      output += blitz::sum(blitz::pow2(m_diff_E) - blitz::pow2(m_proj_E)) / m_rho_E;
+      output -= blitz::sum(blitz::pow2(m_diff_I) - blitz::pow2(m_proj_I)) / m_rho_I;
+    }
+    output /= (m_proj_E.extent(0) + m_proj_I.extent(0));
+  } else {
+    // forward without projection
+    output = blitz::mean( blitz::pow2(input - m_mu_E) / m_lambda_E
+                        - blitz::pow2(input - m_mu_I) / m_lambda_I);
+  }
+}
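+
+// Example (illustrative only, not part of this file): scoring a difference
+// vector with a loaded BICMachine; file name and dimension are hypothetical.
+//
+//   bob::io::base::HDF5File hdf5(...);   // opened for reading
+//   bob::learn::em::BICMachine machine;
+//   machine.load(hdf5);
+//   blitz::Array<double,1> diff(dim);    // a difference vector of matching size
+//   double score;
+//   machine.forward(diff, score);        // higher score => more likely intrapersonal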
+
+/**
+ * Computes the BIC or IEC score for the given input vector.
+ * The score itself is the log-likelihood score of the given input vector belonging to the intrapersonal class.
+ * Sanity checks of input and output shape are performed.
+ *
+ * @param  input  A vector (of difference values) to compute the BIC or IEC score for.
+ * @param  output The one-element array that will contain the score afterwards.
+ */
+void bob::learn::em::BICMachine::forward(const blitz::Array<double,1>& input, double& output) const{
+  // perform some checks
+  bob::core::array::assertSameShape(input, m_mu_E);
+
+  // call the actual method
+  forward_(input, output);
+}
+
diff --git a/bob/learn/em/cpp/BICTrainer.cpp b/bob/learn/em/cpp/BICTrainer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..16d0726bd4575fa2a249555777cd5d17a616241c
--- /dev/null
+++ b/bob/learn/em/cpp/BICTrainer.cpp
@@ -0,0 +1,94 @@
+/**
+ * @date Wed Jun  6 10:29:09 CEST 2012
+ * @author Manuel Guenther <Manuel.Guenther@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include <bob.learn.em/BICTrainer.h>
+#include <bob.learn.linear/pca.h>
+#include <bob.learn.linear/machine.h>
+
+static double sqr(const double& x){
+  return x*x;
+}
+
+/**
+ * This function trains one of the classes of the given machine with the given data.
+ * It computes either BIC projection matrices, or IEC mean and variance.
+ *
+ * @param  clazz    false for the intrapersonal class, true for the extrapersonal one.
+ * @param  machine  The machine to be trained.
+ * @param  differences  A set of (intra/extra)-personal difference vectors that should be trained.
+ */
+void bob::learn::em::BICTrainer::train_single(bool clazz, bob::learn::em::BICMachine& machine, const blitz::Array<double,2>& differences) const {
+  int subspace_dim = clazz ? m_M_E : m_M_I;
+  int input_dim = differences.extent(1);
+  int data_count = differences.extent(0);
+  blitz::Range a = blitz::Range::all();
+
+  if (subspace_dim){
+    // train the class using BIC
+
+    // Compute PCA on the given dataset
+    bob::learn::linear::PCATrainer trainer;
+    const int n_eigs = trainer.output_size(differences);
+    bob::learn::linear::Machine pca(input_dim, n_eigs);
+    blitz::Array<double,1> variances(n_eigs);
+    trainer.train(pca, variances, differences);
+
+    // compute rho
+    double rho = 0.;
+    int non_zero_eigenvalues = std::min(input_dim, data_count-1);
+    // assert that the number of kept eigenvalues is not chosen too big
+    if (subspace_dim >= non_zero_eigenvalues)
+      throw std::runtime_error((boost::format("The chosen subspace dimension %d must be smaller than the theoretical number of nonzero eigenvalues %d")%subspace_dim%non_zero_eigenvalues).str());
+    // compute the average of the remaining eigenvalues
+    for (int i = subspace_dim; i < non_zero_eigenvalues; ++i){
+      rho += variances(i);
+    }
+    rho /= non_zero_eigenvalues - subspace_dim;
+
+    // limit dimensionalities
+    pca.resize(input_dim, subspace_dim);
+    variances.resizeAndPreserve(subspace_dim);
+
+    // check that all variances are meaningful
+    for (int i = 0; i < subspace_dim; ++i){
+      if (variances(i) < 1e-12)
+        throw std::runtime_error((boost::format("The chosen subspace dimension is %d, but the %dth eigenvalue is already to small")%subspace_dim%i).str());
+    }
+
+    // initialize the machine
+    blitz::Array<double, 2> projection = pca.getWeights();
+    blitz::Array<double, 1> mean = pca.getInputSubtraction();
+    machine.setBIC(clazz, mean, variances, projection, rho);
+  } else {
+    // train the class using IEC
+    // => compute mean and variance only
+    blitz::Array<double,1> mean(input_dim), variance(input_dim);
+
+    // compute mean and variance
+    mean = 0.;
+    variance = 0.;
+    for (int n = data_count; n--;){
+      const blitz::Array<double,1>& diff = differences(n,a);
+      assert(diff.shape()[0] == input_dim);
+      for (int i = input_dim; i--;){
+        mean(i) += diff(i);
+        variance(i) += sqr(diff(i));
+      }
+    }
+    // normalize mean and variances
+    for (int i = 0; i < input_dim; ++i){
+      // unbiased variance estimate (computed before the mean is normalized)
+      variance(i) = (variance(i) - sqr(mean(i)) / data_count) / (data_count - 1.);
+      mean(i) /= data_count;
+      if (variance(i) < 1e-12)
+        throw std::runtime_error((boost::format("The variance of the %dth dimension is too small. Check your data!")%i).str());
+    }
+
+    // set the results to the machine
+    machine.setIEC(clazz, mean, variance);
+  }
+}
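+
+// Example (illustrative only): training both classes of a BICMachine from
+// intrapersonal and extrapersonal difference vectors, mirroring two calls to
+// the function above. It assumes a BICTrainer `trainer' whose subspace sizes
+// (m_M_I, m_M_E) were set at construction (constructor not shown in this file).
+//
+//   bob::learn::em::BICMachine machine;
+//   trainer.train_single(false, machine, intra_differences); // intrapersonal class
+//   trainer.train_single(true,  machine, extra_differences); // extrapersonal class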
diff --git a/bob/learn/em/cpp/EMPCATrainer.cpp b/bob/learn/em/cpp/EMPCATrainer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c27a010a58a5152d7213c744e0dc83f8dba821bf
--- /dev/null
+++ b/bob/learn/em/cpp/EMPCATrainer.cpp
@@ -0,0 +1,418 @@
+/**
+ * @date Tue Oct 11 12:18:23 2011 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include <vector>
+#include <algorithm>
+#include <boost/random.hpp>
+#include <cmath>
+
+#include <bob.learn.em/EMPCATrainer.h>
+#include <bob.core/array_copy.h>
+#include <bob.core/check.h>
+#include <bob.math/linear.h>
+#include <bob.math/det.h>
+#include <bob.math/inv.h>
+#include <bob.math/stats.h>
+
+bob::learn::em::EMPCATrainer::EMPCATrainer(bool compute_likelihood):
+  m_compute_likelihood(compute_likelihood),
+  m_rng(new boost::mt19937()),
+  m_S(0,0),
+  m_z_first_order(0,0), m_z_second_order(0,0,0),
+  m_inW(0,0), m_invM(0,0), m_sigma2(0), m_f_log2pi(0),
+  m_tmp_dxf(0,0), m_tmp_d(0), m_tmp_f(0),
+  m_tmp_dxd_1(0,0), m_tmp_dxd_2(0,0),
+  m_tmp_fxd_1(0,0), m_tmp_fxd_2(0,0),
+  m_tmp_fxf_1(0,0), m_tmp_fxf_2(0,0)
+{
+}
+
+bob::learn::em::EMPCATrainer::EMPCATrainer(const bob::learn::em::EMPCATrainer& other):
+  m_compute_likelihood(other.m_compute_likelihood),
+  m_rng(other.m_rng),
+  m_S(bob::core::array::ccopy(other.m_S)),
+  m_z_first_order(bob::core::array::ccopy(other.m_z_first_order)),
+  m_z_second_order(bob::core::array::ccopy(other.m_z_second_order)),
+  m_inW(bob::core::array::ccopy(other.m_inW)),
+  m_invM(bob::core::array::ccopy(other.m_invM)),
+  m_sigma2(other.m_sigma2), m_f_log2pi(other.m_f_log2pi),
+  m_tmp_dxf(bob::core::array::ccopy(other.m_tmp_dxf)),
+  m_tmp_d(bob::core::array::ccopy(other.m_tmp_d)),
+  m_tmp_f(bob::core::array::ccopy(other.m_tmp_f)),
+  m_tmp_dxd_1(bob::core::array::ccopy(other.m_tmp_dxd_1)),
+  m_tmp_dxd_2(bob::core::array::ccopy(other.m_tmp_dxd_2)),
+  m_tmp_fxd_1(bob::core::array::ccopy(other.m_tmp_fxd_1)),
+  m_tmp_fxd_2(bob::core::array::ccopy(other.m_tmp_fxd_2)),
+  m_tmp_fxf_1(bob::core::array::ccopy(other.m_tmp_fxf_1)),
+  m_tmp_fxf_2(bob::core::array::ccopy(other.m_tmp_fxf_2))
+{
+}
+
+bob::learn::em::EMPCATrainer::~EMPCATrainer()
+{
+}
+
+bob::learn::em::EMPCATrainer& bob::learn::em::EMPCATrainer::operator=
+  (const bob::learn::em::EMPCATrainer& other)
+{
+  if (this != &other)
+  {
+    m_rng                = other.m_rng;
+    m_compute_likelihood = other.m_compute_likelihood;
+    m_S = bob::core::array::ccopy(other.m_S);
+    m_z_first_order = bob::core::array::ccopy(other.m_z_first_order);
+    m_z_second_order = bob::core::array::ccopy(other.m_z_second_order);
+    m_inW = bob::core::array::ccopy(other.m_inW);
+    m_invM = bob::core::array::ccopy(other.m_invM);
+    m_sigma2 = other.m_sigma2;
+    m_f_log2pi = other.m_f_log2pi;
+    m_tmp_dxf = bob::core::array::ccopy(other.m_tmp_dxf);
+    m_tmp_d = bob::core::array::ccopy(other.m_tmp_d);
+    m_tmp_f = bob::core::array::ccopy(other.m_tmp_f);
+    m_tmp_dxd_1 = bob::core::array::ccopy(other.m_tmp_dxd_1);
+    m_tmp_dxd_2 = bob::core::array::ccopy(other.m_tmp_dxd_2);
+    m_tmp_fxd_1 = bob::core::array::ccopy(other.m_tmp_fxd_1);
+    m_tmp_fxd_2 = bob::core::array::ccopy(other.m_tmp_fxd_2);
+    m_tmp_fxf_1 = bob::core::array::ccopy(other.m_tmp_fxf_1);
+    m_tmp_fxf_2 = bob::core::array::ccopy(other.m_tmp_fxf_2);
+  }
+  return *this;
+}
+
+bool bob::learn::em::EMPCATrainer::operator==
+  (const bob::learn::em::EMPCATrainer &other) const
+{
+  return m_compute_likelihood == other.m_compute_likelihood &&
+        m_rng                   == other.m_rng &&
+        bob::core::array::isEqual(m_S, other.m_S) &&
+        bob::core::array::isEqual(m_z_first_order, other.m_z_first_order) &&
+        bob::core::array::isEqual(m_z_second_order, other.m_z_second_order) &&
+        bob::core::array::isEqual(m_inW, other.m_inW) &&
+        bob::core::array::isEqual(m_invM, other.m_invM) &&
+        m_sigma2 == other.m_sigma2 &&
+        m_f_log2pi == other.m_f_log2pi;
+}
+
+bool bob::learn::em::EMPCATrainer::operator!=
+  (const bob::learn::em::EMPCATrainer &other) const
+{
+  return !(this->operator==(other));
+}
+
+bool bob::learn::em::EMPCATrainer::is_similar_to
+  (const bob::learn::em::EMPCATrainer &other, const double r_epsilon,
+   const double a_epsilon) const
+{
+  return m_compute_likelihood == other.m_compute_likelihood &&
+         m_rng                == other.m_rng &&
+         bob::core::array::isClose(m_S, other.m_S, r_epsilon, a_epsilon) &&
+         bob::core::array::isClose(m_z_first_order, other.m_z_first_order, r_epsilon, a_epsilon) &&
+         bob::core::array::isClose(m_z_second_order, other.m_z_second_order, r_epsilon, a_epsilon) &&
+         bob::core::array::isClose(m_inW, other.m_inW, r_epsilon, a_epsilon) &&
+         bob::core::array::isClose(m_invM, other.m_invM, r_epsilon, a_epsilon) &&
+         bob::core::isClose(m_sigma2, other.m_sigma2, r_epsilon, a_epsilon) &&
+         bob::core::isClose(m_f_log2pi, other.m_f_log2pi, r_epsilon, a_epsilon);
+}
+
+void bob::learn::em::EMPCATrainer::initialize(bob::learn::linear::Machine& machine,
+  const blitz::Array<double,2>& ar)
+{
+  // reinitializes array members and checks dimensionality
+  initMembers(machine, ar);
+
+  // computes the mean and the covariance if required
+  computeMeanVariance(machine, ar);
+
+  // Random initialization of W and sigma2
+  initRandomWSigma2(machine);
+
+  // Computes the product m_inW = W^T.W
+  computeWtW(machine);
+  // Computes inverse(M), where M = Wt * W + sigma2 * Id
+  computeInvM();
+}
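+
+// Sketch of the expected calling sequence (the convergence test below is an
+// assumption about typical usage; this class does not enforce a stopping rule):
+//
+//   bob::learn::em::EMPCATrainer trainer;
+//   trainer.initialize(machine, data);
+//   double prev = -std::numeric_limits<double>::infinity();
+//   for (size_t it = 0; it < max_iterations; ++it) {
+//     trainer.eStep(machine, data);
+//     trainer.mStep(machine, data);
+//     const double llh = trainer.computeLikelihood(machine);
+//     if (llh - prev < convergence_threshold) break;
+//     prev = llh;
+//   }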
+
+
+void bob::learn::em::EMPCATrainer::initMembers(
+  const bob::learn::linear::Machine& machine,
+  const blitz::Array<double,2>& ar)
+{
+  // Gets dimensions
+  const size_t n_samples = ar.extent(0);
+  const size_t n_features = ar.extent(1);
+
+  // Checks that the dimensions are matching
+  const size_t n_inputs = machine.inputSize();
+  const size_t n_outputs = machine.outputSize();
+
+  // Checks that the dimensions are matching
+  if (n_inputs != n_features) {
+    boost::format m("number of inputs (%u) does not match the number of features (%u)");
+    m % n_inputs % n_features;
+    throw std::runtime_error(m.str());
+  }
+
+  // Covariance matrix S is only required to compute the log likelihood
+  if (m_compute_likelihood)
+    m_S.resize(n_features,n_features);
+  else
+    m_S.resize(0,0);
+  m_z_first_order.resize(n_samples, n_outputs);
+  m_z_second_order.resize(n_samples, n_outputs, n_outputs);
+  m_inW.resize(n_outputs, n_outputs);
+  m_invM.resize(n_outputs, n_outputs);
+  m_sigma2 = 0.;
+  m_f_log2pi = n_features * log(2*M_PI);
+
+  // Cache
+  m_tmp_dxf.resize(n_outputs, n_features);
+  m_tmp_d.resize(n_outputs);
+  m_tmp_f.resize(n_features);
+  m_tmp_dxd_1.resize(n_outputs, n_outputs);
+  m_tmp_dxd_2.resize(n_outputs, n_outputs);
+  m_tmp_fxd_1.resize(n_features, n_outputs);
+  m_tmp_fxd_2.resize(n_features, n_outputs);
+  // The following large cache matrices are only required to compute the
+  // log likelihood.
+  if (m_compute_likelihood)
+  {
+    m_tmp_fxf_1.resize(n_features, n_features);
+    m_tmp_fxf_2.resize(n_features, n_features);
+  }
+  else
+  {
+    m_tmp_fxf_1.resize(0,0);
+    m_tmp_fxf_2.resize(0,0);
+  }
+}
+
+void bob::learn::em::EMPCATrainer::computeMeanVariance(bob::learn::linear::Machine& machine,
+  const blitz::Array<double,2>& ar)
+{
+  size_t n_samples = ar.extent(0);
+  blitz::Array<double,1> mu = machine.updateInputSubtraction();
+  blitz::Range all = blitz::Range::all();
+  if (m_compute_likelihood)
+  {
+    // Mean and scatter computation
+    bob::math::scatter(ar, m_S, mu);
+    // divides scatter by N-1
+    m_S /= static_cast<double>(n_samples-1);
+  }
+  else
+  {
+    // computes the mean and updates mu
+    mu = 0.;
+    for (size_t i=0; i<n_samples; ++i)
+      mu += ar(i,all);
+    mu /= static_cast<double>(n_samples);
+  }
+}
+
+void bob::learn::em::EMPCATrainer::initRandomWSigma2(bob::learn::linear::Machine& machine)
+{
+  // Initializes the random number generator
+  boost::uniform_01<> range01;
+  boost::variate_generator<boost::mt19937&, boost::uniform_01<> > die(*m_rng, range01);
+
+  // W initialization (TODO: add method in core)
+  blitz::Array<double,2> W = machine.updateWeights();
+  double ratio = 2.; // follows the Matlab implementation, which uses a ratio of 2
+  for (int i=0; i<W.extent(0); ++i)
+    for (int j=0; j<W.extent(1); ++j)
+      W(i,j) = die() * ratio;
+  // sigma2 initialization
+  m_sigma2 = die() * ratio;
+}
+
+void bob::learn::em::EMPCATrainer::computeWtW(bob::learn::linear::Machine& machine)
+{
+  const blitz::Array<double,2> W = machine.getWeights();
+  const blitz::Array<double,2> Wt = W.transpose(1,0);
+  bob::math::prod(Wt, W, m_inW);
+}
+
+void bob::learn::em::EMPCATrainer::computeInvM()
+{
+  // Compute inverse(M), where M = W^T * W + sigma2 * Id
+  bob::math::eye(m_tmp_dxd_1); // m_tmp_dxd_1 = Id
+  m_tmp_dxd_1 *= m_sigma2; // m_tmp_dxd_1 = sigma2 * Id
+  m_tmp_dxd_1 += m_inW; // m_tmp_dxd_1 = M = W^T * W + sigma2 * Id
+  bob::math::inv(m_tmp_dxd_1, m_invM); // m_invM = inv(M)
+}
+
+
+
+void bob::learn::em::EMPCATrainer::eStep(bob::learn::linear::Machine& machine, const blitz::Array<double,2>& ar)
+{
+  // Gets mu and W from the machine
+  const blitz::Array<double,1>& mu = machine.getInputSubtraction();
+  const blitz::Array<double,2>& W = machine.getWeights();
+  const blitz::Array<double,2> Wt = W.transpose(1,0); // W^T
+
+  // Computes the statistics
+  blitz::Range a = blitz::Range::all();
+  for(int i=0; i<ar.extent(0); ++i)
+  {
+    /// 1/ First order statistics: \f$z_first_order_i = inv(M) W^T (t - \mu)\f$
+    // m_tmp_f = t (sample) - mu (normalized sample)
+    m_tmp_f = ar(i,a) - mu;
+    // m_tmp_dxf = inv(M) * W^T
+    bob::math::prod(m_invM, Wt, m_tmp_dxf);
+    blitz::Array<double,1> z_first_order_i = m_z_first_order(i,blitz::Range::all());
+    // z_first_order_i = inv(M) * W^T * (t - mu)
+    bob::math::prod(m_tmp_dxf, m_tmp_f, z_first_order_i);
+
+    /// 2/ Second order statistics:
+    ///     z_second_order_i = sigma2 * inv(M) + z_first_order_i * z_first_order_i^T
+    blitz::Array<double,2> z_second_order_i = m_z_second_order(i,blitz::Range::all(),blitz::Range::all());
+    // m_tmp_dxd_1 = z_first_order_i * z_first_order_i^T (outer product)
+    bob::math::prod(z_first_order_i, z_first_order_i, m_tmp_dxd_1);
+    // z_second_order_i = sigma2 * inv(M)
+    z_second_order_i = m_invM;
+    z_second_order_i *= m_sigma2;
+    // z_second_order_i = sigma2 * inv(M) + z_first_order_i * z_first_order_i^T
+    z_second_order_i += m_tmp_dxd_1;
+  }
+}
+
+void bob::learn::em::EMPCATrainer::mStep(bob::learn::linear::Machine& machine, const blitz::Array<double,2>& ar)
+{
+  // 1/ New estimate of W
+  updateW(machine, ar);
+
+  // 2/ New estimate of sigma2
+  updateSigma2(machine, ar);
+
+  // Computes the new value of inverse(M), where M = Wt * W + sigma2 * Id
+  computeInvM();
+}
+
+void bob::learn::em::EMPCATrainer::updateW(bob::learn::linear::Machine& machine, const blitz::Array<double,2>& ar) {
+  // Get the mean mu and the projection matrix W
+  const blitz::Array<double,1>& mu = machine.getInputSubtraction();
+  blitz::Array<double,2>& W = machine.updateWeights();
+  const blitz::Array<double,2> Wt = W.transpose(1,0); // W^T
+
+  // Compute W = sum{ (t_{i} - mu) z_first_order_i^T} * inv( sum{z_second_order_i} )
+  m_tmp_fxd_1 = 0.;
+  m_tmp_dxd_1 = 0.;
+  blitz::Range a = blitz::Range::all();
+  for(int i=0; i<ar.extent(0); ++i)
+  {
+    // m_tmp_f = t (sample) - mu (normalized sample)
+    m_tmp_f = ar(i,a) - mu;
+    // first order statistics of sample i
+    blitz::Array<double,1> z_first_order_i = m_z_first_order(i,blitz::Range::all());
+    // m_tmp_fxd_2 = (t - mu)*z_first_order_i
+    bob::math::prod(m_tmp_f, z_first_order_i, m_tmp_fxd_2);
+    m_tmp_fxd_1 += m_tmp_fxd_2;
+
+    // second order statistics of sample i
+    blitz::Array<double,2> z_second_order_i = m_z_second_order(i,blitz::Range::all(),blitz::Range::all());
+    m_tmp_dxd_1 += z_second_order_i;
+  }
+
+  // m_tmp_dxd_2 = inv( sum(E(x_i.x_i^T)) )
+  bob::math::inv(m_tmp_dxd_1, m_tmp_dxd_2);
+  // New estimates of W
+  bob::math::prod(m_tmp_fxd_1, m_tmp_dxd_2, W);
+  // Updates W'*W as well
+  bob::math::prod(Wt, W, m_inW);
+}
+
+void bob::learn::em::EMPCATrainer::updateSigma2(bob::learn::linear::Machine& machine, const blitz::Array<double,2>& ar) {
+  // Get the mean mu and the projection matrix W
+  const blitz::Array<double,1>& mu = machine.getInputSubtraction();
+  const blitz::Array<double,2>& W = machine.getWeights();
+  const blitz::Array<double,2> Wt = W.transpose(1,0); // W^T
+
+  m_sigma2 = 0.;
+  blitz::Range a = blitz::Range::all();
+  for(int i=0; i<ar.extent(0); ++i)
+  {
+    // a. sigma2 += || t - mu ||^2
+    // m_tmp_f = t (sample) - mu (normalized sample)
+    m_tmp_f = ar(i,a) - mu;
+    // sigma2 += || t - mu ||^2
+    m_sigma2 += blitz::sum(blitz::pow2(m_tmp_f));
+
+    // b. sigma2 -= 2*E(x_i)^T*W^T*(t - mu)
+    // m_tmp_d = W^T*(t - mu)
+    bob::math::prod(Wt, m_tmp_f, m_tmp_d);
+    // first order of i
+    blitz::Array<double,1> z_first_order_i = m_z_first_order(i,blitz::Range::all());
+    // sigma2 -= 2*E(x_i)^T*W^T*(t - mu)
+    m_sigma2 -= 2*bob::math::dot(z_first_order_i, m_tmp_d);
+
+    // c. sigma2 += trace( E(x_i.x_i^T)*W^T*W )
+    // second order of i
+    blitz::Array<double,2> z_second_order_i = m_z_second_order(i,blitz::Range::all(),blitz::Range::all());
+    // m_tmp_dxd_1 = E(x_i.x_i^T)*W^T*W
+    bob::math::prod(z_second_order_i, m_inW, m_tmp_dxd_1);
+    // sigma2 += trace( E(x_i.x_i^T)*W^T*W )
+    m_sigma2 += bob::math::trace(m_tmp_dxd_1);
+  }
+  // Normalization factor
+  m_sigma2 /= (static_cast<double>(ar.extent(0)) * mu.extent(0));
+}
+
+double bob::learn::em::EMPCATrainer::computeLikelihood(bob::learn::linear::Machine& machine)
+{
+  // Get W projection matrix
+  const blitz::Array<double,2>& W = machine.getWeights();
+  const blitz::Array<double,2> Wt = W.transpose(1,0); // W^T
+  const size_t n_features = m_S.extent(0);
+
+  // 1/ Compute det(C), where C = sigma2.I + W.W^T
+  //            det(C) = det(sigma2 * C / sigma2) = det(sigma2 * Id) * det(C / sigma2)
+  //    We are using Sylvester's determinant theorem to compute a dxd
+  //    determinant rather than a fxf one: det(I + A.B) = det(I + B.A)
+  //            det(C) = sigma2^n_features * det(I + W.W^T/sigma2)
+  //                   = sigma2^n_features * det(I + W^T.W/sigma2) (cf. Bishop Appendix C)
+  // detC = det( eye(n_features) * sigma2 )
+
+  // detC = sigma2^n_features
+  double detC = pow(m_sigma2, n_features);
+  // m_tmp_dxd_1 = Id
+  bob::math::eye(m_tmp_dxd_1);
+  // m_tmp_dxd_2 = W^T.W
+  bob::math::prod(Wt, W, m_tmp_dxd_2);
+  // m_tmp_dxd_2 = W^T.W / sigma2
+  m_tmp_dxd_2 /= m_sigma2;
+  // m_tmp_dxd_1 = Id + W^T.W / sigma2
+  m_tmp_dxd_1 += m_tmp_dxd_2;
+  // detC = sigma2^n_features * det(I + W^T.W/sigma2)
+  detC *= bob::math::det(m_tmp_dxd_1);
+
+  // 2/ Compute inv(C), where C = sigma2.I + W.W^T
+  //    We are using the following identity (Property C.7 of Bishop's book)
+  //      (A + B.D^-1.C)^-1 = A^-1 - A^-1.B(D+C.A^-1.B)^-1.C.A^-1
+  //    Hence, inv(C) = sigma2^-1 .(I - W.M^-1.W^T)
+
+  // Compute inverse(M), where M = Wt * W + sigma2 * Id
+  computeInvM();
+  // m_tmp_fxf_1 = I = eye(n_features)
+  bob::math::eye(m_tmp_fxf_1);
+  // m_tmp_fxd_1 = W * inv(M)
+  bob::math::prod(W, m_invM, m_tmp_fxd_1);
+  // m_tmp_fxf_2 = (W * inv(M) * Wt)
+  bob::math::prod(m_tmp_fxd_1, Wt, m_tmp_fxf_2);
+  // m_tmp_fxf_1 = inv(C) = (I - W.M^-1.W^T) / sigma2
+  m_tmp_fxf_1 -= m_tmp_fxf_2;
+  m_tmp_fxf_1 /= m_sigma2;
+
+  // 3/ Compute inv(C).S
+  bob::math::prod(m_tmp_fxf_1, m_S, m_tmp_fxf_2);
+
+  // 4/ Use previous values to compute the log likelihood:
+  // Log likelihood =  - N/2*{ d*ln(2*PI) + ln |detC| + tr(C^-1.S) }
+  double llh = - static_cast<double>(m_z_first_order.extent(0)) / 2. *
+    ( m_f_log2pi + log(fabs(detC)) + bob::math::trace(m_tmp_fxf_2) );
+
+  return llh;
+}
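+
+// Note on the determinant trick above: with f features and d latent dimensions
+// (d << f), Sylvester's identity det(I_f + A.B) = det(I_d + B.A) turns an
+// f x f determinant into a d x d one. For instance, with f = 1000 and d = 10,
+// det(C) costs only a 10x10 determinant plus the scalar factor sigma2^1000,
+// instead of a 1000x1000 determinant.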
diff --git a/bob/learn/em/cpp/FABase.cpp b/bob/learn/em/cpp/FABase.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..317a368d696668824f6be9a533c5ab6d4da8ff50
--- /dev/null
+++ b/bob/learn/em/cpp/FABase.cpp
@@ -0,0 +1,307 @@
+/**
+ * @date Tue Jan 27 15:51:15 2015 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+
+#include <bob.learn.em/FABase.h>
+#include <bob.core/array_copy.h>
+#include <bob.math/linear.h>
+#include <bob.math/inv.h>
+#include <limits>
+
+
+//////////////////// FABase ////////////////////
+bob::learn::em::FABase::FABase():
+  m_ubm(boost::shared_ptr<bob::learn::em::GMMMachine>()), m_ru(1), m_rv(1),
+  m_U(0,1), m_V(0,1), m_d(0)
+{}
+
+bob::learn::em::FABase::FABase(const boost::shared_ptr<bob::learn::em::GMMMachine> ubm,
+    const size_t ru, const size_t rv):
+  m_ubm(ubm), m_ru(ru), m_rv(rv),
+  m_U(getSupervectorLength(),ru), m_V(getSupervectorLength(),rv), m_d(getSupervectorLength())
+{
+  if (ru < 1) {
+    boost::format m("value for parameter `ru' (%lu) cannot be smaller than 1");
+    m % ru;
+    throw std::runtime_error(m.str());
+  }
+  if (rv < 1) {
+    boost::format m("value for parameter `rv' (%lu) cannot be smaller than 1");
+    m % rv;
+    throw std::runtime_error(m.str());
+  }
+  updateCache();
+}
+
+bob::learn::em::FABase::FABase(const bob::learn::em::FABase& other):
+  m_ubm(other.m_ubm), m_ru(other.m_ru), m_rv(other.m_rv),
+  m_U(bob::core::array::ccopy(other.m_U)),
+  m_V(bob::core::array::ccopy(other.m_V)),
+  m_d(bob::core::array::ccopy(other.m_d))
+{
+  updateCache();
+}
+
+bob::learn::em::FABase::~FABase() {
+}
+
+bob::learn::em::FABase& bob::learn::em::FABase::operator=
+(const bob::learn::em::FABase& other)
+{
+  if (this != &other)
+  {
+    m_ubm = other.m_ubm;
+    m_ru = other.m_ru;
+    m_rv = other.m_rv;
+    m_U.reference(bob::core::array::ccopy(other.m_U));
+    m_V.reference(bob::core::array::ccopy(other.m_V));
+    m_d.reference(bob::core::array::ccopy(other.m_d));
+
+    updateCache();
+  }
+  return *this;
+}
+
+bool bob::learn::em::FABase::operator==(const bob::learn::em::FABase& b) const
+{
+  return ( (((m_ubm && b.m_ubm) && *m_ubm == *(b.m_ubm)) || (!m_ubm && !b.m_ubm)) &&
+          m_ru == b.m_ru && m_rv == b.m_rv &&
+          bob::core::array::isEqual(m_U, b.m_U) &&
+          bob::core::array::isEqual(m_V, b.m_V) &&
+          bob::core::array::isEqual(m_d, b.m_d));
+}
+
+bool bob::learn::em::FABase::operator!=(const bob::learn::em::FABase& b) const
+{
+  return !(this->operator==(b));
+}
+
+bool bob::learn::em::FABase::is_similar_to(const bob::learn::em::FABase& b,
+    const double r_epsilon, const double a_epsilon) const
+{
+  // TODO: update is_similar_to of the GMMMachine with the 2 epsilon's
+  return (( ((m_ubm && b.m_ubm) && m_ubm->is_similar_to(*(b.m_ubm), a_epsilon)) ||
+            (!m_ubm && !b.m_ubm) ) &&
+          m_ru == b.m_ru && m_rv == b.m_rv &&
+          bob::core::array::isClose(m_U, b.m_U, r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(m_V, b.m_V, r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(m_d, b.m_d, r_epsilon, a_epsilon));
+}
+
+void bob::learn::em::FABase::resize(const size_t ru, const size_t rv)
+{
+  if (ru < 1) {
+    boost::format m("value for parameter `ru' (%lu) cannot be smaller than 1");
+    m % ru;
+    throw std::runtime_error(m.str());
+  }
+  if (rv < 1) {
+    boost::format m("value for parameter `rv' (%lu) cannot be smaller than 1");
+    m % rv;
+    throw std::runtime_error(m.str());
+  }
+
+  m_ru = ru;
+  m_rv = rv;
+  m_U.resizeAndPreserve(m_U.extent(0), ru);
+  m_V.resizeAndPreserve(m_V.extent(0), rv);
+
+  updateCacheUbmUVD();
+}
+
+void bob::learn::em::FABase::resize(const size_t ru, const size_t rv, const size_t cd)
+{
+  if (ru < 1) {
+    boost::format m("value for parameter `ru' (%lu) cannot be smaller than 1");
+    m % ru;
+    throw std::runtime_error(m.str());
+  }
+  if (rv < 1) {
+    boost::format m("value for parameter `rv' (%lu) cannot be smaller than 1");
+    m % rv;
+    throw std::runtime_error(m.str());
+  }
+
+  if (!m_ubm || (m_ubm && getSupervectorLength() == cd))
+  {
+    m_ru = ru;
+    m_rv = rv;
+    m_U.resizeAndPreserve(cd, ru);
+    m_V.resizeAndPreserve(cd, rv);
+    m_d.resizeAndPreserve(cd);
+
+    updateCacheUbmUVD();
+  }
+  else {
+    boost::format m("value for parameter `cd' (%lu) is not set to %lu");
+    m % cd % getSupervectorLength();
+    throw std::runtime_error(m.str());
+  }
+}
+
+void bob::learn::em::FABase::setUbm(const boost::shared_ptr<bob::learn::em::GMMMachine> ubm)
+{
+  m_ubm = ubm;
+  m_U.resizeAndPreserve(getSupervectorLength(), m_ru);
+  m_V.resizeAndPreserve(getSupervectorLength(), m_rv);
+  m_d.resizeAndPreserve(getSupervectorLength());
+
+  updateCache();
+}
+
+void bob::learn::em::FABase::setU(const blitz::Array<double,2>& U)
+{
+  if(U.extent(0) != m_U.extent(0)) { //checks dimension
+    boost::format m("number of rows in parameter `U' (%d) does not match the expected size (%d)");
+    m % U.extent(0) % m_U.extent(0);
+    throw std::runtime_error(m.str());
+  }
+  if(U.extent(1) != m_U.extent(1)) { //checks dimension
+    boost::format m("number of columns in parameter `U' (%d) does not match the expected size (%d)");
+    m % U.extent(1) % m_U.extent(1);
+    throw std::runtime_error(m.str());
+  }
+  m_U.reference(bob::core::array::ccopy(U));
+
+  // update cache
+  updateCacheUbmUVD();
+}
+
+void bob::learn::em::FABase::setV(const blitz::Array<double,2>& V)
+{
+  if(V.extent(0) != m_V.extent(0)) { //checks dimension
+    boost::format m("number of rows in parameter `V' (%d) does not match the expected size (%d)");
+    m % V.extent(0) % m_V.extent(0);
+    throw std::runtime_error(m.str());
+  }
+  if(V.extent(1) != m_V.extent(1)) { //checks dimension
+    boost::format m("number of columns in parameter `V' (%d) does not match the expected size (%d)");
+    m % V.extent(1) % m_V.extent(1);
+    throw std::runtime_error(m.str());
+  }
+  m_V.reference(bob::core::array::ccopy(V));
+}
+
+void bob::learn::em::FABase::setD(const blitz::Array<double,1>& d)
+{
+  if(d.extent(0) != m_d.extent(0)) { //checks dimension
+    boost::format m("size of input vector `d' (%d) does not match the expected size (%d)");
+    m % d.extent(0) % m_d.extent(0);
+    throw std::runtime_error(m.str());
+  }
+  m_d.reference(bob::core::array::ccopy(d));
+}
+
+
+void bob::learn::em::FABase::updateCache()
+{
+  updateCacheUbm();
+  updateCacheUbmUVD();
+  resizeTmp();
+}
+
+void bob::learn::em::FABase::resizeTmp()
+{
+  m_tmp_IdPlusUSProdInv.resize(getDimRu(),getDimRu());
+  m_tmp_Fn_x.resize(getSupervectorLength());
+  m_tmp_ru.resize(getDimRu());
+  m_tmp_ruD.resize(getDimRu(), getNInputs());
+  m_tmp_ruru.resize(getDimRu(), getDimRu());
+}
+
+void bob::learn::em::FABase::updateCacheUbm()
+{
+  // Put supervectors in cache
+  if (m_ubm)
+  {
+    m_cache_mean.resize(getSupervectorLength());
+    m_cache_sigma.resize(getSupervectorLength());
+    m_cache_mean  = m_ubm->getMeanSupervector();
+    m_cache_sigma = m_ubm->getVarianceSupervector();
+  }
+}
+
+void bob::learn::em::FABase::updateCacheUbmUVD()
+{
+  // Compute and cache U^{T}.diag(sigma)^{-1}
+  if (m_ubm)
+  {
+    blitz::firstIndex i;
+    blitz::secondIndex j;
+    m_cache_UtSigmaInv.resize(getDimRu(), getSupervectorLength());
+    m_cache_UtSigmaInv = m_U(j,i) / m_cache_sigma(j); // Ut * diag(sigma)^-1
+  }
+}
+
+void bob::learn::em::FABase::computeIdPlusUSProdInv(const bob::learn::em::GMMStats& gmm_stats,
+  blitz::Array<double,2>& output) const
+{
+  // Computes (Id + U^T.Sigma^-1.U.N_{i,h}.U)^-1 =
+  // (Id + sum_{c=1..C} N_{i,h}.U_{c}^T.Sigma_{c}^-1.U_{c})^-1
+
+  // Blitz compatibility: ugly fix (const_cast, as old blitz version does not
+  // provide a non-const version of transpose())
+  blitz::Array<double,2> Ut = const_cast<blitz::Array<double,2>&>(m_U).transpose(1,0);
+
+  blitz::firstIndex i;
+  blitz::secondIndex j;
+  blitz::Range rall = blitz::Range::all();
+
+  bob::math::eye(m_tmp_ruru); // m_tmp_ruru = Id
+  // Loop and add N_{i,h}.U_{c}^T.Sigma_{c}^-1.U_{c} to m_tmp_ruru at each iteration
+  const size_t dim_c = getNGaussians();
+  const size_t dim_d = getNInputs();
+  for(size_t c=0; c<dim_c; ++c) {
+    blitz::Range rc(c*dim_d,(c+1)*dim_d-1);
+    blitz::Array<double,2> Ut_c = Ut(rall,rc);
+    blitz::Array<double,1> sigma_c = m_cache_sigma(rc);
+    m_tmp_ruD = Ut_c(i,j) / sigma_c(j); // U_{c}^T.Sigma_{c}^-1
+    blitz::Array<double,2> U_c = m_U(rc,rall);
+    // Use the output array as an intermediate buffer
+    bob::math::prod(m_tmp_ruD, U_c, output); // U_{c}^T.Sigma_{c}^-1.U_{c}
+    // Finally, add N_{i,h}.U_{c}^T.Sigma_{c}^-1.U_{c} to m_tmp_ruru
+    m_tmp_ruru += output * gmm_stats.n(c);
+  }
+  // Computes the inverse
+  bob::math::inv(m_tmp_ruru, output);
+}
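+
+// The loop above exploits the block structure of the problem: Sigma is
+// diagonal and U is processed per Gaussian block U_c (D x ru), so the large
+// (CD x CD) matrices are never materialised; only the small (ru x ru) matrix
+// accumulated in m_tmp_ruru is inverted.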
+
+
+void bob::learn::em::FABase::computeFn_x(const bob::learn::em::GMMStats& gmm_stats,
+  blitz::Array<double,1>& output) const
+{
+  // Compute Fn_x = N.(o - m), the normalised first order statistics
+  blitz::Range rall = blitz::Range::all();
+  const size_t dim_c = getNGaussians();
+  const size_t dim_d = getNInputs();
+  for(size_t c=0; c<dim_c; ++c) {
+    blitz::Range rc(c*dim_d,(c+1)*dim_d-1);
+    blitz::Array<double,1> Fn_x_c = output(rc);
+    blitz::Array<double,1> mean_c = m_cache_mean(rc);
+    Fn_x_c = gmm_stats.sumPx(c,rall) - mean_c*gmm_stats.n(c);
+  }
+}
+
+void bob::learn::em::FABase::estimateX(const blitz::Array<double,2>& IdPlusUSProdInv,
+  const blitz::Array<double,1>& Fn_x, blitz::Array<double,1>& x) const
+{
+  // m_tmp_ru = UtSigmaInv * Fn_x = Ut*diag(sigma)^-1 * N*(o - m)
+  bob::math::prod(m_cache_UtSigmaInv, Fn_x, m_tmp_ru);
+  // x = IdPlusUSProdInv * m_cache_UtSigmaInv * Fn_x
+  bob::math::prod(IdPlusUSProdInv, m_tmp_ru, x);
+}
+
+
+void bob::learn::em::FABase::estimateX(const bob::learn::em::GMMStats& gmm_stats, blitz::Array<double,1>& x) const
+{
+  if (!m_ubm) throw std::runtime_error("No UBM was set in the JFA machine.");
+  computeIdPlusUSProdInv(gmm_stats, m_tmp_IdPlusUSProdInv); // Computes first term
+  computeFn_x(gmm_stats, m_tmp_Fn_x); // Computes last term
+  estimateX(m_tmp_IdPlusUSProdInv, m_tmp_Fn_x, x); // Estimates the value of x
+}
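+
+// Example (illustrative only; `base' and `stats' are hypothetical): given
+// statistics accumulated with GMMMachine::accStatistics, the session factor x
+// can be estimated as
+//
+//   blitz::Array<double,1> x(base.getDimRu());
+//   base.estimateX(stats, x);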
+
diff --git a/bob/learn/em/cpp/FABaseTrainer.cpp b/bob/learn/em/cpp/FABaseTrainer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..09e4a4b69e262775b8ef529697f04ed2d2804621
--- /dev/null
+++ b/bob/learn/em/cpp/FABaseTrainer.cpp
@@ -0,0 +1,547 @@
+/**
+ * @date Sat Jan 31 17:16:17 2015 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ *
+ * @brief FABaseTrainer functions
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include <bob.learn.em/FABaseTrainer.h>
+#include <bob.core/check.h>
+#include <bob.core/array_copy.h>
+#include <bob.core/array_random.h>
+#include <bob.math/inv.h>
+#include <bob.math/linear.h>
+#include <bob.core/array_repmat.h>
+#include <algorithm>
+
+
+bob::learn::em::FABaseTrainer::FABaseTrainer():
+  m_Nid(0), m_dim_C(0), m_dim_D(0), m_dim_ru(0), m_dim_rv(0),
+  m_x(0), m_y(0), m_z(0), m_Nacc(0), m_Facc(0)
+{
+}
+
+bob::learn::em::FABaseTrainer::FABaseTrainer(const bob::learn::em::FABaseTrainer& other):
+  m_Nid(other.m_Nid), m_dim_C(other.m_dim_C), m_dim_D(other.m_dim_D),
+  m_dim_ru(other.m_dim_ru), m_dim_rv(other.m_dim_rv),
+  m_x(other.m_x), m_y(other.m_y), m_z(other.m_z), m_Nacc(other.m_Nacc), m_Facc(other.m_Facc)
+{
+}
+
+bob::learn::em::FABaseTrainer::~FABaseTrainer()
+{
+}
+
+void bob::learn::em::FABaseTrainer::checkStatistics(
+  const bob::learn::em::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& stats)
+{
+  for (size_t id=0; id<stats.size(); ++id) {
+    for (size_t s=0; s<stats[id].size(); ++s) {
+      if (stats[id][s]->sumPx.extent(0) != (int)m_dim_C) {
+        boost::format m("GMMStats C dimension parameter = %d is different than the expected value of %d");
+        m % stats[id][s]->sumPx.extent(0) % (int)m_dim_C;
+        throw std::runtime_error(m.str());
+      }
+      if (stats[id][s]->sumPx.extent(1) != (int)m_dim_D) {
+        boost::format m("GMMStats D dimension parameter = %d is different than the expected value of %d");
+        m % stats[id][s]->sumPx.extent(1) % (int)m_dim_D;
+        throw std::runtime_error(m.str());
+      }
+    }
+  }
+}
+
+
+void bob::learn::em::FABaseTrainer::initUbmNidSumStatistics(
+  const bob::learn::em::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& stats)
+{
+  m_Nid = stats.size();
+  boost::shared_ptr<bob::learn::em::GMMMachine> ubm = m.getUbm();
+  // Put UBM in cache
+  m_dim_C = ubm->getNGaussians();
+  m_dim_D = ubm->getNInputs();
+  m_dim_ru = m.getDimRu();
+  m_dim_rv = m.getDimRv();
+  // Check statistics dimensionality
+  checkStatistics(m, stats);
+  // Precomputes the sum of the statistics for each client/identity
+  precomputeSumStatisticsN(stats);
+  precomputeSumStatisticsF(stats);
+  // Cache and working arrays
+  initCache();
+}
+
+void bob::learn::em::FABaseTrainer::precomputeSumStatisticsN(
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& stats)
+{
+  m_Nacc.clear();
+  blitz::Array<double,1> Nsum(m_dim_C);
+  for (size_t id=0; id<stats.size(); ++id) {
+    Nsum = 0.;
+    for (size_t s=0; s<stats[id].size(); ++s) {
+      Nsum += stats[id][s]->n;
+    }
+    m_Nacc.push_back(bob::core::array::ccopy(Nsum));
+  }
+}
+
+void bob::learn::em::FABaseTrainer::precomputeSumStatisticsF(
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& stats)
+{
+  m_Facc.clear();
+  blitz::Array<double,1> Fsum(m_dim_C*m_dim_D);
+  for (size_t id=0; id<stats.size(); ++id) {
+    Fsum = 0.;
+    for (size_t s=0; s<stats[id].size(); ++s) {
+      for (size_t c=0; c<m_dim_C; ++c) {
+        blitz::Array<double,1> Fsum_c = Fsum(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1));
+        Fsum_c += stats[id][s]->sumPx(c,blitz::Range::all());
+      }
+    }
+    m_Facc.push_back(bob::core::array::ccopy(Fsum));
+  }
+}
+
+void bob::learn::em::FABaseTrainer::initializeXYZ(const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& vec)
+{
+  m_x.clear();
+  m_y.clear();
+  m_z.clear();
+
+  blitz::Array<double,1> z0(m_dim_C*m_dim_D);
+  z0 = 0;
+  blitz::Array<double,1> y0(m_dim_rv);
+  y0 = 0;
+  blitz::Array<double,2> x0(m_dim_ru,0);
+  x0 = 0;
+  for (size_t i=0; i<vec.size(); ++i)
+  {
+    m_z.push_back(bob::core::array::ccopy(z0));
+    m_y.push_back(bob::core::array::ccopy(y0));
+    x0.resize(m_dim_ru, vec[i].size());
+    x0 = 0;
+    m_x.push_back(bob::core::array::ccopy(x0));
+  }
+}
+
+void bob::learn::em::FABaseTrainer::resetXYZ()
+{
+  for (size_t i=0; i<m_x.size(); ++i)
+  {
+    m_x[i] = 0.;
+    m_y[i] = 0.;
+    m_z[i] = 0.;
+  }
+}
+
+void bob::learn::em::FABaseTrainer::initCache()
+{
+  const size_t dim_CD = m_dim_C*m_dim_D;
+  // U
+  m_cache_UtSigmaInv.resize(m_dim_ru, dim_CD);
+  m_cache_UProd.resize(m_dim_C, m_dim_ru, m_dim_ru);
+  m_cache_IdPlusUProd_ih.resize(m_dim_ru, m_dim_ru);
+  m_cache_Fn_x_ih.resize(dim_CD);
+  m_acc_U_A1.resize(m_dim_C, m_dim_ru, m_dim_ru);
+  m_acc_U_A2.resize(dim_CD, m_dim_ru);
+  // V
+  m_cache_VtSigmaInv.resize(m_dim_rv, dim_CD);
+  m_cache_VProd.resize(m_dim_C, m_dim_rv, m_dim_rv);
+  m_cache_IdPlusVProd_i.resize(m_dim_rv, m_dim_rv);
+  m_cache_Fn_y_i.resize(dim_CD);
+  m_acc_V_A1.resize(m_dim_C, m_dim_rv, m_dim_rv);
+  m_acc_V_A2.resize(dim_CD, m_dim_rv);
+  // D
+  m_cache_DtSigmaInv.resize(dim_CD);
+  m_cache_DProd.resize(dim_CD);
+  m_cache_IdPlusDProd_i.resize(dim_CD);
+  m_cache_Fn_z_i.resize(dim_CD);
+  m_acc_D_A1.resize(dim_CD);
+  m_acc_D_A2.resize(dim_CD);
+
+  // tmp
+  m_tmp_CD.resize(dim_CD);
+  m_tmp_CD_b.resize(dim_CD);
+
+  m_tmp_ru.resize(m_dim_ru);
+  m_tmp_ruD.resize(m_dim_ru, m_dim_D);
+  m_tmp_ruru.resize(m_dim_ru, m_dim_ru);
+
+  m_tmp_rv.resize(m_dim_rv);
+  m_tmp_rvD.resize(m_dim_rv, m_dim_D);
+  m_tmp_rvrv.resize(m_dim_rv, m_dim_rv);
+}
+
+
+
+//////////////////////////// V ///////////////////////////
+void bob::learn::em::FABaseTrainer::computeVtSigmaInv(const bob::learn::em::FABase& m)
+{
+  const blitz::Array<double,2>& V = m.getV();
+  // Blitz compatibility: ugly fix (const_cast, as old blitz version does not
+  // provide a non-const version of transpose())
+  const blitz::Array<double,2> Vt = const_cast<blitz::Array<double,2>&>(V).transpose(1,0);
+  const blitz::Array<double,1>& sigma = m.getUbmVariance();
+  blitz::firstIndex i;
+  blitz::secondIndex j;
+  m_cache_VtSigmaInv = Vt(i,j) / sigma(j); // Vt * diag(sigma)^-1
+}
+
+void bob::learn::em::FABaseTrainer::computeVProd(const bob::learn::em::FABase& m)
+{
+  const blitz::Array<double,2>& V = m.getV();
+  blitz::firstIndex i;
+  blitz::secondIndex j;
+  const blitz::Array<double,1>& sigma = m.getUbmVariance();
+  blitz::Range rall = blitz::Range::all();
+  for (size_t c=0; c<m_dim_C; ++c)
+  {
+    blitz::Array<double,2> VProd_c = m_cache_VProd(c, rall, rall);
+    blitz::Array<double,2> Vv_c = V(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1), rall);
+    blitz::Array<double,2> Vt_c = Vv_c.transpose(1,0);
+    blitz::Array<double,1> sigma_c = sigma(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1));
+    m_tmp_rvD = Vt_c(i,j) / sigma_c(j); // Vt_c * diag(sigma)^-1
+    bob::math::prod(m_tmp_rvD, Vv_c, VProd_c);
+  }
+}
+
+void bob::learn::em::FABaseTrainer::computeIdPlusVProd_i(const size_t id)
+{
+  const blitz::Array<double,1>& Ni = m_Nacc[id];
+  bob::math::eye(m_tmp_rvrv); // m_tmp_rvrv = I
+  blitz::Range rall = blitz::Range::all();
+  for (size_t c=0; c<m_dim_C; ++c) {
+    blitz::Array<double,2> VProd_c = m_cache_VProd(c, rall, rall);
+    m_tmp_rvrv += VProd_c * Ni(c);
+  }
+  bob::math::inv(m_tmp_rvrv, m_cache_IdPlusVProd_i); // m_cache_IdPlusVProd_i = ( I+Vt*diag(sigma)^-1*Ni*V)^-1
+}
+
+void bob::learn::em::FABaseTrainer::computeFn_y_i(const bob::learn::em::FABase& mb,
+  const std::vector<boost::shared_ptr<bob::learn::em::GMMStats> >& stats, const size_t id)
+{
+  const blitz::Array<double,2>& U = mb.getU();
+  const blitz::Array<double,1>& d = mb.getD();
+  // Compute Fn_y_i = sum_{sessions h} N_{i,h}.(o_{i,h} - m - D.z_i - U.x_{i,h}) (normalised first order statistics)
+  const blitz::Array<double,1>& Fi = m_Facc[id];
+  const blitz::Array<double,1>& m = mb.getUbmMean();
+  const blitz::Array<double,1>& z = m_z[id];
+  bob::core::array::repelem(m_Nacc[id], m_tmp_CD);
+  m_cache_Fn_y_i = Fi - m_tmp_CD * (m + d * z); // Fn_y_i = sum_h N_{i,h}.(o_{i,h} - m - D.z_i)
+  const blitz::Array<double,2>& X = m_x[id];
+  blitz::Range rall = blitz::Range::all();
+  for (int h=0; h<X.extent(1); ++h) // Loops over the sessions
+  {
+    blitz::Array<double,1> Xh = X(rall, h); // Xh = x_{i,h} (length: ru)
+    bob::math::prod(U, Xh, m_tmp_CD_b); // m_tmp_CD_b = U*x_{i,h}
+    const blitz::Array<double,1>& Nih = stats[h]->n;
+    bob::core::array::repelem(Nih, m_tmp_CD);
+    m_cache_Fn_y_i -= m_tmp_CD * m_tmp_CD_b; // N_{i,h} * U * x_{i,h}
+  }
+  // Fn_y_i = sum_h N_{i,h}.(o_{i,h} - m - D.z_i - U.x_{i,h})
+}
+
+void bob::learn::em::FABaseTrainer::updateY_i(const size_t id)
+{
+  // Computes y_i = (I + V^T.Sigma^-1.N_i.V)^-1 * V^T.Sigma^-1 * Fn_y_i
+  blitz::Array<double,1>& y = m_y[id];
+  // m_tmp_rv = m_cache_VtSigmaInv * m_cache_Fn_y_i = V^T.diag(sigma)^-1 * Fn_y_i
+  bob::math::prod(m_cache_VtSigmaInv, m_cache_Fn_y_i, m_tmp_rv);
+  bob::math::prod(m_cache_IdPlusVProd_i, m_tmp_rv, y);
+}
+
+void bob::learn::em::FABaseTrainer::updateY(const bob::learn::em::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& stats)
+{
+  // Precomputation
+  computeVtSigmaInv(m);
+  computeVProd(m);
+  // Loops over all people
+  for (size_t id=0; id<stats.size(); ++id) {
+    computeIdPlusVProd_i(id);
+    computeFn_y_i(m, stats[id], id);
+    updateY_i(id);
+  }
+}
+
+void bob::learn::em::FABaseTrainer::computeAccumulatorsV(
+  const bob::learn::em::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& stats)
+{
+  // Initializes the cache accumulator
+  m_acc_V_A1 = 0.;
+  m_acc_V_A2 = 0.;
+  // Loops over all people
+  blitz::firstIndex i;
+  blitz::secondIndex j;
+  blitz::Range rall = blitz::Range::all();
+  for (size_t id=0; id<stats.size(); ++id) {
+    computeIdPlusVProd_i(id);
+    computeFn_y_i(m, stats[id], id);
+
+    // Needs to return values to be accumulated for estimating V
+    const blitz::Array<double,1>& y = m_y[id];
+    m_tmp_rvrv = m_cache_IdPlusVProd_i;
+    m_tmp_rvrv += y(i) * y(j);
+    for (size_t c=0; c<m_dim_C; ++c)
+    {
+      blitz::Array<double,2> A1_y_c = m_acc_V_A1(c, rall, rall);
+      A1_y_c += m_tmp_rvrv * m_Nacc[id](c);
+    }
+    m_acc_V_A2 += m_cache_Fn_y_i(i) * y(j);
+  }
+}
+
+void bob::learn::em::FABaseTrainer::updateV(blitz::Array<double,2>& V)
+{
+  blitz::Range rall = blitz::Range::all();
+  for (size_t c=0; c<m_dim_C; ++c)
+  {
+    const blitz::Array<double,2> A1 = m_acc_V_A1(c, rall, rall);
+    bob::math::inv(A1, m_tmp_rvrv);
+    const blitz::Array<double,2> A2 = m_acc_V_A2(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1), rall);
+    blitz::Array<double,2> V_c = V(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1), rall);
+    bob::math::prod(A2, m_tmp_rvrv, V_c);
+  }
+}
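+
+// The update above is the closed-form M-step: for each Gaussian c it solves
+//   V_c = A2_c . A1_c^-1,
+// where A1_c = sum_i N_i(c).E[y_i.y_i^T] and A2 = sum_i Fn_y_i.E[y_i]^T are
+// the accumulators filled in computeAccumulatorsV().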
+
+
+//////////////////////////// U ///////////////////////////
+void bob::learn::em::FABaseTrainer::computeUtSigmaInv(const bob::learn::em::FABase& m)
+{
+  const blitz::Array<double,2>& U = m.getU();
+  // Blitz compatibility: ugly fix (const_cast, as old blitz version does not
+  // provide a non-const version of transpose())
+  const blitz::Array<double,2> Ut = const_cast<blitz::Array<double,2>&>(U).transpose(1,0);
+  const blitz::Array<double,1>& sigma = m.getUbmVariance();
+  blitz::firstIndex i;
+  blitz::secondIndex j;
+  m_cache_UtSigmaInv = Ut(i,j) / sigma(j); // Ut * diag(sigma)^-1
+}
+
+void bob::learn::em::FABaseTrainer::computeUProd(const bob::learn::em::FABase& m)
+{
+  const blitz::Array<double,2>& U = m.getU();
+  blitz::firstIndex i;
+  blitz::secondIndex j;
+  const blitz::Array<double,1>& sigma = m.getUbmVariance();
+  for (size_t c=0; c<m_dim_C; ++c)
+  {
+    blitz::Array<double,2> UProd_c = m_cache_UProd(c, blitz::Range::all(), blitz::Range::all());
+    blitz::Array<double,2> Uu_c = U(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1), blitz::Range::all());
+    blitz::Array<double,2> Ut_c = Uu_c.transpose(1,0);
+    blitz::Array<double,1> sigma_c = sigma(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1));
+    m_tmp_ruD = Ut_c(i,j) / sigma_c(j); // Ut_c * diag(sigma)^-1
+    bob::math::prod(m_tmp_ruD, Uu_c, UProd_c);
+  }
+}
+
+void bob::learn::em::FABaseTrainer::computeIdPlusUProd_ih(
+  const boost::shared_ptr<bob::learn::em::GMMStats>& stats)
+{
+  const blitz::Array<double,1>& Nih = stats->n;
+  bob::math::eye(m_tmp_ruru); // m_tmp_ruru = I
+  for (size_t c=0; c<m_dim_C; ++c) {
+    blitz::Array<double,2> UProd_c = m_cache_UProd(c,blitz::Range::all(),blitz::Range::all());
+    m_tmp_ruru += UProd_c * Nih(c);
+  }
+  bob::math::inv(m_tmp_ruru, m_cache_IdPlusUProd_ih); // m_cache_IdPlusUProd_ih = ( I+Ut*diag(sigma)^-1*Ni*U)^-1
+}
+
+void bob::learn::em::FABaseTrainer::computeFn_x_ih(const bob::learn::em::FABase& mb,
+  const boost::shared_ptr<bob::learn::em::GMMStats>& stats, const size_t id)
+{
+  const blitz::Array<double,2>& V = mb.getV();
+  const blitz::Array<double,1>& d =  mb.getD();
+  // Compute Fn_x_ih = N_{i,h}.(o_{i,h} - m - D.z_i - V.y_i), the normalised first order statistics for session h
+  const blitz::Array<double,2>& Fih = stats->sumPx;
+  const blitz::Array<double,1>& m = mb.getUbmMean();
+  const blitz::Array<double,1>& z = m_z[id];
+  const blitz::Array<double,1>& Nih = stats->n;
+  bob::core::array::repelem(Nih, m_tmp_CD);
+  for (size_t c=0; c<m_dim_C; ++c) {
+    blitz::Array<double,1> Fn_x_ih_c = m_cache_Fn_x_ih(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1));
+    Fn_x_ih_c = Fih(c,blitz::Range::all());
+  }
+  m_cache_Fn_x_ih -= m_tmp_CD * (m + d * z); // Fn_x_ih = N_{i,h}*(o_{i,h} - m - D*z_{i})
+
+  const blitz::Array<double,1>& y = m_y[id];
+  bob::math::prod(V, y, m_tmp_CD_b);
+  m_cache_Fn_x_ih -= m_tmp_CD * m_tmp_CD_b;
+  // Fn_x_ih = N_{i,h}*(o_{i,h} - m - D*z_{i} - V*y_{i})
+}
+
+void bob::learn::em::FABaseTrainer::updateX_ih(const size_t id, const size_t h)
+{
+  // Computes x_ih = (I + U^T.Sigma^-1.N_{i,h}.U)^-1 * U^T.Sigma^-1 * Fn_x_ih
+  blitz::Array<double,1> x = m_x[id](blitz::Range::all(), h);
+  // m_tmp_ru = m_cache_UtSigmaInv * m_cache_Fn_x_ih = U^T.diag(sigma)^-1 * Fn_x_ih
+  bob::math::prod(m_cache_UtSigmaInv, m_cache_Fn_x_ih, m_tmp_ru);
+  bob::math::prod(m_cache_IdPlusUProd_ih, m_tmp_ru, x);
+}
+
+void bob::learn::em::FABaseTrainer::updateX(const bob::learn::em::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& stats)
+{
+  // Precomputation
+  computeUtSigmaInv(m);
+  computeUProd(m);
+  // Loops over all people
+  for (size_t id=0; id<stats.size(); ++id) {
+    int n_session_i = stats[id].size();
+    for (int s=0; s<n_session_i; ++s) {
+      computeIdPlusUProd_ih(stats[id][s]);
+      computeFn_x_ih(m, stats[id][s], id);
+      updateX_ih(id, s);
+    }
+  }
+}
+
+void bob::learn::em::FABaseTrainer::computeAccumulatorsU(
+  const bob::learn::em::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& stats)
+{
+  // Initializes the cache accumulator
+  m_acc_U_A1 = 0.;
+  m_acc_U_A2 = 0.;
+  // Loops over all people
+  blitz::firstIndex i;
+  blitz::secondIndex j;
+  blitz::Range rall = blitz::Range::all();
+  for (size_t id=0; id<stats.size(); ++id) {
+    int n_session_i = stats[id].size();
+    for (int h=0; h<n_session_i; ++h) {
+      computeIdPlusUProd_ih(stats[id][h]);
+      computeFn_x_ih(m, stats[id][h], id);
+
+      // Needs to return values to be accumulated for estimating U
+      blitz::Array<double,1> x = m_x[id](rall, h);
+      m_tmp_ruru = m_cache_IdPlusUProd_ih;
+      m_tmp_ruru += x(i) * x(j);
+      for (int c=0; c<(int)m_dim_C; ++c)
+      {
+        blitz::Array<double,2> A1_x_c = m_acc_U_A1(c,rall,rall);
+        A1_x_c += m_tmp_ruru * stats[id][h]->n(c);
+      }
+      m_acc_U_A2 += m_cache_Fn_x_ih(i) * x(j);
+    }
+  }
+}
+
+void bob::learn::em::FABaseTrainer::updateU(blitz::Array<double,2>& U)
+{
+  for (size_t c=0; c<m_dim_C; ++c)
+  {
+    const blitz::Array<double,2> A1 = m_acc_U_A1(c,blitz::Range::all(),blitz::Range::all());
+    bob::math::inv(A1, m_tmp_ruru);
+    const blitz::Array<double,2> A2 = m_acc_U_A2(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1),blitz::Range::all());
+    blitz::Array<double,2> U_c = U(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1),blitz::Range::all());
+    bob::math::prod(A2, m_tmp_ruru, U_c);
+  }
+}
+
+
+//////////////////////////// D ///////////////////////////
+void bob::learn::em::FABaseTrainer::computeDtSigmaInv(const bob::learn::em::FABase& m)
+{
+  const blitz::Array<double,1>& d = m.getD();
+  const blitz::Array<double,1>& sigma = m.getUbmVariance();
+  m_cache_DtSigmaInv = d / sigma; // Dt * diag(sigma)^-1
+}
+
+void bob::learn::em::FABaseTrainer::computeDProd(const bob::learn::em::FABase& m)
+{
+  const blitz::Array<double,1>& d = m.getD();
+  const blitz::Array<double,1>& sigma = m.getUbmVariance();
+  m_cache_DProd = d / sigma * d; // Dt * diag(sigma)^-1 * D
+}
+
+void bob::learn::em::FABaseTrainer::computeIdPlusDProd_i(const size_t id)
+{
+  const blitz::Array<double,1>& Ni = m_Nacc[id];
+  bob::core::array::repelem(Ni, m_tmp_CD); // m_tmp_CD = Ni 'repmat'
+  m_cache_IdPlusDProd_i = 1.; // m_cache_IdPlusDProd_i = Id
+  m_cache_IdPlusDProd_i += m_cache_DProd * m_tmp_CD; // m_cache_IdPlusDProd_i = I+Dt*diag(sigma)^-1*Ni*D
+  m_cache_IdPlusDProd_i = 1 / m_cache_IdPlusDProd_i; // m_cache_IdPlusDProd_i = (I+Dt*diag(sigma)^-1*Ni*D)^-1
+}
+
+void bob::learn::em::FABaseTrainer::computeFn_z_i(
+  const bob::learn::em::FABase& mb,
+  const std::vector<boost::shared_ptr<bob::learn::em::GMMStats> >& stats, const size_t id)
+{
+  const blitz::Array<double,2>& U = mb.getU();
+  const blitz::Array<double,2>& V = mb.getV();
+  // Compute Fn_z_i = sum_{sessions h} N_{i,h}.(o_{i,h} - m - V.y_i - U.x_{i,h}) (normalised first order statistics)
+  const blitz::Array<double,1>& Fi = m_Facc[id];
+  const blitz::Array<double,1>& m = mb.getUbmMean();
+  const blitz::Array<double,1>& y = m_y[id];
+  bob::core::array::repelem(m_Nacc[id], m_tmp_CD);
+  bob::math::prod(V, y, m_tmp_CD_b); // m_tmp_CD_b = V * y
+  m_cache_Fn_z_i = Fi - m_tmp_CD * (m + m_tmp_CD_b); // Fn_z_i = sum_h N_{i,h}.(o_{i,h} - m - V.y_i)
+
+  const blitz::Array<double,2>& X = m_x[id];
+  blitz::Range rall = blitz::Range::all();
+  for (int h=0; h<X.extent(1); ++h) // Loops over the sessions
+  {
+    const blitz::Array<double,1>& Nh = stats[h]->n; // Nh = N_{i,h} (length: C)
+    bob::core::array::repelem(Nh, m_tmp_CD);
+    blitz::Array<double,1> Xh = X(rall, h); // Xh = x_{i,h} (length: ru)
+    bob::math::prod(U, Xh, m_tmp_CD_b);
+    m_cache_Fn_z_i -= m_tmp_CD * m_tmp_CD_b;
+  }
+  // Fn_z_i = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - V*y_{i} - U*x_{i,h}))
+}
+
+void bob::learn::em::FABaseTrainer::updateZ_i(const size_t id)
+{
+  // Computes zi = Azi * D^T.Sigma^-1 * Fn_zi
+  blitz::Array<double,1>& z = m_z[id];
+  // z_i = (I + Dt*diag(sigma)^-1*Ni*D)^-1 * Dt*diag(sigma)^-1 * Fn_z_i (all element-wise: D is diagonal)
+  z = m_cache_IdPlusDProd_i * m_cache_DtSigmaInv * m_cache_Fn_z_i;
+}
+
+void bob::learn::em::FABaseTrainer::updateZ(const bob::learn::em::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& stats)
+{
+  // Precomputation
+  computeDtSigmaInv(m);
+  computeDProd(m);
+  // Loops over all people
+  for (size_t id=0; id<m_Nid; ++id) {
+    computeIdPlusDProd_i(id);
+    computeFn_z_i(m, stats[id], id);
+    updateZ_i(id);
+  }
+}
+
+void bob::learn::em::FABaseTrainer::computeAccumulatorsD(
+  const bob::learn::em::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& stats)
+{
+  // Initializes the cache accumulators
+  m_acc_D_A1 = 0.;
+  m_acc_D_A2 = 0.;
+  // Loops over all people
+  blitz::firstIndex i;
+  blitz::secondIndex j;
+  for (size_t id=0; id<stats.size(); ++id) {
+    computeIdPlusDProd_i(id);
+    computeFn_z_i(m, stats[id], id);
+
+    // Accumulates the values needed to estimate D
+    blitz::Array<double,1> z = m_z[id];
+    bob::core::array::repelem(m_Nacc[id], m_tmp_CD);
+    m_acc_D_A1 += (m_cache_IdPlusDProd_i + z * z) * m_tmp_CD;
+    m_acc_D_A2 += m_cache_Fn_z_i * z;
+  }
+}
+
+void bob::learn::em::FABaseTrainer::updateD(blitz::Array<double,1>& d)
+{
+  d = m_acc_D_A2 / m_acc_D_A1;
+}
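+
+// Since D is diagonal, the corresponding M-step decouples per supervector
+// dimension: with A1 = sum_i (Var[z_i] + E[z_i]^2) .* N_i and
+// A2 = sum_i Fn_z_i .* E[z_i] (both of length CD), updateD() reduces to the
+// element-wise ratio d_k = A2_k / A1_k -- the scalar least-squares solution
+// for each diagonal entry of D.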
+
+
diff --git a/bob/learn/em/cpp/GMMBaseTrainer.cpp b/bob/learn/em/cpp/GMMBaseTrainer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9351975741ae2f52aab17cd53c38ed27fe1c7c86
--- /dev/null
+++ b/bob/learn/em/cpp/GMMBaseTrainer.cpp
@@ -0,0 +1,94 @@
+/**
+ * @date Tue May 10 11:35:58 2011 +0200
+ * @author Francois Moulin <Francois.Moulin@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include <bob.learn.em/GMMBaseTrainer.h>
+#include <bob.core/assert.h>
+#include <bob.core/check.h>
+
+bob::learn::em::GMMBaseTrainer::GMMBaseTrainer(const bool update_means,
+    const bool update_variances, const bool update_weights,
+    const double mean_var_update_responsibilities_threshold):
+  m_update_means(update_means), m_update_variances(update_variances),
+  m_update_weights(update_weights),
+  m_mean_var_update_responsibilities_threshold(mean_var_update_responsibilities_threshold)
+{}
+
+bob::learn::em::GMMBaseTrainer::GMMBaseTrainer(const bob::learn::em::GMMBaseTrainer& b):
+  m_update_means(b.m_update_means), m_update_variances(b.m_update_variances),
+  m_update_weights(b.m_update_weights),
+  m_mean_var_update_responsibilities_threshold(b.m_mean_var_update_responsibilities_threshold)
+{}
+
+bob::learn::em::GMMBaseTrainer::~GMMBaseTrainer()
+{}
+
+void bob::learn::em::GMMBaseTrainer::initialize(bob::learn::em::GMMMachine& gmm)
+{
+  // Allocate memory for the sufficient statistics and initialise
+  m_ss.resize(gmm.getNGaussians(),gmm.getNInputs());
+}
+
+void bob::learn::em::GMMBaseTrainer::eStep(bob::learn::em::GMMMachine& gmm,
+  const blitz::Array<double,2>& data)
+{
+  m_ss.init();
+  // Calculate the sufficient statistics and save in m_ss
+  gmm.accStatistics(data, m_ss);
+}
+
+double bob::learn::em::GMMBaseTrainer::computeLikelihood(bob::learn::em::GMMMachine& gmm)
+{
+  // Average log-likelihood per sample, over the T samples accumulated in eStep()
+  return m_ss.log_likelihood / m_ss.T;
+}
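+
+// Hypothetical driver loop showing how this base class is typically used;
+// the concrete mStep() is provided by derived trainers (e.g. the ML or MAP
+// GMM trainers), and max_iterations/convergence_threshold are illustrative:
+//
+//   trainer.initialize(gmm);                    // allocates the statistics
+//   double previous = -std::numeric_limits<double>::infinity();
+//   for (size_t i = 0; i < max_iterations; ++i) {
+//     trainer.eStep(gmm, data);                 // accumulates m_ss
+//     trainer.mStep(gmm);                       // updates the GMM parameters
+//     const double current = trainer.computeLikelihood(gmm);
+//     if (std::abs(current - previous) < convergence_threshold) break;
+//     previous = current;
+//   }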
+
+
+bob::learn::em::GMMBaseTrainer& bob::learn::em::GMMBaseTrainer::operator=
+  (const bob::learn::em::GMMBaseTrainer &other)
+{
+  if (this != &other)
+  {
+    m_ss = other.m_ss;
+    m_update_means = other.m_update_means;
+    m_update_variances = other.m_update_variances;
+    m_update_weights = other.m_update_weights;
+    m_mean_var_update_responsibilities_threshold = other.m_mean_var_update_responsibilities_threshold;
+  }
+  return *this;
+}
+
+bool bob::learn::em::GMMBaseTrainer::operator==
+  (const bob::learn::em::GMMBaseTrainer &other) const
+{
+  return m_ss == other.m_ss &&
+         m_update_means == other.m_update_means &&
+         m_update_variances == other.m_update_variances &&
+         m_update_weights == other.m_update_weights &&
+         m_mean_var_update_responsibilities_threshold == other.m_mean_var_update_responsibilities_threshold;
+}
+
+bool bob::learn::em::GMMBaseTrainer::operator!=
+  (const bob::learn::em::GMMBaseTrainer &other) const
+{
+  return !(this->operator==(other));
+}
+
+bool bob::learn::em::GMMBaseTrainer::is_similar_to
+  (const bob::learn::em::GMMBaseTrainer &other, const double r_epsilon,
+   const double a_epsilon) const
+{
+  return m_ss == other.m_ss &&
+         m_update_means == other.m_update_means &&
+         m_update_variances == other.m_update_variances &&
+         m_update_weights == other.m_update_weights &&
+         bob::core::isClose(m_mean_var_update_responsibilities_threshold,
+          other.m_mean_var_update_responsibilities_threshold, r_epsilon, a_epsilon);
+}
+
+void bob::learn::em::GMMBaseTrainer::setGMMStats(const bob::learn::em::GMMStats& stats)
+{
+  bob::core::array::assertSameShape(m_ss.sumPx, stats.sumPx);
+  m_ss = stats;
+}
diff --git a/bob/learn/em/cpp/GMMMachine.cpp b/bob/learn/em/cpp/GMMMachine.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..7b8b3daff0a185995cf6a08a7c0e0ed80bf5ad47
--- /dev/null
+++ b/bob/learn/em/cpp/GMMMachine.cpp
@@ -0,0 +1,436 @@
+/**
+ * @date Tue May 10 11:35:58 2011 +0200
+ * @author Francois Moulin <Francois.Moulin@idiap.ch>
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include <bob.learn.em/GMMMachine.h>
+#include <bob.core/assert.h>
+#include <bob.math/log.h>
+
+bob::learn::em::GMMMachine::GMMMachine(): m_gaussians(0) {
+  resize(0,0);
+}
+
+bob::learn::em::GMMMachine::GMMMachine(const size_t n_gaussians, const size_t n_inputs):
+  m_gaussians(0)
+{
+  resize(n_gaussians,n_inputs);
+}
+
+bob::learn::em::GMMMachine::GMMMachine(bob::io::base::HDF5File& config):
+  m_gaussians(0)
+{
+  load(config);
+}
+
+bob::learn::em::GMMMachine::GMMMachine(const GMMMachine& other)  
+{
+  copy(other);
+}
+
+bob::learn::em::GMMMachine& bob::learn::em::GMMMachine::operator=(const bob::learn::em::GMMMachine &other) {
+  // protect against invalid self-assignment
+  if (this != &other)
+    copy(other);
+
+  // by convention, always return *this
+  return *this;
+}
+
+bool bob::learn::em::GMMMachine::operator==(const bob::learn::em::GMMMachine& b) const
+{
+  if (m_n_gaussians != b.m_n_gaussians || m_n_inputs != b.m_n_inputs ||
+      !bob::core::array::isEqual(m_weights, b.m_weights))
+    return false;
+
+  for(size_t i=0; i<m_n_gaussians; ++i) {
+    if(!(*(m_gaussians[i]) == *(b.m_gaussians[i])))
+      return false;
+  }
+
+  return true;
+}
+
+bool bob::learn::em::GMMMachine::operator!=(const bob::learn::em::GMMMachine& b) const {
+  return !(this->operator==(b));
+}
+
+bool bob::learn::em::GMMMachine::is_similar_to(const bob::learn::em::GMMMachine& b,
+  const double r_epsilon, const double a_epsilon) const
+{
+  if (m_n_gaussians != b.m_n_gaussians || m_n_inputs != b.m_n_inputs ||
+      !bob::core::array::isClose(m_weights, b.m_weights, r_epsilon, a_epsilon))
+    return false;
+
+  for (size_t i = 0; i < m_n_gaussians; ++i)
+    if (!m_gaussians[i]->is_similar_to(*b.m_gaussians[i], r_epsilon, a_epsilon))
+      return false;
+
+  return true;
+}
+
+void bob::learn::em::GMMMachine::copy(const GMMMachine& other) {
+  m_n_gaussians = other.m_n_gaussians;
+  m_n_inputs = other.m_n_inputs;
+
+  // Initialise weights
+  m_weights.resize(m_n_gaussians);
+  m_weights = other.m_weights;
+
+  // Initialise Gaussians
+  m_gaussians.clear();
+  for(size_t i=0; i<m_n_gaussians; ++i) {
+    boost::shared_ptr<bob::learn::em::Gaussian> g(new bob::learn::em::Gaussian(*(other.m_gaussians[i])));
+    m_gaussians.push_back(g);
+  }
+
+  // Initialise cache
+  initCache();
+}
+
+bob::learn::em::GMMMachine::~GMMMachine() { }
+
+
+/////////////////////
+// Setters 
+////////////////////
+
+void bob::learn::em::GMMMachine::setWeights(const blitz::Array<double,1> &weights) {
+  bob::core::array::assertSameShape(weights, m_weights);
+  m_weights = weights;
+  recomputeLogWeights();
+}
+
+void bob::learn::em::GMMMachine::recomputeLogWeights() const
+{
+  m_cache_log_weights = blitz::log(m_weights);
+}
+
+void bob::learn::em::GMMMachine::setMeans(const blitz::Array<double,2> &means) {
+  bob::core::array::assertSameDimensionLength(means.extent(0), m_n_gaussians);
+  bob::core::array::assertSameDimensionLength(means.extent(1), m_n_inputs);
+  for(size_t i=0; i<m_n_gaussians; ++i)
+    m_gaussians[i]->updateMean() = means(i,blitz::Range::all());
+  m_cache_supervector = false;
+}
+
+void bob::learn::em::GMMMachine::setMeanSupervector(const blitz::Array<double,1> &mean_supervector) {
+  bob::core::array::assertSameDimensionLength(mean_supervector.extent(0), m_n_gaussians*m_n_inputs);
+  for(size_t i=0; i<m_n_gaussians; ++i)
+    m_gaussians[i]->updateMean() = mean_supervector(blitz::Range(i*m_n_inputs, (i+1)*m_n_inputs-1));
+  m_cache_supervector = false;
+}
+
+
+void bob::learn::em::GMMMachine::setVariances(const blitz::Array<double, 2 >& variances) {
+  bob::core::array::assertSameDimensionLength(variances.extent(0), m_n_gaussians);
+  bob::core::array::assertSameDimensionLength(variances.extent(1), m_n_inputs);
+  for(size_t i=0; i<m_n_gaussians; ++i) {
+    m_gaussians[i]->updateVariance() = variances(i,blitz::Range::all());
+    m_gaussians[i]->applyVarianceThresholds();
+  }
+  m_cache_supervector = false;
+}
+
+void bob::learn::em::GMMMachine::setVarianceSupervector(const blitz::Array<double,1> &variance_supervector) {
+  bob::core::array::assertSameDimensionLength(variance_supervector.extent(0), m_n_gaussians*m_n_inputs);
+  for(size_t i=0; i<m_n_gaussians; ++i) {
+    m_gaussians[i]->updateVariance() = variance_supervector(blitz::Range(i*m_n_inputs, (i+1)*m_n_inputs-1));
+    m_gaussians[i]->applyVarianceThresholds();
+  }
+  m_cache_supervector = false;
+}
+
+void bob::learn::em::GMMMachine::setVarianceThresholds(const double value) {
+  for(size_t i=0; i<m_n_gaussians; ++i)
+    m_gaussians[i]->setVarianceThresholds(value);
+  m_cache_supervector = false;
+}
+
+void bob::learn::em::GMMMachine::setVarianceThresholds(blitz::Array<double, 1> variance_thresholds) {
+  bob::core::array::assertSameDimensionLength(variance_thresholds.extent(0), m_n_inputs);
+  for(size_t i=0; i<m_n_gaussians; ++i)
+    m_gaussians[i]->setVarianceThresholds(variance_thresholds);
+  m_cache_supervector = false;
+}
+
+void bob::learn::em::GMMMachine::setVarianceThresholds(const blitz::Array<double, 2>& variance_thresholds) {
+  bob::core::array::assertSameDimensionLength(variance_thresholds.extent(0), m_n_gaussians);
+  bob::core::array::assertSameDimensionLength(variance_thresholds.extent(1), m_n_inputs);
+  for(size_t i=0; i<m_n_gaussians; ++i)
+    m_gaussians[i]->setVarianceThresholds(variance_thresholds(i,blitz::Range::all()));
+  m_cache_supervector = false;
+}
+
+/////////////////////
+// Getters 
+////////////////////
+
+const blitz::Array<double,2> bob::learn::em::GMMMachine::getMeans() const {
+
+  blitz::Array<double,2> means(m_n_gaussians,m_n_inputs);  
+  for(size_t i=0; i<m_n_gaussians; ++i)
+    means(i,blitz::Range::all()) = m_gaussians[i]->getMean();
+    
+  return means;
+}
+
+const blitz::Array<double,2> bob::learn::em::GMMMachine::getVariances() const{
+  
+  blitz::Array<double,2> variances(m_n_gaussians,m_n_inputs);
+  for(size_t i=0; i<m_n_gaussians; ++i)
+    variances(i,blitz::Range::all()) = m_gaussians[i]->getVariance();
+
+  return variances;
+}
+
+
+const blitz::Array<double,2> bob::learn::em::GMMMachine::getVarianceThresholds() const {
+  blitz::Array<double, 2> variance_thresholds(m_n_gaussians, m_n_inputs);
+  for(size_t i=0; i<m_n_gaussians; ++i)
+    variance_thresholds(i,blitz::Range::all()) = m_gaussians[i]->getVarianceThresholds();
+
+  return variance_thresholds;
+}
+
+
+/////////////////////
+// Methods
+////////////////////
+
+
+void bob::learn::em::GMMMachine::resize(const size_t n_gaussians, const size_t n_inputs) {
+  m_n_gaussians = n_gaussians;
+  m_n_inputs = n_inputs;
+
+  // Initialise weights
+  m_weights.resize(m_n_gaussians);
+  m_weights = 1.0 / m_n_gaussians;
+
+  // Initialise Gaussians
+  m_gaussians.clear();
+  for(size_t i=0; i<m_n_gaussians; ++i)
+    m_gaussians.push_back(boost::shared_ptr<bob::learn::em::Gaussian>(new bob::learn::em::Gaussian(n_inputs)));
+
+  // Initialise cache arrays
+  initCache();
+}
+
+double bob::learn::em::GMMMachine::logLikelihood(const blitz::Array<double, 1> &x,
+  blitz::Array<double,1> &log_weighted_gaussian_likelihoods) const
+{
+  // Check dimension
+  bob::core::array::assertSameDimensionLength(log_weighted_gaussian_likelihoods.extent(0), m_n_gaussians);
+  bob::core::array::assertSameDimensionLength(x.extent(0), m_n_inputs);
+  return logLikelihood_(x,log_weighted_gaussian_likelihoods);
+}
+
+double bob::learn::em::GMMMachine::logLikelihood_(const blitz::Array<double, 1> &x,
+  blitz::Array<double,1> &log_weighted_gaussian_likelihoods) const
+{
+  // Initialise variables
+  double log_likelihood = bob::math::Log::LogZero;
+
+  // Accumulate the weighted log likelihoods from each Gaussian
+  for(size_t i=0; i<m_n_gaussians; ++i) {
+    double l = m_cache_log_weights(i) + m_gaussians[i]->logLikelihood_(x);
+    log_weighted_gaussian_likelihoods(i) = l;
+    log_likelihood = bob::math::Log::logAdd(log_likelihood, l);
+  }
+
+  // Return log(p(x|GMMMachine))
+  return log_likelihood;
+}
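+
+// bob::math::Log::logAdd evaluates log(exp(a) + exp(b)) via the stable
+// log-sum-exp identity, so the loop above computes
+//
+//   log p(x) = log sum_i exp(log w_i + log N(x | mu_i, sigma_i))
+//
+// without ever exponentiating the (typically very negative) summands.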
+
+double bob::learn::em::GMMMachine::logLikelihood(const blitz::Array<double, 1> &x) const {
+  // Check dimension
+  bob::core::array::assertSameDimensionLength(x.extent(0), m_n_inputs);
+  // Call the other logLikelihood_ (overloaded) function
+  // (log_weighted_gaussian_likelihoods will be discarded)
+  return logLikelihood_(x,m_cache_log_weighted_gaussian_likelihoods);
+}
+
+double bob::learn::em::GMMMachine::logLikelihood_(const blitz::Array<double, 1> &x) const {
+  // Call the other logLikelihood_ (overloaded) function
+  // (log_weighted_gaussian_likelihoods will be discarded)
+  return logLikelihood_(x,m_cache_log_weighted_gaussian_likelihoods);
+}
+
+void bob::learn::em::GMMMachine::accStatistics(const blitz::Array<double,2>& input,
+    bob::learn::em::GMMStats& stats) const {
+  // iterate over data
+  blitz::Range a = blitz::Range::all();
+  for(int i=0; i<input.extent(0); ++i) {
+    // Get example
+    blitz::Array<double,1> x(input(i,a));
+    // Accumulate statistics
+    accStatistics(x,stats);
+  }
+}
+
+void bob::learn::em::GMMMachine::accStatistics_(const blitz::Array<double,2>& input, bob::learn::em::GMMStats& stats) const {
+  // iterate over data
+  blitz::Range a = blitz::Range::all();
+  for(int i=0; i<input.extent(0); ++i) {
+    // Get example
+    blitz::Array<double,1> x(input(i, a));
+    // Accumulate statistics
+    accStatistics_(x,stats);
+  }
+}
+
+void bob::learn::em::GMMMachine::accStatistics(const blitz::Array<double, 1>& x, bob::learn::em::GMMStats& stats) const {
+  // check GMMStats size
+  bob::core::array::assertSameDimensionLength(stats.sumPx.extent(0), m_n_gaussians);
+  bob::core::array::assertSameDimensionLength(stats.sumPx.extent(1), m_n_inputs);
+
+  // Calculate Gaussian and GMM likelihoods
+  // - m_cache_log_weighted_gaussian_likelihoods(i) = log(weight_i*p(x|gaussian_i))
+  // - log_likelihood = log(sum_i(weight_i*p(x|gaussian_i)))
+  double log_likelihood = logLikelihood(x, m_cache_log_weighted_gaussian_likelihoods);
+
+  accStatisticsInternal(x, stats, log_likelihood);
+}
+
+void bob::learn::em::GMMMachine::accStatistics_(const blitz::Array<double, 1>& x, bob::learn::em::GMMStats& stats) const {
+  // Calculate Gaussian and GMM likelihoods
+  // - m_cache_log_weighted_gaussian_likelihoods(i) = log(weight_i*p(x|gaussian_i))
+  // - log_likelihood = log(sum_i(weight_i*p(x|gaussian_i)))
+  double log_likelihood = logLikelihood_(x, m_cache_log_weighted_gaussian_likelihoods);
+
+  accStatisticsInternal(x, stats, log_likelihood);
+}
+
+void bob::learn::em::GMMMachine::accStatisticsInternal(const blitz::Array<double, 1>& x,
+  bob::learn::em::GMMStats& stats, const double log_likelihood) const
+{
+  // Calculate responsibilities
+  m_cache_P = blitz::exp(m_cache_log_weighted_gaussian_likelihoods - log_likelihood);
+
+  // Accumulate statistics
+  // - total likelihood
+  stats.log_likelihood += log_likelihood;
+
+  // - number of samples
+  stats.T++;
+
+  // - responsibilities
+  stats.n += m_cache_P;
+
+  // - first order stats
+  blitz::firstIndex i;
+  blitz::secondIndex j;
+
+  m_cache_Px = m_cache_P(i) * x(j);
+
+  stats.sumPx += m_cache_Px;
+
+  // - second order stats
+  stats.sumPxx += (m_cache_Px(i,j) * x(j));
+}
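+
+// In equations, with responsibilities P_i = w_i*p(x|i) / sum_j w_j*p(x|j)
+// (obtained in the log domain above), each call accumulates
+//
+//   T        += 1                     (sample count)
+//   n_i      += P_i                   (zeroth-order statistics)
+//   sumPx_i  += P_i * x               (first-order statistics)
+//   sumPxx_i += P_i * (x .* x)        (second-order, element-wise square)
+//
+// which is exactly what the EM M-step of a diagonal GMM requires.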
+
+boost::shared_ptr<bob::learn::em::Gaussian> bob::learn::em::GMMMachine::getGaussian(const size_t i) {
+  if (i>=m_n_gaussians) {
+    throw std::runtime_error("getGaussian(): index out of bounds");
+  }
+  return m_gaussians[i];
+}
+
+void bob::learn::em::GMMMachine::save(bob::io::base::HDF5File& config) const {
+  int64_t v = static_cast<int64_t>(m_n_gaussians);
+  config.set("m_n_gaussians", v);
+  v = static_cast<int64_t>(m_n_inputs);
+  config.set("m_n_inputs", v);
+
+  for(size_t i=0; i<m_n_gaussians; ++i) {
+    std::ostringstream oss;
+    oss << "m_gaussians" << i;
+
+    if (!config.hasGroup(oss.str())) config.createGroup(oss.str());
+    config.cd(oss.str());
+    m_gaussians[i]->save(config);
+    config.cd("..");
+  }
+
+  config.setArray("m_weights", m_weights);
+}
+
+void bob::learn::em::GMMMachine::load(bob::io::base::HDF5File& config) {
+  int64_t v;
+  v = config.read<int64_t>("m_n_gaussians");
+  m_n_gaussians = static_cast<size_t>(v);
+  v = config.read<int64_t>("m_n_inputs");
+  m_n_inputs = static_cast<size_t>(v);
+
+  m_gaussians.clear();
+  for(size_t i=0; i<m_n_gaussians; ++i) {
+    m_gaussians.push_back(boost::shared_ptr<bob::learn::em::Gaussian>(new bob::learn::em::Gaussian(m_n_inputs)));
+    std::ostringstream oss;
+    oss << "m_gaussians" << i;
+    config.cd(oss.str());
+    m_gaussians[i]->load(config);
+    config.cd("..");
+  }
+
+  m_weights.resize(m_n_gaussians);
+  config.readArray("m_weights", m_weights);
+
+  // Initialise cache
+  initCache();
+}
+
+void bob::learn::em::GMMMachine::updateCacheSupervectors() const
+{
+  m_cache_mean_supervector.resize(m_n_gaussians*m_n_inputs);
+  m_cache_variance_supervector.resize(m_n_gaussians*m_n_inputs);
+
+  for(size_t i=0; i<m_n_gaussians; ++i) {
+    blitz::Range range(i*m_n_inputs, (i+1)*m_n_inputs-1);
+    m_cache_mean_supervector(range) = m_gaussians[i]->getMean();
+    m_cache_variance_supervector(range) = m_gaussians[i]->getVariance();
+  }
+  m_cache_supervector = true;
+}
+
+void bob::learn::em::GMMMachine::initCache() const {
+  // Initialise cache arrays
+  m_cache_log_weights.resize(m_n_gaussians);
+  recomputeLogWeights();
+  m_cache_log_weighted_gaussian_likelihoods.resize(m_n_gaussians);
+  m_cache_P.resize(m_n_gaussians);
+  m_cache_Px.resize(m_n_gaussians,m_n_inputs);
+  m_cache_supervector = false;
+}
+
+void bob::learn::em::GMMMachine::reloadCacheSupervectors() const {
+  if(!m_cache_supervector)
+    updateCacheSupervectors();
+}
+
+const blitz::Array<double,1>& bob::learn::em::GMMMachine::getMeanSupervector() const {
+  if(!m_cache_supervector)
+    updateCacheSupervectors();
+  return m_cache_mean_supervector;
+}
+
+const blitz::Array<double,1>& bob::learn::em::GMMMachine::getVarianceSupervector() const {
+  if(!m_cache_supervector)
+    updateCacheSupervectors();
+  return m_cache_variance_supervector;
+}
+
+namespace bob { namespace learn { namespace em {
+  std::ostream& operator<<(std::ostream& os, const GMMMachine& machine) {
+    os << "Weights = " << machine.m_weights << std::endl;
+    for(size_t i=0; i < machine.m_n_gaussians; ++i) {
+      os << "Gaussian " << i << ": " << std::endl << *(machine.m_gaussians[i]);
+    }
+
+    return os;
+  }
+} } }
diff --git a/bob/learn/em/cpp/GMMStats.cpp b/bob/learn/em/cpp/GMMStats.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..8a9f783c58068b2b67f9a7ac0e735447ff265143
--- /dev/null
+++ b/bob/learn/em/cpp/GMMStats.cpp
@@ -0,0 +1,151 @@
+/**
+ * @date Tue May 10 11:35:58 2011 +0200
+ * @author Francois Moulin <Francois.Moulin@idiap.ch>
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include <bob.learn.em/GMMStats.h>
+#include <bob.core/logging.h>
+#include <bob.core/check.h>
+
+bob::learn::em::GMMStats::GMMStats() {
+  resize(0,0);
+}
+
+bob::learn::em::GMMStats::GMMStats(const size_t n_gaussians, const size_t n_inputs) {
+  resize(n_gaussians,n_inputs);
+}
+
+bob::learn::em::GMMStats::GMMStats(bob::io::base::HDF5File& config) {
+  load(config);
+}
+
+bob::learn::em::GMMStats::GMMStats(const bob::learn::em::GMMStats& other) {
+  copy(other);
+}
+
+bob::learn::em::GMMStats::~GMMStats() {
+}
+
+bob::learn::em::GMMStats&
+bob::learn::em::GMMStats::operator=(const bob::learn::em::GMMStats& other) {
+  // protect against invalid self-assignment
+  if (this != &other)
+    copy(other);
+
+  // by convention, always return *this
+  return *this;
+}
+
+bool bob::learn::em::GMMStats::operator==(const bob::learn::em::GMMStats& b) const
+{
+  return (T == b.T && log_likelihood == b.log_likelihood &&
+          bob::core::array::isEqual(n, b.n) &&
+          bob::core::array::isEqual(sumPx, b.sumPx) &&
+          bob::core::array::isEqual(sumPxx, b.sumPxx));
+}
+
+bool
+bob::learn::em::GMMStats::operator!=(const bob::learn::em::GMMStats& b) const
+{
+  return !(this->operator==(b));
+}
+
+bool bob::learn::em::GMMStats::is_similar_to(const bob::learn::em::GMMStats& b,
+  const double r_epsilon, const double a_epsilon) const
+{
+  return (T == b.T &&
+          bob::core::isClose(log_likelihood, b.log_likelihood, r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(n, b.n, r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(sumPx, b.sumPx, r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(sumPxx, b.sumPxx, r_epsilon, a_epsilon));
+}
+
+
+void bob::learn::em::GMMStats::operator+=(const bob::learn::em::GMMStats& b) {
+  // Check dimensions
+  if(n.extent(0) != b.n.extent(0) ||
+      sumPx.extent(0) != b.sumPx.extent(0) || sumPx.extent(1) != b.sumPx.extent(1) ||
+      sumPxx.extent(0) != b.sumPxx.extent(0) || sumPxx.extent(1) != b.sumPxx.extent(1))
+    // TODO: add a specialized exception
+    throw std::runtime_error("if you see this exception, fill a bug report");
+
+  // Update GMMStats object with the content of the other one
+  T += b.T;
+  log_likelihood += b.log_likelihood;
+  n += b.n;
+  sumPx += b.sumPx;
+  sumPxx += b.sumPxx;
+}
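+
+// operator+= makes the statistics mergeable, which allows map-reduce style
+// accumulation over data chunks (hypothetical sketch; C and D stand for the
+// number of Gaussians and the feature dimension):
+//
+//   bob::learn::em::GMMStats total(C, D);
+//   for (const auto& chunk : per_chunk_stats) total += chunk;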
+
+void bob::learn::em::GMMStats::copy(const GMMStats& other) {
+  // Resize arrays
+  resize(other.sumPx.extent(0),other.sumPx.extent(1));
+  // Copy content
+  T = other.T;
+  log_likelihood = other.log_likelihood;
+  n = other.n;
+  sumPx = other.sumPx;
+  sumPxx = other.sumPxx;
+}
+
+void bob::learn::em::GMMStats::resize(const size_t n_gaussians, const size_t n_inputs) {
+  n.resize(n_gaussians);
+  sumPx.resize(n_gaussians, n_inputs);
+  sumPxx.resize(n_gaussians, n_inputs);
+  init();
+}
+
+void bob::learn::em::GMMStats::init() {
+  log_likelihood = 0;
+  T = 0;
+  n = 0.0;
+  sumPx = 0.0;
+  sumPxx = 0.0;
+}
+
+void bob::learn::em::GMMStats::save(bob::io::base::HDF5File& config) const {
+  // Note: we fix the output values to precise types so they can be
+  // retrieved on any platform with exactly the same precision.
+  // TODO: add versioning, replace int64_t by uint64_t and log_liklihood by log_likelihood
+  int64_t sumpx_shape_0 = sumPx.shape()[0];
+  int64_t sumpx_shape_1 = sumPx.shape()[1];
+  config.set("n_gaussians", sumpx_shape_0);
+  config.set("n_inputs", sumpx_shape_1);
+  config.set("log_liklihood", log_likelihood); //double
+  config.set("T", static_cast<int64_t>(T));
+  config.setArray("n", n); //Array1d
+  config.setArray("sumPx", sumPx); //Array2d
+  config.setArray("sumPxx", sumPxx); //Array2d
+}
+
+void bob::learn::em::GMMStats::load(bob::io::base::HDF5File& config) {
+  log_likelihood = config.read<double>("log_liklihood");
+  int64_t n_gaussians = config.read<int64_t>("n_gaussians");
+  int64_t n_inputs = config.read<int64_t>("n_inputs");
+  T = static_cast<size_t>(config.read<int64_t>("T"));
+
+  //resize arrays to prepare for HDF5 readout
+  n.resize(n_gaussians);
+  sumPx.resize(n_gaussians, n_inputs);
+  sumPxx.resize(n_gaussians, n_inputs);
+
+  //load data
+  config.readArray("n", n);
+  config.readArray("sumPx", sumPx);
+  config.readArray("sumPxx", sumPxx);
+}
+
+namespace bob { namespace learn { namespace em {
+  std::ostream& operator<<(std::ostream& os, const GMMStats& g) {
+    os << "log_likelihood = " << g.log_likelihood << std::endl;
+    os << "T = " << g.T << std::endl;
+    os << "n = " << g.n;
+    os << "sumPx = " << g.sumPx;
+    os << "sumPxx = " << g.sumPxx;
+
+    return os;
+  }
+} } }
diff --git a/bob/learn/em/cpp/Gaussian.cpp b/bob/learn/em/cpp/Gaussian.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b8bdbbf888966836b47b5c58d43876bc474002d7
--- /dev/null
+++ b/bob/learn/em/cpp/Gaussian.cpp
@@ -0,0 +1,184 @@
+/**
+ * @date Tue May 10 11:35:58 2011 +0200
+ * @author Francois Moulin <Francois.Moulin@idiap.ch>
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include <bob.learn.em/Gaussian.h>
+
+#include <bob.core/assert.h>
+#include <bob.math/log.h>
+
+bob::learn::em::Gaussian::Gaussian() {
+  resize(0);
+}
+
+bob::learn::em::Gaussian::Gaussian(const size_t n_inputs) {
+  resize(n_inputs);
+}
+
+bob::learn::em::Gaussian::Gaussian(const bob::learn::em::Gaussian& other) {
+  copy(other);
+}
+
+bob::learn::em::Gaussian::Gaussian(bob::io::base::HDF5File& config) {
+  load(config);
+}
+
+bob::learn::em::Gaussian::~Gaussian() {
+}
+
+bob::learn::em::Gaussian& bob::learn::em::Gaussian::operator=(const bob::learn::em::Gaussian &other) {
+  if(this != &other)
+    copy(other);
+
+  return *this;
+}
+
+bool bob::learn::em::Gaussian::operator==(const bob::learn::em::Gaussian& b) const
+{
+  return (bob::core::array::isEqual(m_mean, b.m_mean) &&
+          bob::core::array::isEqual(m_variance, b.m_variance) &&
+          bob::core::array::isEqual(m_variance_thresholds, b.m_variance_thresholds));
+}
+
+bool bob::learn::em::Gaussian::operator!=(const bob::learn::em::Gaussian& b) const {
+  return !(this->operator==(b));
+}
+
+bool bob::learn::em::Gaussian::is_similar_to(const bob::learn::em::Gaussian& b,
+  const double r_epsilon, const double a_epsilon) const
+{
+  return (bob::core::array::isClose(m_mean, b.m_mean, r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(m_variance, b.m_variance, r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(m_variance_thresholds, b.m_variance_thresholds, r_epsilon, a_epsilon));
+}
+
+void bob::learn::em::Gaussian::copy(const bob::learn::em::Gaussian& other) {
+  m_n_inputs = other.m_n_inputs;
+
+  m_mean.resize(m_n_inputs);
+  m_mean = other.m_mean;
+
+  m_variance.resize(m_n_inputs);
+  m_variance = other.m_variance;
+
+  m_variance_thresholds.resize(m_n_inputs);
+  m_variance_thresholds = other.m_variance_thresholds;
+
+  m_n_log2pi = other.m_n_log2pi;
+  m_g_norm = other.m_g_norm;
+}
+
+
+void bob::learn::em::Gaussian::setNInputs(const size_t n_inputs) {
+  resize(n_inputs);
+}
+
+void bob::learn::em::Gaussian::resize(const size_t n_inputs) {
+  m_n_inputs = n_inputs;
+  m_mean.resize(m_n_inputs);
+  m_mean = 0;
+  m_variance.resize(m_n_inputs);
+  m_variance = 1;
+  m_variance_thresholds.resize(m_n_inputs);
+  m_variance_thresholds = 0;
+
+  // Re-compute g_norm, because m_n_inputs and m_variance
+  // have changed
+  preComputeNLog2Pi();
+  preComputeConstants();
+}
+
+void bob::learn::em::Gaussian::setMean(const blitz::Array<double,1> &mean) {
+  // Check and set
+  bob::core::array::assertSameShape(m_mean, mean);
+  m_mean = mean;
+}
+
+void bob::learn::em::Gaussian::setVariance(const blitz::Array<double,1> &variance) {
+  // Check and set
+  bob::core::array::assertSameShape(m_variance, variance);
+  m_variance = variance;
+
+  // Variance flooring
+  applyVarianceThresholds();
+}
+
+void bob::learn::em::Gaussian::setVarianceThresholds(const blitz::Array<double,1> &variance_thresholds) {
+  // Check and set
+  bob::core::array::assertSameShape(m_variance_thresholds, variance_thresholds);
+  m_variance_thresholds = variance_thresholds;
+
+  // Variance flooring
+  applyVarianceThresholds();
+}
+
+void bob::learn::em::Gaussian::setVarianceThresholds(const double value) {
+  blitz::Array<double,1> variance_thresholds(m_n_inputs);
+  variance_thresholds = value;
+  setVarianceThresholds(variance_thresholds);
+}
+
+void bob::learn::em::Gaussian::applyVarianceThresholds() {
+   // Apply variance flooring threshold
+  m_variance = blitz::where( m_variance < m_variance_thresholds, m_variance_thresholds, m_variance);
+
+  // Re-compute g_norm, because m_variance has changed
+  preComputeConstants();
+}
+
+double bob::learn::em::Gaussian::logLikelihood(const blitz::Array<double,1> &x) const {
+  // Check
+  bob::core::array::assertSameShape(x, m_mean);
+  return logLikelihood_(x);
+}
+
+double bob::learn::em::Gaussian::logLikelihood_(const blitz::Array<double,1> &x) const {
+  double z = blitz::sum(blitz::pow2(x - m_mean) / m_variance);
+  // Log Likelihood
+  return (-0.5 * (m_g_norm + z));
+}
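+
+// Spelled out, with D = m_n_inputs and a diagonal covariance:
+//
+//   log N(x | mu, sigma) = -0.5 * ( D*log(2*pi)
+//                                 + sum_d log(sigma_d)
+//                                 + sum_d (x_d - mu_d)^2 / sigma_d )
+//
+// m_g_norm caches the two data-independent terms (see preComputeConstants),
+// so only the Mahalanobis-like term z is evaluated per sample.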
+
+void bob::learn::em::Gaussian::preComputeNLog2Pi() {
+  m_n_log2pi = m_n_inputs * bob::math::Log::Log2Pi;
+}
+
+void bob::learn::em::Gaussian::preComputeConstants() {
+  m_g_norm = m_n_log2pi + blitz::sum(blitz::log(m_variance));
+}
+
+void bob::learn::em::Gaussian::save(bob::io::base::HDF5File& config) const {
+  config.setArray("m_mean", m_mean);
+  config.setArray("m_variance", m_variance);
+  config.setArray("m_variance_thresholds", m_variance_thresholds);
+  config.set("g_norm", m_g_norm);
+  int64_t v = static_cast<int64_t>(m_n_inputs);
+  config.set("m_n_inputs", v);
+}
+
+void bob::learn::em::Gaussian::load(bob::io::base::HDF5File& config) {
+  int64_t v = config.read<int64_t>("m_n_inputs");
+  m_n_inputs = static_cast<size_t>(v);
+
+  m_mean.resize(m_n_inputs);
+  m_variance.resize(m_n_inputs);
+  m_variance_thresholds.resize(m_n_inputs);
+
+  config.readArray("m_mean", m_mean);
+  config.readArray("m_variance", m_variance);
+  config.readArray("m_variance_thresholds", m_variance_thresholds);
+
+  preComputeNLog2Pi();
+  m_g_norm = config.read<double>("g_norm");
+}
+
+namespace bob { namespace learn { namespace em {
+  std::ostream& operator<<(std::ostream& os, const Gaussian& g) {
+    os << "Mean = " << g.m_mean << std::endl;
+    os << "Variance = " << g.m_variance << std::endl;
+    return os;
+  }
+} } }
diff --git a/bob/learn/em/cpp/ISVBase.cpp b/bob/learn/em/cpp/ISVBase.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c299013dc143b2b34b50a7802797b6728059e076
--- /dev/null
+++ b/bob/learn/em/cpp/ISVBase.cpp
@@ -0,0 +1,76 @@
+/**
+ * @date Tue Jan 27 16:02:00 2015 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+
+#include <bob.learn.em/ISVBase.h>
+#include <bob.core/array_copy.h>
+#include <bob.math/linear.h>
+#include <bob.math/inv.h>
+#include <bob.learn.em/LinearScoring.h>
+#include <limits>
+
+
+//////////////////// ISVBase ////////////////////
+bob::learn::em::ISVBase::ISVBase()
+{
+}
+
+bob::learn::em::ISVBase::ISVBase(const boost::shared_ptr<bob::learn::em::GMMMachine> ubm,
+    const size_t ru):
+  m_base(ubm, ru, 1)
+{
+  blitz::Array<double,2>& V = m_base.updateV();
+  V = 0;
+}
+
+bob::learn::em::ISVBase::ISVBase(const bob::learn::em::ISVBase& other):
+  m_base(other.m_base)
+{
+}
+
+
+bob::learn::em::ISVBase::ISVBase(bob::io::base::HDF5File& config)
+{
+  load(config);
+}
+
+bob::learn::em::ISVBase::~ISVBase() {
+}
+
+void bob::learn::em::ISVBase::save(bob::io::base::HDF5File& config) const
+{
+  config.setArray("U", m_base.getU());
+  config.setArray("d", m_base.getD());
+}
+
+void bob::learn::em::ISVBase::load(bob::io::base::HDF5File& config)
+{
+  //reads all data directly into the member variables
+  blitz::Array<double,2> U = config.readArray<double,2>("U");
+  blitz::Array<double,1> d = config.readArray<double,1>("d");
+  const int ru = U.extent(1);
+  if (!m_base.getUbm())
+    m_base.resize(ru, 1, U.extent(0));
+  else
+    m_base.resize(ru, 1);
+  m_base.setU(U);
+  m_base.setD(d);
+  blitz::Array<double,2>& V = m_base.updateV();
+  V = 0;
+}
+
+bob::learn::em::ISVBase&
+bob::learn::em::ISVBase::operator=(const bob::learn::em::ISVBase& other)
+{
+  if (this != &other)
+  {
+    m_base = other.m_base;
+  }
+  return *this;
+}
+
diff --git a/bob/learn/em/cpp/ISVMachine.cpp b/bob/learn/em/cpp/ISVMachine.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..894126d2cd20360feeb3418459ecb8879f0aa4e7
--- /dev/null
+++ b/bob/learn/em/cpp/ISVMachine.cpp
@@ -0,0 +1,182 @@
+/**
+ * @date Tue Jan 27 16:06:00 2015 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+
+#include <bob.learn.em/ISVMachine.h>
+#include <bob.core/array_copy.h>
+#include <bob.math/linear.h>
+#include <bob.math/inv.h>
+#include <bob.learn.em/LinearScoring.h>
+#include <limits>
+
+
+//////////////////// ISVMachine ////////////////////
+bob::learn::em::ISVMachine::ISVMachine():
+  m_z(1)
+{
+  resizeTmp();
+}
+
+bob::learn::em::ISVMachine::ISVMachine(const boost::shared_ptr<bob::learn::em::ISVBase> isv_base):
+  m_isv_base(isv_base),
+  m_z(isv_base->getSupervectorLength())
+{
+  if (!m_isv_base->getUbm())
+    throw std::runtime_error("No UBM was set in the JFA machine.");
+  updateCache();
+  resizeTmp();
+}
+
+
+bob::learn::em::ISVMachine::ISVMachine(const bob::learn::em::ISVMachine& other):
+  m_isv_base(other.m_isv_base),
+  m_z(bob::core::array::ccopy(other.m_z))
+{
+  updateCache();
+  resizeTmp();
+}
+
+bob::learn::em::ISVMachine::ISVMachine(bob::io::base::HDF5File& config)
+{
+  load(config);
+}
+
+bob::learn::em::ISVMachine::~ISVMachine() {
+}
+
+bob::learn::em::ISVMachine&
+bob::learn::em::ISVMachine::operator=(const bob::learn::em::ISVMachine& other)
+{
+  if (this != &other)
+  {
+    m_isv_base = other.m_isv_base;
+    m_z.reference(bob::core::array::ccopy(other.m_z));
+  }
+  return *this;
+}
+
+bool bob::learn::em::ISVMachine::operator==(const bob::learn::em::ISVMachine& other) const
+{
+  return (*m_isv_base == *(other.m_isv_base) &&
+          bob::core::array::isEqual(m_z, other.m_z));
+}
+
+bool bob::learn::em::ISVMachine::operator!=(const bob::learn::em::ISVMachine& b) const
+{
+  return !(this->operator==(b));
+}
+
+
+bool bob::learn::em::ISVMachine::is_similar_to(const bob::learn::em::ISVMachine& b,
+    const double r_epsilon, const double a_epsilon) const
+{
+  return (m_isv_base->is_similar_to(*(b.m_isv_base), r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(m_z, b.m_z, r_epsilon, a_epsilon));
+}
+
+void bob::learn::em::ISVMachine::save(bob::io::base::HDF5File& config) const
+{
+  config.setArray("z", m_z);
+}
+
+void bob::learn::em::ISVMachine::load(bob::io::base::HDF5File& config)
+{
+  //reads all data directly into the member variables
+  blitz::Array<double,1> z = config.readArray<double,1>("z");
+  if (!m_isv_base)
+    m_z.resize(z.extent(0));
+  setZ(z);
+  // update cache
+  updateCache();
+  resizeTmp();
+}
+
+void bob::learn::em::ISVMachine::setZ(const blitz::Array<double,1>& z)
+{
+  if(z.extent(0) != m_z.extent(0)) { //checks dimension
+    boost::format m("size of input vector `z' (%d) does not match the expected size (%d)");
+    m % z.extent(0) % m_z.extent(0);
+    throw std::runtime_error(m.str());
+  }
+  m_z.reference(bob::core::array::ccopy(z));
+  // update cache
+  updateCache();
+}
+
+void bob::learn::em::ISVMachine::setISVBase(const boost::shared_ptr<bob::learn::em::ISVBase> isv_base)
+{
+  if (!isv_base->getUbm())
+    throw std::runtime_error("No UBM was set in the JFA machine.");
+  m_isv_base = isv_base;
+  // Resize variables
+  resize();
+}
+
+void bob::learn::em::ISVMachine::resize()
+{
+  m_z.resizeAndPreserve(getSupervectorLength());
+  updateCache();
+  resizeTmp();
+}
+
+void bob::learn::em::ISVMachine::resizeTmp()
+{
+  if (m_isv_base)
+  {
+    m_tmp_Ux.resize(getSupervectorLength());
+  }
+}
+
+void bob::learn::em::ISVMachine::updateCache()
+{
+  if (m_isv_base)
+  {
+    // m + Dz
+    m_cache_mDz.resize(getSupervectorLength());
+    m_cache_mDz = m_isv_base->getD()*m_z + m_isv_base->getUbm()->getMeanSupervector();
+    m_cache_x.resize(getDimRu());
+  }
+}
+
+void bob::learn::em::ISVMachine::estimateUx(const bob::learn::em::GMMStats& gmm_stats,
+  blitz::Array<double,1>& Ux)
+{
+  estimateX(gmm_stats, m_cache_x);
+  bob::math::prod(m_isv_base->getU(), m_cache_x, Ux);
+}
+
+double bob::learn::em::ISVMachine::forward(const bob::learn::em::GMMStats& input)
+{
+  return forward_(input);
+}
+
+double bob::learn::em::ISVMachine::forward(const bob::learn::em::GMMStats& gmm_stats,
+  const blitz::Array<double,1>& Ux)
+{
+  // Checks that a Base machine has been set
+  if (!m_isv_base) throw std::runtime_error("No UBM was set in the ISV machine.");
+
+  return bob::learn::em::linearScoring(m_cache_mDz,
+            m_isv_base->getUbm()->getMeanSupervector(), m_isv_base->getUbm()->getVarianceSupervector(),
+            gmm_stats, Ux, true);
+}
+
+double bob::learn::em::ISVMachine::forward_(const bob::learn::em::GMMStats& input)
+{
+  // Checks that a Base machine has been set
+  if(!m_isv_base) throw std::runtime_error("No UBM was set in the ISV machine.");
+
+  // Ux and GMMStats
+  estimateX(input, m_cache_x);
+  bob::math::prod(m_isv_base->getU(), m_cache_x, m_tmp_Ux);
+
+  return bob::learn::em::linearScoring(m_cache_mDz,
+            m_isv_base->getUbm()->getMeanSupervector(), m_isv_base->getUbm()->getVarianceSupervector(),
+            input, m_tmp_Ux, true);
+}
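+
+// Both forward() variants delegate to linearScoring(); roughly (and up to
+// the frame-count normalisation enabled by the trailing `true`) the score is
+//
+//   score = (D*z)^T Sigma^{-1} (F - N .* (m + U*x))
+//
+// since the client model is m + D*z: (N, F) are the zeroth/first-order
+// statistics of the probe and U*x removes the estimated session offset.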
+
diff --git a/bob/learn/em/cpp/ISVTrainer.cpp b/bob/learn/em/cpp/ISVTrainer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..cd3caa42ad3c6348a73d6ac9081123609def5762
--- /dev/null
+++ b/bob/learn/em/cpp/ISVTrainer.cpp
@@ -0,0 +1,130 @@
+/**
+ * @date Tue Jul 19 12:16:17 2011 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * @brief Inter-Session Variability (ISV) Trainer
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include <bob.learn.em/ISVTrainer.h>
+#include <bob.core/check.h>
+#include <bob.core/array_copy.h>
+#include <bob.core/array_random.h>
+#include <bob.math/inv.h>
+#include <bob.math/linear.h>
+#include <bob.core/check.h>
+#include <bob.core/array_repmat.h>
+#include <algorithm>
+
+
+//////////////////////////// ISVTrainer ///////////////////////////
+bob::learn::em::ISVTrainer::ISVTrainer(const double relevance_factor):
+  m_relevance_factor(relevance_factor),
+  m_rng(new boost::mt19937())
+{}
+
+bob::learn::em::ISVTrainer::ISVTrainer(const bob::learn::em::ISVTrainer& other):
+  m_rng(other.m_rng)
+{
+  m_relevance_factor      = other.m_relevance_factor;
+}
+
+bob::learn::em::ISVTrainer::~ISVTrainer()
+{}
+
+bob::learn::em::ISVTrainer& bob::learn::em::ISVTrainer::operator=
+(const bob::learn::em::ISVTrainer& other)
+{
+  if (this != &other)
+  {
+    m_rng                   = other.m_rng;
+    m_relevance_factor      = other.m_relevance_factor;
+  }
+  return *this;
+}
+
+bool bob::learn::em::ISVTrainer::operator==(const bob::learn::em::ISVTrainer& b) const
+{
+  return m_rng == b.m_rng && 
+         m_relevance_factor == b.m_relevance_factor;
+}
+
+bool bob::learn::em::ISVTrainer::operator!=(const bob::learn::em::ISVTrainer& b) const
+{
+  return !(this->operator==(b));
+}
+
+bool bob::learn::em::ISVTrainer::is_similar_to(const bob::learn::em::ISVTrainer& b,
+  const double r_epsilon, const double a_epsilon) const
+{
+  return  m_rng == b.m_rng && 
+          m_relevance_factor == b.m_relevance_factor;
+}
+
+void bob::learn::em::ISVTrainer::initialize(bob::learn::em::ISVBase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar)
+{
+  m_base_trainer.initUbmNidSumStatistics(machine.getBase(), ar);
+  m_base_trainer.initializeXYZ(ar);
+
+  blitz::Array<double,2>& U = machine.updateU();
+  bob::core::array::randn(*m_rng, U);
+  initializeD(machine);
+  machine.precompute();
+}
+
+void bob::learn::em::ISVTrainer::initializeD(bob::learn::em::ISVBase& machine) const
+{
+  // D = sqrt(variance(UBM) / relevance_factor)
+  blitz::Array<double,1> d = machine.updateD();
+  d = sqrt(machine.getBase().getUbmVariance() / m_relevance_factor);
+}
+
+void bob::learn::em::ISVTrainer::eStep(bob::learn::em::ISVBase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar)
+{
+  m_base_trainer.resetXYZ();
+
+  const bob::learn::em::FABase& base = machine.getBase();
+  m_base_trainer.updateX(base, ar);
+  m_base_trainer.updateZ(base, ar);
+  m_base_trainer.computeAccumulatorsU(base, ar);
+}
+
+void bob::learn::em::ISVTrainer::mStep(bob::learn::em::ISVBase& machine)
+{
+  blitz::Array<double,2>& U = machine.updateU();
+  m_base_trainer.updateU(U);
+  machine.precompute();
+}
+
+double bob::learn::em::ISVTrainer::computeLikelihood(bob::learn::em::ISVBase& machine)
+{
+  // TODO
+  return 0;
+}
+
+void bob::learn::em::ISVTrainer::enrol(bob::learn::em::ISVMachine& machine,
+  const std::vector<boost::shared_ptr<bob::learn::em::GMMStats> >& ar,
+  const size_t n_iter)
+{
+  std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > > vvec;
+  vvec.push_back(ar);
+
+  const bob::learn::em::FABase& fb = machine.getISVBase()->getBase();
+
+  m_base_trainer.initUbmNidSumStatistics(fb, vvec);
+  m_base_trainer.initializeXYZ(vvec);
+
+  for (size_t i=0; i<n_iter; ++i) {
+    m_base_trainer.updateX(fb, vvec);
+    m_base_trainer.updateZ(fb, vvec);
+  }
+
+  const blitz::Array<double,1> z(m_base_trainer.getZ()[0]);
+  machine.setZ(z);
+}
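+
+// Enrolment thus alternates the session (x) and client (z) posterior
+// updates for n_iter Gauss-Seidel sweeps over the enrolment data, and keeps
+// only z: the client is then represented by the offset supervector m + D*z.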
+
+
+
diff --git a/bob/learn/em/cpp/IVectorMachine.cpp b/bob/learn/em/cpp/IVectorMachine.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f9f4683ac769212648df5c331287ba751591d9bf
--- /dev/null
+++ b/bob/learn/em/cpp/IVectorMachine.cpp
@@ -0,0 +1,249 @@
+/**
+ * @date Sat Mar 30 21:00:00 2013 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include <bob.learn.em/IVectorMachine.h>
+#include <bob.core/array_copy.h>
+#include <bob.core/check.h>
+#include <bob.math/linear.h>
+#include <bob.math/linsolve.h>
+
+bob::learn::em::IVectorMachine::IVectorMachine()
+{
+}
+
+bob::learn::em::IVectorMachine::IVectorMachine(const boost::shared_ptr<bob::learn::em::GMMMachine> ubm,
+    const size_t rt, const double variance_threshold):
+  m_ubm(ubm), m_rt(rt),
+  m_T(getSupervectorLength(),rt), m_sigma(getSupervectorLength()),
+  m_variance_threshold(variance_threshold)
+{
+  resizePrecompute();
+}
+
+bob::learn::em::IVectorMachine::IVectorMachine(const bob::learn::em::IVectorMachine& other):
+  m_ubm(other.m_ubm), m_rt(other.m_rt),
+  m_T(bob::core::array::ccopy(other.m_T)),
+  m_sigma(bob::core::array::ccopy(other.m_sigma)),
+  m_variance_threshold(other.m_variance_threshold)
+{
+  resizePrecompute();
+}
+
+bob::learn::em::IVectorMachine::IVectorMachine(bob::io::base::HDF5File& config)
+{
+  load(config);
+}
+
+bob::learn::em::IVectorMachine::~IVectorMachine() {
+}
+
+void bob::learn::em::IVectorMachine::save(bob::io::base::HDF5File& config) const
+{
+  config.setArray("m_T", m_T);
+  config.setArray("m_sigma", m_sigma);
+  config.set("m_variance_threshold", m_variance_threshold);
+}
+
+void bob::learn::em::IVectorMachine::load(bob::io::base::HDF5File& config)
+{
+  //reads all data directly into the member variables
+  m_T.reference(config.readArray<double,2>("m_T"));
+  m_rt = m_T.extent(1);
+  m_sigma.reference(config.readArray<double,1>("m_sigma"));
+  m_variance_threshold = config.read<double>("m_variance_threshold");
+  resizePrecompute();
+}
+
+void bob::learn::em::IVectorMachine::resize(const size_t rt)
+{
+  m_rt = rt;
+  m_T.resizeAndPreserve(m_T.extent(0), rt);
+  resizePrecompute();
+}
+
+bob::learn::em::IVectorMachine&
+bob::learn::em::IVectorMachine::operator=(const bob::learn::em::IVectorMachine& other)
+{
+  if (this != &other)
+  {
+    m_ubm = other.m_ubm;
+    m_rt = other.m_rt;
+    m_T.reference(bob::core::array::ccopy(other.m_T));
+    m_sigma.reference(bob::core::array::ccopy(other.m_sigma));
+    m_variance_threshold = other.m_variance_threshold;
+    resizePrecompute();
+  }
+  return *this;
+}
+
+bool bob::learn::em::IVectorMachine::operator==(const IVectorMachine& b) const
+{
+  return (((m_ubm && b.m_ubm) && *m_ubm == *(b.m_ubm)) || (!m_ubm && !b.m_ubm)) &&
+         m_rt == b.m_rt &&
+         bob::core::array::isEqual(m_T, b.m_T) &&
+         bob::core::array::isEqual(m_sigma, b.m_sigma) &&
+         m_variance_threshold == b.m_variance_threshold;
+}
+
+bool bob::learn::em::IVectorMachine::operator!=(const bob::learn::em::IVectorMachine& b) const
+{
+  return !(this->operator==(b));
+}
+
+bool bob::learn::em::IVectorMachine::is_similar_to(const IVectorMachine& b,
+  const double r_epsilon, const double a_epsilon) const
+{
+  // TODO: update with new is_similar_to method
+  return (((m_ubm && b.m_ubm) && m_ubm->is_similar_to(*(b.m_ubm), r_epsilon)) || (!m_ubm && !b.m_ubm)) &&
+          m_rt == b.m_rt &&
+          bob::core::array::isClose(m_T, b.m_T, r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(m_sigma, b.m_sigma, r_epsilon, a_epsilon) &&
+          bob::core::isClose(m_variance_threshold, b.m_variance_threshold, r_epsilon, a_epsilon);
+}
+
+void bob::learn::em::IVectorMachine::setUbm(const boost::shared_ptr<bob::learn::em::GMMMachine> ubm)
+{
+  m_ubm = ubm;
+  resizePrecompute();
+}
+
+void bob::learn::em::IVectorMachine::setT(const blitz::Array<double,2>& T)
+{
+  bob::core::array::assertSameShape(m_T, T);
+  m_T = T;
+  // Update cache
+  precompute();
+}
+
+void bob::learn::em::IVectorMachine::setSigma(const blitz::Array<double,1>& sigma)
+{
+  bob::core::array::assertSameShape(m_sigma, sigma);
+  m_sigma = sigma;
+  // Update cache
+  precompute();
+}
+
+
+void bob::learn::em::IVectorMachine::setVarianceThreshold(const double thd)
+{
+  m_variance_threshold = thd;
+  // Update cache
+  precompute();
+}
+
+void bob::learn::em::IVectorMachine::applyVarianceThreshold()
+{
+  // Apply variance flooring threshold
+  m_sigma = blitz::where(m_sigma < m_variance_threshold, m_variance_threshold, m_sigma);
+}
+
+void bob::learn::em::IVectorMachine::precompute()
+{
+  if (m_ubm)
+  {
+    // Apply variance threshold
+    applyVarianceThreshold();
+
+    blitz::firstIndex i;
+    blitz::secondIndex j;
+    blitz::Range rall = blitz::Range::all();
+    const int C = (int)m_ubm->getNGaussians();
+    const int D = (int)m_ubm->getNInputs();
+    // T_{c}^{T}.sigma_{c}^{-1}
+    for (int c=0; c<C; ++c)
+    {
+      blitz::Array<double,2> Tct_sigmacInv = m_cache_Tct_sigmacInv(c, rall, rall);
+      blitz::Array<double,2> Tc = m_T(blitz::Range(c*D,(c+1)*D-1), rall);
+      blitz::Array<double,2> Tct = Tc.transpose(1,0);
+      blitz::Array<double,1> sigma_c = m_sigma(blitz::Range(c*D,(c+1)*D-1));
+      Tct_sigmacInv = Tct(i,j) / sigma_c(j);
+    }
+
+    // T_{c}^{T}.sigma_{c}^{-1}.T_{c}
+    for (int c=0; c<C; ++c)
+    {
+      blitz::Array<double,2> Tc = m_T(blitz::Range(c*D,(c+1)*D-1), rall);
+      blitz::Array<double,2> Tct_sigmacInv = m_cache_Tct_sigmacInv(c, rall, rall);
+      blitz::Array<double,2> Tct_sigmacInv_Tc = m_cache_Tct_sigmacInv_Tc(c, rall, rall);
+      bob::math::prod(Tct_sigmacInv, Tc, Tct_sigmacInv_Tc);
+    }
+  }
+}
+
+void bob::learn::em::IVectorMachine::resizePrecompute()
+{
+  resizeCache();
+  resizeTmp();
+  precompute();
+}
+
+void bob::learn::em::IVectorMachine::resizeCache()
+{
+  if (m_ubm)
+  {
+    const int C = (int)m_ubm->getNGaussians();
+    const int D = (int)m_ubm->getNInputs();
+    m_cache_Tct_sigmacInv.resize(C, (int)m_rt, D);
+    m_cache_Tct_sigmacInv_Tc.resize(C, (int)m_rt, (int)m_rt);
+  }
+}
+
+void bob::learn::em::IVectorMachine::resizeTmp()
+{
+  if (m_ubm)
+    m_tmp_d.resize(m_ubm->getNInputs());
+  m_tmp_t1.resize(m_rt);
+  m_tmp_t2.resize(m_rt);
+  m_tmp_tt.resize(m_rt, m_rt);
+}
+
+void bob::learn::em::IVectorMachine::forward(const bob::learn::em::GMMStats& gs,
+  blitz::Array<double,1>& ivector) const
+{
+  bob::core::array::assertSameDimensionLength(ivector.extent(0), (int)m_rt);  
+  forward_(gs, ivector);
+}
+
+void bob::learn::em::IVectorMachine::computeIdTtSigmaInvT(
+  const bob::learn::em::GMMStats& gs, blitz::Array<double,2>& output) const
+{
+  // Computes \f$(Id + \sum_{c=1}^{C} N_{i,j,c} T^{T} \Sigma_{c}^{-1} T)\f$
+  blitz::Range rall = blitz::Range::all();
+  bob::math::eye(output);
+  for (int c=0; c<(int)getNGaussians(); ++c)
+    output += gs.n(c) * m_cache_Tct_sigmacInv_Tc(c, rall, rall);
+}
+
+void bob::learn::em::IVectorMachine::computeTtSigmaInvFnorm(
+  const bob::learn::em::GMMStats& gs, blitz::Array<double,1>& output) const
+{
+  // Computes \f$T^{T} \Sigma^{-1} \sum_{c=1}^{C} (F_c - N_c ubmmean_{c})\f$
+  blitz::Range rall = blitz::Range::all();
+  output = 0;
+  for (int c=0; c<(int)getNGaussians(); ++c)
+  {
+    m_tmp_d = gs.sumPx(c,rall) - gs.n(c) * m_ubm->getGaussian(c)->getMean();
+    blitz::Array<double,2> Tct_sigmacInv = m_cache_Tct_sigmacInv(c, rall, rall);
+    bob::math::prod(Tct_sigmacInv, m_tmp_d, m_tmp_t2);
+
+    output += m_tmp_t2;
+  }
+}
+
+void bob::learn::em::IVectorMachine::forward_(const bob::learn::em::GMMStats& gs,
+  blitz::Array<double,1>& ivector) const
+{
+  // Computes \f$(Id + \sum_{c=1}^{C} N_{i,j,c} T^{T} \Sigma_{c}^{-1} T)\f$
+  computeIdTtSigmaInvT(gs, m_tmp_tt);
+
+  // Computes \f$T^{T} \Sigma^{-1} \sum_{c=1}^{C} (F_c - N_c ubmmean_{c})\f$
+  computeTtSigmaInvFnorm(gs, m_tmp_t1);
+
+  // Solves m_tmp_tt.ivector = m_tmp_t1
+  bob::math::linsolve(m_tmp_tt, ivector, m_tmp_t1);
+}
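+
+// Putting the three steps together, the extracted i-vector is the posterior
+// mean of the latent variable w given the statistics (N, F):
+//
+//   w = ( I + sum_c N_c T_c^T Sigma_c^{-1} T_c )^{-1}
+//         T^T Sigma^{-1} sum_c (F_c - N_c m_c)
+//
+// computed by solving the linear system instead of forming the inverse.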
+
diff --git a/bob/learn/em/cpp/IVectorTrainer.cpp b/bob/learn/em/cpp/IVectorTrainer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..82ad2504a3d09881b807d4b98c1effc0566d460c
--- /dev/null
+++ b/bob/learn/em/cpp/IVectorTrainer.cpp
@@ -0,0 +1,228 @@
+/**
+ * @date Sun Mar 31 20:15:00 2013 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+
+#include <bob.learn.em/IVectorTrainer.h>
+#include <bob.core/check.h>
+#include <bob.core/array_copy.h>
+#include <bob.core/array_random.h>
+#include <bob.math/inv.h>
+#include <bob.core/check.h>
+#include <bob.core/array_repmat.h>
+#include <algorithm>
+
+#include <bob.math/linear.h>
+#include <bob.math/linsolve.h>
+
+bob::learn::em::IVectorTrainer::IVectorTrainer(const bool update_sigma):
+  m_update_sigma(update_sigma),
+  m_rng(new boost::mt19937())
+{}
+
+bob::learn::em::IVectorTrainer::IVectorTrainer(const bob::learn::em::IVectorTrainer& other):
+  m_update_sigma(other.m_update_sigma)
+{
+  m_rng                   = other.m_rng;
+  m_acc_Nij_wij2.reference(bob::core::array::ccopy(other.m_acc_Nij_wij2));
+  m_acc_Fnormij_wij.reference(bob::core::array::ccopy(other.m_acc_Fnormij_wij));
+  m_acc_Nij.reference(bob::core::array::ccopy(other.m_acc_Nij));
+  m_acc_Snormij.reference(bob::core::array::ccopy(other.m_acc_Snormij));
+
+  m_tmp_wij.reference(bob::core::array::ccopy(other.m_tmp_wij));
+  m_tmp_wij2.reference(bob::core::array::ccopy(other.m_tmp_wij2));
+  m_tmp_d1.reference(bob::core::array::ccopy(other.m_tmp_d1));
+  m_tmp_t1.reference(bob::core::array::ccopy(other.m_tmp_t1));
+  m_tmp_dd1.reference(bob::core::array::ccopy(other.m_tmp_dd1));
+  m_tmp_dt1.reference(bob::core::array::ccopy(other.m_tmp_dt1));
+  m_tmp_tt1.reference(bob::core::array::ccopy(other.m_tmp_tt1));
+  m_tmp_tt2.reference(bob::core::array::ccopy(other.m_tmp_tt2));
+}
+
+bob::learn::em::IVectorTrainer::~IVectorTrainer()
+{
+}
+
+void bob::learn::em::IVectorTrainer::initialize(
+  bob::learn::em::IVectorMachine& machine)
+{
+
+  const int C = machine.getNGaussians();
+  const int D = machine.getNInputs();
+  const int Rt = machine.getDimRt();
+
+  // Cache
+  m_acc_Nij_wij2.resize(C,Rt,Rt);
+  m_acc_Fnormij_wij.resize(C,D,Rt);
+  if (m_update_sigma)
+  {
+    m_acc_Nij.resize(C);
+    m_acc_Snormij.resize(C,D);
+  }
+
+  // Tmp
+  m_tmp_wij.resize(Rt);
+  m_tmp_wij2.resize(Rt,Rt);
+  m_tmp_d1.resize(D);
+  m_tmp_t1.resize(Rt);
+
+  m_tmp_dt1.resize(D,Rt);
+  m_tmp_tt1.resize(Rt,Rt);
+  m_tmp_tt2.resize(Rt,Rt);
+  if (m_update_sigma)
+    m_tmp_dd1.resize(D,D);
+
+  // Initializes \f$T\f$ and \f$\Sigma\f$ of the machine
+  blitz::Array<double,2>& T = machine.updateT();
+  bob::core::array::randn(*m_rng, T);
+  blitz::Array<double,1>& sigma = machine.updateSigma();
+  sigma = machine.getUbm()->getVarianceSupervector();
+  machine.precompute();
+}
+
+void bob::learn::em::IVectorTrainer::eStep(
+  bob::learn::em::IVectorMachine& machine,
+  const std::vector<bob::learn::em::GMMStats>& data)
+{
+  blitz::Range rall = blitz::Range::all();
+  const int C = machine.getNGaussians();
+
+  // Reinitializes accumulators to 0
+  m_acc_Nij_wij2 = 0.;
+  m_acc_Fnormij_wij = 0.;
+  if (m_update_sigma)
+  {
+    m_acc_Nij = 0.;
+    m_acc_Snormij = 0.;
+  }
+  for (std::vector<bob::learn::em::GMMStats>::const_iterator it = data.begin();
+       it != data.end(); ++it)
+  {
+    // Computes E{wij} and E{wij.wij^{T}}
+    // a. Computes \f$T^{T} \Sigma^{-1} F_{norm}\f$
+    machine.computeTtSigmaInvFnorm(*it, m_tmp_t1);
+    // b. Computes \f$Id + T^{T} \Sigma^{-1} T\f$
+    machine.computeIdTtSigmaInvT(*it, m_tmp_tt1);
+    // c. Computes \f$(Id + T^{T} \Sigma^{-1} T)^{-1}\f$
+
+    bob::math::inv(m_tmp_tt1, m_tmp_tt2);
+    // d. Computes \f$E{wij} = (Id + T^{T} \Sigma^{-1} T)^{-1} T^{T} \Sigma^{-1} F_{norm}\f$
+    bob::math::prod(m_tmp_tt2, m_tmp_t1, m_tmp_wij); // E{wij}
+    // e.  Computes \f$E{wij}.E{wij^{T}}\f$
+    bob::math::prod(m_tmp_wij, m_tmp_wij, m_tmp_wij2);
+    // f. Computes \f$E{wij.wij^{T}} = (Id + T^{T} \Sigma^{-1} T)^{-1} + E{wij}.E{wij^{T}}\f$
+    m_tmp_wij2 += m_tmp_tt2; // E{wij.wij^{T}}
+
+    if (m_update_sigma)
+      m_acc_Nij += (*it).n;
+
+    for (int c=0; c<C; ++c)
+    {
+      blitz::Array<double,2> acc_Nij_wij2_c = m_acc_Nij_wij2(c,rall,rall);
+      blitz::Array<double,2> acc_Fnormij_wij = m_acc_Fnormij_wij(c,rall,rall);
+      // acc_Nij_wij2_c += Nijc . E{wij.wij^{T}}
+      acc_Nij_wij2_c += (*it).n(c) * m_tmp_wij2;
+      blitz::Array<double,1> mc = machine.getUbm()->getGaussian(c)->getMean();
+      // m_tmp_d1 = Fijc - Nijc * ubmmean_{c}
+      m_tmp_d1 = (*it).sumPx(c,rall) - (*it).n(c)*mc; // Fnorm_c
+      // m_tmp_dt1 = (Fijc - Nijc * ubmmean_{c}).E{wij}^{T}
+      bob::math::prod(m_tmp_d1, m_tmp_wij, m_tmp_dt1);
+      // acc_Fnormij_wij += (Fijc - Nijc * ubmmean_{c}).E{wij}^{T}
+      acc_Fnormij_wij += m_tmp_dt1;
+      if (m_update_sigma)
+      {
+        blitz::Array<double,1> acc_Snormij_c = m_acc_Snormij(c,rall);
+        acc_Snormij_c += (*it).sumPxx(c,rall) - mc*((*it).sumPx(c,rall) + m_tmp_d1);
+      }
+    }
+  }
+}
+
+void bob::learn::em::IVectorTrainer::mStep(
+  bob::learn::em::IVectorMachine& machine)
+{
+  blitz::Range rall = blitz::Range::all();
+  blitz::Array<double,2>& T = machine.updateT();
+  blitz::Array<double,1>& sigma = machine.updateSigma();
+  const int C = (int)machine.getNGaussians();
+  const int D = (int)machine.getNInputs();
+  for (int c=0; c<C; ++c)
+  {
+    // Solves linear system A.T = B to update T, based on accumulators of
+    // the eStep()
+    blitz::Array<double,2> acc_Nij_wij2_c = m_acc_Nij_wij2(c,rall,rall);
+    blitz::Array<double,2> tacc_Nij_wij2_c = acc_Nij_wij2_c.transpose(1,0);
+    blitz::Array<double,2> acc_Fnormij_wij_c = m_acc_Fnormij_wij(c,rall,rall);
+    blitz::Array<double,2> tacc_Fnormij_wij_c = acc_Fnormij_wij_c.transpose(1,0);
+    blitz::Array<double,2> T_c = T(blitz::Range(c*D,(c+1)*D-1),rall);
+    blitz::Array<double,2> Tt_c = T_c.transpose(1,0);
+    if (blitz::all(acc_Nij_wij2_c == 0)) // no data accumulated for this component
+      Tt_c = 0; // leave the corresponding rows of T at zero instead of solving a singular system
+    else
+      bob::math::linsolve(tacc_Nij_wij2_c, Tt_c, tacc_Fnormij_wij_c);
+    if (m_update_sigma)
+    {
+      blitz::Array<double,1> sigma_c = sigma(blitz::Range(c*D,(c+1)*D-1));
+      bob::math::prod(acc_Fnormij_wij_c, Tt_c, m_tmp_dd1);
+      bob::math::diag(m_tmp_dd1, m_tmp_d1);
+      sigma_c = (m_acc_Snormij(c,rall) - m_tmp_d1) / m_acc_Nij(c);
+    }
+  }
+  machine.precompute();
+}
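+
+// Usage sketch (comment only, not part of the library): a minimal EM driver
+// over the three methods above, assuming an IVectorTrainer `trainer`, an
+// IVectorMachine `machine`, a std::vector<bob::learn::em::GMMStats> `stats`
+// and an iteration count `n_iter`, and assuming initialize() takes
+// (machine, stats) like eStep():
+//
+//   trainer.initialize(machine, stats);
+//   for (size_t it = 0; it < n_iter; ++it) {
+//     trainer.eStep(machine, stats); // accumulates Nij.E{w.w^T}, Fnorm.E{w}^T
+//     trainer.mStep(machine);        // solves for T (and sigma, if enabled)
+//   }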
+
+
+bob::learn::em::IVectorTrainer& bob::learn::em::IVectorTrainer::operator=
+  (const bob::learn::em::IVectorTrainer &other)
+{
+  if (this != &other)
+  {    
+    m_update_sigma = other.m_update_sigma;
+
+    m_acc_Nij_wij2.reference(bob::core::array::ccopy(other.m_acc_Nij_wij2));
+    m_acc_Fnormij_wij.reference(bob::core::array::ccopy(other.m_acc_Fnormij_wij));
+    m_acc_Nij.reference(bob::core::array::ccopy(other.m_acc_Nij));
+    m_acc_Snormij.reference(bob::core::array::ccopy(other.m_acc_Snormij));
+
+    m_tmp_wij.reference(bob::core::array::ccopy(other.m_tmp_wij));
+    m_tmp_wij2.reference(bob::core::array::ccopy(other.m_tmp_wij2));
+    m_tmp_d1.reference(bob::core::array::ccopy(other.m_tmp_d1));
+    m_tmp_t1.reference(bob::core::array::ccopy(other.m_tmp_t1));
+    m_tmp_dd1.reference(bob::core::array::ccopy(other.m_tmp_dd1));
+    m_tmp_dt1.reference(bob::core::array::ccopy(other.m_tmp_dt1));
+    m_tmp_tt1.reference(bob::core::array::ccopy(other.m_tmp_tt1));
+    m_tmp_tt2.reference(bob::core::array::ccopy(other.m_tmp_tt2));
+  }
+  return *this;
+}
+
+bool bob::learn::em::IVectorTrainer::operator==
+  (const bob::learn::em::IVectorTrainer &other) const
+{
+  return m_update_sigma == other.m_update_sigma &&
+         bob::core::array::isEqual(m_acc_Nij_wij2, other.m_acc_Nij_wij2) &&
+         bob::core::array::isEqual(m_acc_Fnormij_wij, other.m_acc_Fnormij_wij) &&
+         bob::core::array::isEqual(m_acc_Nij, other.m_acc_Nij) &&
+         bob::core::array::isEqual(m_acc_Snormij, other.m_acc_Snormij);
+}
+
+bool bob::learn::em::IVectorTrainer::operator!=
+  (const bob::learn::em::IVectorTrainer &other) const
+{
+  return !(this->operator==(other));
+}
+
+bool bob::learn::em::IVectorTrainer::is_similar_to
+  (const bob::learn::em::IVectorTrainer &other, const double r_epsilon,
+   const double a_epsilon) const
+{
+  return m_update_sigma == other.m_update_sigma &&
+         bob::core::array::isClose(m_acc_Nij_wij2, other.m_acc_Nij_wij2, r_epsilon, a_epsilon) &&
+         bob::core::array::isClose(m_acc_Fnormij_wij, other.m_acc_Fnormij_wij, r_epsilon, a_epsilon) &&
+         bob::core::array::isClose(m_acc_Nij, other.m_acc_Nij, r_epsilon, a_epsilon) &&
+         bob::core::array::isClose(m_acc_Snormij, other.m_acc_Snormij, r_epsilon, a_epsilon);
+}
+
diff --git a/bob/learn/em/cpp/JFABase.cpp b/bob/learn/em/cpp/JFABase.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..b80bfd31d959f8e0e7fc3004a3ee0980b49e4a50
--- /dev/null
+++ b/bob/learn/em/cpp/JFABase.cpp
@@ -0,0 +1,75 @@
+/**
+ * @date Tue Jan 27 15:54:00 2015 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+
+#include <bob.learn.em/JFABase.h>
+#include <bob.core/array_copy.h>
+#include <bob.math/linear.h>
+#include <bob.math/inv.h>
+#include <bob.learn.em/LinearScoring.h>
+#include <limits>
+
+
+//////////////////// JFABase ////////////////////
+bob::learn::em::JFABase::JFABase()
+{
+}
+
+bob::learn::em::JFABase::JFABase(const boost::shared_ptr<bob::learn::em::GMMMachine> ubm,
+    const size_t ru, const size_t rv):
+  m_base(ubm, ru, rv)
+{
+}
+
+bob::learn::em::JFABase::JFABase(const bob::learn::em::JFABase& other):
+  m_base(other.m_base)
+{
+}
+
+
+bob::learn::em::JFABase::JFABase(bob::io::base::HDF5File& config)
+{
+  load(config);
+}
+
+bob::learn::em::JFABase::~JFABase() {
+}
+
+void bob::learn::em::JFABase::save(bob::io::base::HDF5File& config) const
+{
+  config.setArray("U", m_base.getU());
+  config.setArray("V", m_base.getV());
+  config.setArray("d", m_base.getD());
+}
+
+void bob::learn::em::JFABase::load(bob::io::base::HDF5File& config)
+{
+  //reads all data directly into the member variables
+  blitz::Array<double,2> U = config.readArray<double,2>("U");
+  blitz::Array<double,2> V = config.readArray<double,2>("V");
+  blitz::Array<double,1> d = config.readArray<double,1>("d");
+  const int ru = U.extent(1);
+  const int rv = V.extent(1);
+  if (!m_base.getUbm())
+    m_base.resize(ru, rv, U.extent(0));
+  else
+    m_base.resize(ru, rv);
+  m_base.setU(U);
+  m_base.setV(V);
+  m_base.setD(d);
+}
+
+bob::learn::em::JFABase&
+bob::learn::em::JFABase::operator=(const bob::learn::em::JFABase& other)
+{
+  if (this != &other)
+  {
+    m_base = other.m_base;
+  }
+  return *this;
+}
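+
+// Serialization sketch (comment only): a save/load round-trip through HDF5,
+// assuming `base` is a configured JFABase and `fout`/`fin` are
+// bob::io::base::HDF5File handles opened for writing/reading (see
+// bob.io.base for the open-mode constants):
+//
+//   base.save(fout);                        // stores U, V and d
+//   bob::learn::em::JFABase restored(fin);  // load() resizes from U/V extents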
diff --git a/bob/learn/em/cpp/JFAMachine.cpp b/bob/learn/em/cpp/JFAMachine.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..508c5008bcb59479e4a966eb6de31a6f14649d99
--- /dev/null
+++ b/bob/learn/em/cpp/JFAMachine.cpp
@@ -0,0 +1,206 @@
+/**
+ * @date Tue Jan 27 16:47:00 2015 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+
+#include <bob.learn.em/JFAMachine.h>
+#include <bob.core/array_copy.h>
+#include <bob.math/linear.h>
+#include <bob.math/inv.h>
+#include <bob.learn.em/LinearScoring.h>
+#include <limits>
+
+
+//////////////////// JFAMachine ////////////////////
+bob::learn::em::JFAMachine::JFAMachine():
+  m_y(1), m_z(1)
+{
+  resizeTmp();
+}
+
+bob::learn::em::JFAMachine::JFAMachine(const boost::shared_ptr<bob::learn::em::JFABase> jfa_base):
+  m_jfa_base(jfa_base),
+  m_y(jfa_base->getDimRv()), m_z(jfa_base->getSupervectorLength())
+{
+  if (!m_jfa_base->getUbm()) throw std::runtime_error("No UBM was set in the JFA machine.");
+  updateCache();
+  resizeTmp();
+}
+
+
+bob::learn::em::JFAMachine::JFAMachine(const bob::learn::em::JFAMachine& other):
+  m_jfa_base(other.m_jfa_base),
+  m_y(bob::core::array::ccopy(other.m_y)),
+  m_z(bob::core::array::ccopy(other.m_z))
+{
+  updateCache();
+  resizeTmp();
+}
+
+bob::learn::em::JFAMachine::JFAMachine(bob::io::base::HDF5File& config)
+{
+  load(config);
+}
+
+bob::learn::em::JFAMachine::~JFAMachine() {
+}
+
+bob::learn::em::JFAMachine&
+bob::learn::em::JFAMachine::operator=(const bob::learn::em::JFAMachine& other)
+{
+  if (this != &other)
+  {
+    m_jfa_base = other.m_jfa_base;
+    m_y.reference(bob::core::array::ccopy(other.m_y));
+    m_z.reference(bob::core::array::ccopy(other.m_z));
+  }
+  return *this;
+}
+
+bool bob::learn::em::JFAMachine::operator==(const bob::learn::em::JFAMachine& other) const
+{
+  return (*m_jfa_base == *(other.m_jfa_base) &&
+          bob::core::array::isEqual(m_y, other.m_y) &&
+          bob::core::array::isEqual(m_z, other.m_z));
+}
+
+bool bob::learn::em::JFAMachine::operator!=(const bob::learn::em::JFAMachine& b) const
+{
+  return !(this->operator==(b));
+}
+
+
+bool bob::learn::em::JFAMachine::is_similar_to(const bob::learn::em::JFAMachine& b,
+    const double r_epsilon, const double a_epsilon) const
+{
+  return (m_jfa_base->is_similar_to(*(b.m_jfa_base), r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(m_y, b.m_y, r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(m_z, b.m_z, r_epsilon, a_epsilon));
+}
+
+void bob::learn::em::JFAMachine::save(bob::io::base::HDF5File& config) const
+{
+  config.setArray("y", m_y);
+  config.setArray("z", m_z);
+}
+
+void bob::learn::em::JFAMachine::load(bob::io::base::HDF5File& config)
+{
+  //reads all data directly into the member variables
+  blitz::Array<double,1> y = config.readArray<double,1>("y");
+  blitz::Array<double,1> z = config.readArray<double,1>("z");
+  if (!m_jfa_base)
+  {
+    m_y.resize(y.extent(0));
+    m_z.resize(z.extent(0));
+  }
+  setY(y);
+  setZ(z);
+  // update cache
+  updateCache();
+  resizeTmp();
+}
+
+
+void bob::learn::em::JFAMachine::setY(const blitz::Array<double,1>& y)
+{
+  if(y.extent(0) != m_y.extent(0)) { //checks dimension
+    boost::format m("size of input vector `y' (%d) does not match the expected size (%d)");
+    m % y.extent(0) % m_y.extent(0);
+    throw std::runtime_error(m.str());
+  }
+  m_y.reference(bob::core::array::ccopy(y));
+  // update cache
+  updateCache();
+}
+
+void bob::learn::em::JFAMachine::setZ(const blitz::Array<double,1>& z)
+{
+  if(z.extent(0) != m_z.extent(0)) { //checks dimension
+    boost::format m("size of input vector `z' (%d) does not match the expected size (%d)");
+    m % z.extent(0) % m_z.extent(0);
+    throw std::runtime_error(m.str());
+  }
+  m_z.reference(bob::core::array::ccopy(z));
+  // update cache
+  updateCache();
+}
+
+void bob::learn::em::JFAMachine::setJFABase(const boost::shared_ptr<bob::learn::em::JFABase> jfa_base)
+{
+  if (!jfa_base->getUbm())
+    throw std::runtime_error("No UBM was set in the JFA machine.");
+  m_jfa_base = jfa_base;
+  // Resize variables
+  resize();
+}
+
+void bob::learn::em::JFAMachine::resize()
+{
+  m_y.resizeAndPreserve(getDimRv());
+  m_z.resizeAndPreserve(getSupervectorLength());
+  updateCache();
+  resizeTmp();
+}
+
+void bob::learn::em::JFAMachine::resizeTmp()
+{
+  if (m_jfa_base)
+  {
+    m_tmp_Ux.resize(getSupervectorLength());
+  }
+}
+
+void bob::learn::em::JFAMachine::updateCache()
+{
+  if (m_jfa_base)
+  {
+    // m + Vy + Dz
+    m_cache_mVyDz.resize(getSupervectorLength());
+    bob::math::prod(m_jfa_base->getV(), m_y, m_cache_mVyDz);
+    m_cache_mVyDz += m_jfa_base->getD()*m_z + m_jfa_base->getUbm()->getMeanSupervector();
+    m_cache_x.resize(getDimRu());
+  }
+}
+
+void bob::learn::em::JFAMachine::estimateUx(const bob::learn::em::GMMStats& gmm_stats,
+  blitz::Array<double,1>& Ux)
+{
+  estimateX(gmm_stats, m_cache_x);
+  bob::math::prod(m_jfa_base->getU(), m_cache_x, Ux);
+}
+
+double bob::learn::em::JFAMachine::forward(const bob::learn::em::GMMStats& input)
+{
+  return forward_(input);
+}
+
+double bob::learn::em::JFAMachine::forward(const bob::learn::em::GMMStats& gmm_stats,
+  const blitz::Array<double,1>& Ux)
+{
+  // Checks that a Base machine has been set
+  if (!m_jfa_base) throw std::runtime_error("No UBM was set in the JFA machine.");
+
+  return bob::learn::em::linearScoring(m_cache_mVyDz,
+            m_jfa_base->getUbm()->getMeanSupervector(), m_jfa_base->getUbm()->getVarianceSupervector(),
+            gmm_stats, Ux, true);
+}
+
+double bob::learn::em::JFAMachine::forward_(const bob::learn::em::GMMStats& input)
+{
+  // Checks that a Base machine has been set
+  if (!m_jfa_base) throw std::runtime_error("No UBM was set in the JFA machine.");
+
+  // Ux and GMMStats
+  estimateX(input, m_cache_x);
+  bob::math::prod(m_jfa_base->getU(), m_cache_x, m_tmp_Ux);
+
+  return bob::learn::em::linearScoring(m_cache_mVyDz,
+            m_jfa_base->getUbm()->getMeanSupervector(), m_jfa_base->getUbm()->getVarianceSupervector(),
+            input, m_tmp_Ux, true);
+}
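+
+// Scoring recap (comment only): forward_() returns the linear scoring of the
+// enrolled, session-compensated supervector m + V.y + D.z (cached in
+// m_cache_mVyDz) against the test statistics, with U.x as the channel offset
+// and frame-length normalisation enabled, e.g.:
+//
+//   double score = machine.forward(test_stats);  // test_stats: GMMStats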
+
diff --git a/bob/learn/em/cpp/JFATrainer.cpp b/bob/learn/em/cpp/JFATrainer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9feaea7f267a75ef2e31fc81a5759b9c0aa3721c
--- /dev/null
+++ b/bob/learn/em/cpp/JFATrainer.cpp
@@ -0,0 +1,200 @@
+/**
+ * @date Tue Jul 19 12:16:17 2011 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * @brief Joint Factor Analysis Trainer
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include <bob.learn.em/JFATrainer.h>
+#include <bob.core/check.h>
+#include <bob.core/array_copy.h>
+#include <bob.core/array_random.h>
+#include <bob.math/inv.h>
+#include <bob.math/linear.h>
+#include <bob.core/check.h>
+#include <bob.core/array_repmat.h>
+#include <algorithm>
+
+
+//////////////////////////// JFATrainer ///////////////////////////
+bob::learn::em::JFATrainer::JFATrainer():
+  m_rng(new boost::mt19937())
+{}
+
+bob::learn::em::JFATrainer::JFATrainer(const bob::learn::em::JFATrainer& other):
+ m_rng(other.m_rng)
+{}
+
+bob::learn::em::JFATrainer::~JFATrainer()
+{}
+
+bob::learn::em::JFATrainer& bob::learn::em::JFATrainer::operator=
+(const bob::learn::em::JFATrainer& other)
+{
+  if (this != &other)
+  {
+    //m_max_iterations = other.m_max_iterations;
+    m_rng = other.m_rng;
+  }
+  return *this;
+}
+
+bool bob::learn::em::JFATrainer::operator==(const bob::learn::em::JFATrainer& b) const
+{
+  //return m_max_iterations == b.m_max_iterations && *m_rng == *(b.m_rng);
+  return *m_rng == *(b.m_rng);
+}
+
+bool bob::learn::em::JFATrainer::operator!=(const bob::learn::em::JFATrainer& b) const
+{
+  return !(this->operator==(b));
+}
+
+bool bob::learn::em::JFATrainer::is_similar_to(const bob::learn::em::JFATrainer& b,
+  const double r_epsilon, const double a_epsilon) const
+{
+  //return m_max_iterations == b.m_max_iterations && *m_rng == *(b.m_rng);
+  return *m_rng == *(b.m_rng);
+}
+
+void bob::learn::em::JFATrainer::initialize(bob::learn::em::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar)
+{
+  m_base_trainer.initUbmNidSumStatistics(machine.getBase(), ar);
+  m_base_trainer.initializeXYZ(ar);
+
+  blitz::Array<double,2>& U = machine.updateU();
+  bob::core::array::randn(*m_rng, U);
+  blitz::Array<double,2>& V = machine.updateV();
+  bob::core::array::randn(*m_rng, V);
+  blitz::Array<double,1>& D = machine.updateD();
+  bob::core::array::randn(*m_rng, D);
+  machine.precompute();
+}
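+
+// Note (comment only): U, V and d are drawn from a standard normal
+// distribution via bob::core::array::randn, so training is reproducible only
+// if the shared RNG is seeded beforehand.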
+
+void bob::learn::em::JFATrainer::eStep1(bob::learn::em::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar)
+{
+  const bob::learn::em::FABase& base = machine.getBase();
+  m_base_trainer.updateY(base, ar);
+  m_base_trainer.computeAccumulatorsV(base, ar);
+}
+
+void bob::learn::em::JFATrainer::mStep1(bob::learn::em::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar)
+{
+  blitz::Array<double,2>& V = machine.updateV();
+  m_base_trainer.updateV(V);
+}
+
+void bob::learn::em::JFATrainer::finalize1(bob::learn::em::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar)
+{
+  const bob::learn::em::FABase& base = machine.getBase();
+  m_base_trainer.updateY(base, ar);
+}
+
+
+void bob::learn::em::JFATrainer::eStep2(bob::learn::em::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar)
+{
+  const bob::learn::em::FABase& base = machine.getBase();
+  m_base_trainer.updateX(base, ar);
+  m_base_trainer.computeAccumulatorsU(base, ar);
+}
+
+void bob::learn::em::JFATrainer::mStep2(bob::learn::em::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar)
+{
+  blitz::Array<double,2>& U = machine.updateU();
+  m_base_trainer.updateU(U);
+  machine.precompute();
+}
+
+void bob::learn::em::JFATrainer::finalize2(bob::learn::em::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar)
+{
+  const bob::learn::em::FABase& base = machine.getBase();
+  m_base_trainer.updateX(base, ar);
+}
+
+
+void bob::learn::em::JFATrainer::eStep3(bob::learn::em::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar)
+{
+  const bob::learn::em::FABase& base = machine.getBase();
+  m_base_trainer.updateZ(base, ar);
+  m_base_trainer.computeAccumulatorsD(base, ar);
+}
+
+void bob::learn::em::JFATrainer::mStep3(bob::learn::em::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar)
+{
+  blitz::Array<double,1>& d = machine.updateD();
+  m_base_trainer.updateD(d);
+}
+
+void bob::learn::em::JFATrainer::finalize3(bob::learn::em::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar)
+{
+}
+
+/*
+void bob::learn::em::JFATrainer::train_loop(bob::learn::em::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar)
+{
+  // V subspace
+  for (size_t i=0; i<m_max_iterations; ++i) {
+    eStep1(machine, ar);
+    mStep1(machine, ar);
+  }
+  finalize1(machine, ar);
+  // U subspace
+  for (size_t i=0; i<m_max_iterations; ++i) {
+    eStep2(machine, ar);
+    mStep2(machine, ar);
+  }
+  finalize2(machine, ar);
+  // d subspace
+  for (size_t i=0; i<m_max_iterations; ++i) {
+    eStep3(machine, ar);
+    mStep3(machine, ar);
+  }
+  finalize3(machine, ar);
+}*/
+
+/*
+void bob::learn::em::JFATrainer::train(bob::learn::em::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar)
+{
+  initialize(machine, ar);
+  train_loop(machine, ar);
+}
+*/
+
+void bob::learn::em::JFATrainer::enrol(bob::learn::em::JFAMachine& machine,
+  const std::vector<boost::shared_ptr<bob::learn::em::GMMStats> >& ar,
+  const size_t n_iter)
+{
+  std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > > vvec;
+  vvec.push_back(ar);
+
+  const bob::learn::em::FABase& fb = machine.getJFABase()->getBase();
+
+  m_base_trainer.initUbmNidSumStatistics(fb, vvec);
+  m_base_trainer.initializeXYZ(vvec);
+
+  for (size_t i=0; i<n_iter; ++i) {
+    m_base_trainer.updateY(fb, vvec);
+    m_base_trainer.updateX(fb, vvec);
+    m_base_trainer.updateZ(fb, vvec);
+  }
+
+  const blitz::Array<double,1> y(m_base_trainer.getY()[0]);
+  const blitz::Array<double,1> z(m_base_trainer.getZ()[0]);
+  machine.setY(y);
+  machine.setZ(z);
+}
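+
+// Enrolment sketch (comment only): given a machine attached to a trained
+// JFABase and the enrolment statistics `stats` of one client,
+//
+//   trainer.enrol(machine, stats, 5);  // 5 alternating y/x/z updates
+//
+// after which the machine's y and z hold the client's latent variables.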
+
diff --git a/bob/learn/em/cpp/KMeansMachine.cpp b/bob/learn/em/cpp/KMeansMachine.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..395cb1460996cb0c77988515a3d9a8b1e7304eca
--- /dev/null
+++ b/bob/learn/em/cpp/KMeansMachine.cpp
@@ -0,0 +1,258 @@
+/**
+ * @date Tue May 10 11:35:58 2011 +0200
+ * @author Francois Moulin <Francois.Moulin@idiap.ch>
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include <bob.learn.em/KMeansMachine.h>
+
+#include <bob.core/assert.h>
+#include <bob.core/check.h>
+#include <bob.core/array_copy.h>
+#include <limits>
+
+bob::learn::em::KMeansMachine::KMeansMachine():
+  m_n_means(0), m_n_inputs(0), m_means(0,0),
+  m_cache_means(0,0)
+{
+  m_means = 0;
+}
+
+bob::learn::em::KMeansMachine::KMeansMachine(const size_t n_means, const size_t n_inputs):
+  m_n_means(n_means), m_n_inputs(n_inputs), m_means(n_means, n_inputs),
+  m_cache_means(n_means, n_inputs)
+{
+  m_means = 0;
+}
+
+bob::learn::em::KMeansMachine::KMeansMachine(const blitz::Array<double,2>& means):
+  m_n_means(means.extent(0)), m_n_inputs(means.extent(1)),
+  m_means(bob::core::array::ccopy(means)),
+  m_cache_means(means.shape())
+{
+}
+
+bob::learn::em::KMeansMachine::KMeansMachine(const bob::learn::em::KMeansMachine& other):
+  m_n_means(other.m_n_means), m_n_inputs(other.m_n_inputs),
+  m_means(bob::core::array::ccopy(other.m_means)),
+  m_cache_means(other.m_cache_means.shape())
+{
+}
+
+bob::learn::em::KMeansMachine::KMeansMachine(bob::io::base::HDF5File& config)
+{
+  load(config);
+}
+
+bob::learn::em::KMeansMachine::~KMeansMachine() { }
+
+bob::learn::em::KMeansMachine& bob::learn::em::KMeansMachine::operator=
+(const bob::learn::em::KMeansMachine& other)
+{
+  if(this != &other)
+  {
+    m_n_means = other.m_n_means;
+    m_n_inputs = other.m_n_inputs;
+    m_means.reference(bob::core::array::ccopy(other.m_means));
+    m_cache_means.resize(other.m_means.shape());
+  }
+  return *this;
+}
+
+bool bob::learn::em::KMeansMachine::operator==(const bob::learn::em::KMeansMachine& b) const
+{
+  return m_n_inputs == b.m_n_inputs && m_n_means == b.m_n_means &&
+         bob::core::array::isEqual(m_means, b.m_means);
+}
+
+bool bob::learn::em::KMeansMachine::operator!=(const bob::learn::em::KMeansMachine& b) const
+{
+  return !(this->operator==(b));
+}
+
+bool bob::learn::em::KMeansMachine::is_similar_to(const bob::learn::em::KMeansMachine& b,
+  const double r_epsilon, const double a_epsilon) const
+{
+  return m_n_inputs == b.m_n_inputs && m_n_means == b.m_n_means &&
+         bob::core::array::isClose(m_means, b.m_means, r_epsilon, a_epsilon);
+}
+
+void bob::learn::em::KMeansMachine::load(bob::io::base::HDF5File& config)
+{
+  //reads all data directly into the member variables
+  m_means.reference(config.readArray<double,2>("means"));
+  m_n_means = m_means.extent(0);
+  m_n_inputs = m_means.extent(1);
+  m_cache_means.resize(m_n_means, m_n_inputs);
+}
+
+void bob::learn::em::KMeansMachine::save(bob::io::base::HDF5File& config) const
+{
+  config.setArray("means", m_means);
+}
+
+void bob::learn::em::KMeansMachine::setMeans(const blitz::Array<double,2> &means)
+{
+  bob::core::array::assertSameShape(means, m_means);
+  m_means = means;
+}
+
+void bob::learn::em::KMeansMachine::setMean(const size_t i, const blitz::Array<double,1> &mean)
+{
+  if(i>=m_n_means) {
+    boost::format m("cannot set mean with index %lu: out of bounds [0,%lu[");
+    m % i % m_n_means;
+    throw std::runtime_error(m.str());
+  }
+  bob::core::array::assertSameDimensionLength(mean.extent(0), m_means.extent(1));
+  m_means(i,blitz::Range::all()) = mean;
+}
+
+const blitz::Array<double,1> bob::learn::em::KMeansMachine::getMean(const size_t i) const
+{
+  if(i>=m_n_means) {
+    boost::format m("cannot get mean with index %lu: out of bounds [0,%lu[");
+    m % i % m_n_means;
+    throw std::runtime_error(m.str());
+  }
+
+  return m_means(i,blitz::Range::all());
+
+}
+
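+// Note: the distance below is the *squared* Euclidean distance (no square
+// root is taken), which preserves the ordering needed for nearest-mean
+// search while being cheaper to compute.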
+double bob::learn::em::KMeansMachine::getDistanceFromMean(const blitz::Array<double,1> &x,
+  const size_t i) const
+{
+  return blitz::sum(blitz::pow2(m_means(i,blitz::Range::all()) - x));
+}
+
+void bob::learn::em::KMeansMachine::getClosestMean(const blitz::Array<double,1> &x,
+  size_t &closest_mean, double &min_distance) const
+{
+  min_distance = std::numeric_limits<double>::max();
+
+  for(size_t i=0; i<m_n_means; ++i) {
+    double this_distance = getDistanceFromMean(x,i);
+    if(this_distance < min_distance) {
+      min_distance = this_distance;
+      closest_mean = i;
+    }
+  }
+}
+
+double bob::learn::em::KMeansMachine::getMinDistance(const blitz::Array<double,1>& input) const
+{
+  size_t closest_mean = 0;
+  double min_distance = 0;
+  getClosestMean(input,closest_mean,min_distance);
+  return min_distance;
+}
+
+void bob::learn::em::KMeansMachine::getVariancesAndWeightsForEachClusterInit(blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const
+{
+  // check arguments
+  bob::core::array::assertSameShape(variances, m_means);
+  bob::core::array::assertSameDimensionLength(weights.extent(0), m_n_means);
+
+  // initialise output arrays
+  variances = 0;
+  weights = 0;
+
+  // initialise (temporary) mean array
+  m_cache_means = 0;
+}
+
+void bob::learn::em::KMeansMachine::getVariancesAndWeightsForEachClusterAcc(const blitz::Array<double,2>& data, blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const
+{
+  // check arguments
+  bob::core::array::assertSameShape(variances, m_means);
+  bob::core::array::assertSameDimensionLength(weights.extent(0), m_n_means);
+
+  // iterate over data
+  blitz::Range a = blitz::Range::all();
+  for(int i=0; i<data.extent(0); ++i) {
+    // - get example
+    blitz::Array<double,1> x(data(i,a));
+
+    // - find closest mean
+    size_t closest_mean = 0;
+    double min_distance = 0;
+    getClosestMean(x,closest_mean,min_distance);
+
+    // - accumulate stats
+    m_cache_means(closest_mean, blitz::Range::all()) += x;
+    variances(closest_mean, blitz::Range::all()) += blitz::pow2(x);
+    ++weights(closest_mean);
+  }
+}
+
+void bob::learn::em::KMeansMachine::getVariancesAndWeightsForEachClusterFin(blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const
+{
+  // check arguments
+  bob::core::array::assertSameShape(variances, m_means);
+  bob::core::array::assertSameDimensionLength(weights.extent(0), m_n_means);
+
+  // calculate final variances and weights
+  blitz::firstIndex idx1;
+  blitz::secondIndex idx2;
+
+  // find means
+  m_cache_means = m_cache_means(idx1,idx2) / weights(idx1);
+
+  // find variances
+  variances = variances(idx1,idx2) / weights(idx1);
+  variances -= blitz::pow2(m_cache_means);
+
+  // find weights
+  weights = weights / blitz::sum(weights);
+}
+
+void bob::learn::em::KMeansMachine::setCacheMeans(const blitz::Array<double,2> &cache_means)
+{
+  bob::core::array::assertSameShape(cache_means, m_cache_means);
+  m_cache_means = cache_means;
+}
+
+void bob::learn::em::KMeansMachine::getVariancesAndWeightsForEachCluster(const blitz::Array<double,2>& data, blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const
+{
+  // initialise
+  getVariancesAndWeightsForEachClusterInit(variances, weights);
+  // accumulate
+  getVariancesAndWeightsForEachClusterAcc(data, variances, weights);
+  // merge/finalize
+  getVariancesAndWeightsForEachClusterFin(variances, weights);
+}
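+
+// Usage sketch (comment only): computing per-cluster Gaussian statistics from
+// a (n_samples x n_inputs) data array, assuming `machine` holds trained means:
+//
+//   blitz::Array<double,2> variances(machine.getNMeans(), machine.getNInputs());
+//   blitz::Array<double,1> weights(machine.getNMeans());
+//   machine.getVariancesAndWeightsForEachCluster(data, variances, weights);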
+
+void bob::learn::em::KMeansMachine::forward(const blitz::Array<double,1>& input, double& output) const
+{
+  if(static_cast<size_t>(input.extent(0)) != m_n_inputs) {
+    boost::format m("machine input size (%u) does not match the size of input array (%d)");
+    m % m_n_inputs % input.extent(0);
+    throw std::runtime_error(m.str());
+  }
+  forward_(input,output);
+}
+
+void bob::learn::em::KMeansMachine::forward_(const blitz::Array<double,1>& input, double& output) const
+{
+  output = getMinDistance(input);
+}
+
+void bob::learn::em::KMeansMachine::resize(const size_t n_means, const size_t n_inputs)
+{
+  m_n_means = n_means;
+  m_n_inputs = n_inputs;
+  m_means.resizeAndPreserve(n_means, n_inputs);
+  m_cache_means.resizeAndPreserve(n_means, n_inputs);
+}
+
+namespace bob { namespace learn { namespace em {
+  std::ostream& operator<<(std::ostream& os, const KMeansMachine& km) {
+    os << "Means = " << km.m_means << std::endl;
+    return os;
+  }
+} } }
diff --git a/bob/learn/em/cpp/KMeansTrainer.cpp b/bob/learn/em/cpp/KMeansTrainer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9ab18d69581b5b2528b00798ccf088391ff3d72f
--- /dev/null
+++ b/bob/learn/em/cpp/KMeansTrainer.cpp
@@ -0,0 +1,228 @@
+/**
+ * @date Tue May 10 11:35:58 2011 +0200
+ * @author Francois Moulin <Francois.Moulin@idiap.ch>
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include <bob.learn.em/KMeansTrainer.h>
+#include <bob.core/array_copy.h>
+
+#include <boost/random.hpp>
+#include <bob.core/random.h>
+
+
+bob::learn::em::KMeansTrainer::KMeansTrainer(InitializationMethod i_m):
+m_rng(new boost::mt19937()),
+m_average_min_distance(0),
+m_zeroethOrderStats(0),
+m_firstOrderStats(0)
+{
+  m_initialization_method = i_m;
+}
+
+
+bob::learn::em::KMeansTrainer::KMeansTrainer(const bob::learn::em::KMeansTrainer& other)
+{
+  m_initialization_method = other.m_initialization_method;
+  m_rng                   = other.m_rng;
+  m_average_min_distance  = other.m_average_min_distance;
+  m_zeroethOrderStats.reference(bob::core::array::ccopy(other.m_zeroethOrderStats));
+  m_firstOrderStats.reference(bob::core::array::ccopy(other.m_firstOrderStats));
+}
+
+
+bob::learn::em::KMeansTrainer& bob::learn::em::KMeansTrainer::operator=
+(const bob::learn::em::KMeansTrainer& other)
+{
+  if(this != &other)
+  {
+    m_rng                         = other.m_rng;
+    m_initialization_method       = other.m_initialization_method;
+    m_average_min_distance        = other.m_average_min_distance;
+
+    m_zeroethOrderStats.reference(bob::core::array::ccopy(other.m_zeroethOrderStats));
+    m_firstOrderStats.reference(bob::core::array::ccopy(other.m_firstOrderStats));
+  }
+  return *this;
+}
+
+
+bool bob::learn::em::KMeansTrainer::operator==(const bob::learn::em::KMeansTrainer& b) const {
+  return
+         m_initialization_method == b.m_initialization_method &&
+         *m_rng == *(b.m_rng) && m_average_min_distance == b.m_average_min_distance &&
+         bob::core::array::hasSameShape(m_zeroethOrderStats, b.m_zeroethOrderStats) &&
+         bob::core::array::hasSameShape(m_firstOrderStats, b.m_firstOrderStats) &&
+         blitz::all(m_zeroethOrderStats == b.m_zeroethOrderStats) &&
+         blitz::all(m_firstOrderStats == b.m_firstOrderStats);
+}
+
+bool bob::learn::em::KMeansTrainer::operator!=(const bob::learn::em::KMeansTrainer& b) const {
+  return !(this->operator==(b));
+}
+
+void bob::learn::em::KMeansTrainer::initialize(bob::learn::em::KMeansMachine& kmeans,
+  const blitz::Array<double,2>& ar)
+{
+  // split data into as many chunks as there are means
+  size_t n_data = ar.extent(0);
+
+  // assign the i'th mean to a random example within the i'th chunk
+  blitz::Range a = blitz::Range::all();
+  if(m_initialization_method == RANDOM || m_initialization_method == RANDOM_NO_DUPLICATE) // Random initialization
+  {
+    unsigned int n_chunk = n_data / kmeans.getNMeans();
+    size_t n_max_trials = (size_t)n_chunk * 5;
+    blitz::Array<double,1> cur_mean;
+    if(m_initialization_method == RANDOM_NO_DUPLICATE)
+      cur_mean.resize(kmeans.getNInputs());
+
+    for(size_t i=0; i<kmeans.getNMeans(); ++i)
+    {
+      boost::uniform_int<> die(i*n_chunk, (i+1)*n_chunk-1);
+
+      // get random index within chunk
+      unsigned int index = die(*m_rng);
+
+      // get the example at that index
+      blitz::Array<double, 1> mean = ar(index,a);
+
+      if(m_initialization_method == RANDOM_NO_DUPLICATE)
+      {
+        size_t count = 0;
+        while(count < n_max_trials)
+        {
+          // check that the selected sample differs from all the previously
+          // selected ones
+          bool valid = true;
+          for(size_t j=0; j<i && valid; ++j)
+          {
+            cur_mean = kmeans.getMean(j);
+            valid = blitz::any(mean != cur_mean);
+          }
+          // if it differs, stop; otherwise, draw another one
+          if(valid)
+            break;
+          else
+          {
+            index = die(*m_rng);
+            mean = ar(index,a);
+            ++count;
+          }
+        }
+        // Initialization failed: too many duplicate draws
+        if(count >= n_max_trials) {
+          boost::format m("initialization failure: exceeded the maximum number of trials (%u)");
+          m % n_max_trials;
+          throw std::runtime_error(m.str());
+        }
+      }
+
+      // set the mean
+      kmeans.setMean(i, mean);
+    }
+  }
+  else // K-Means++
+  {
+    // 1.a. Selects one sample randomly
+    boost::uniform_int<> die(0, n_data-1);
+    //   Gets the example at a random index
+    blitz::Array<double,1> mean = ar(die(*m_rng),a);
+    kmeans.setMean(0, mean);
+
+    // 1.b. Loops, computes probability distribution and select samples accordingly
+    blitz::Array<double,1> weights(n_data);
+    for(size_t m=1; m<kmeans.getNMeans(); ++m)
+    {
+      // For each sample, puts the distance to the closest mean in the weight vector
+      for(size_t s=0; s<n_data; ++s)
+      {
+        blitz::Array<double,1> s_cur = ar(s,a);
+        double& w_cur = weights(s);
+        // Initializes with the distance to first mean
+        w_cur = kmeans.getDistanceFromMean(s_cur, 0);
+        // Loops over the remaining means and updates the minimum distance if required
+        for(size_t i=1; i<m; ++i)
+          w_cur = std::min(w_cur, kmeans.getDistanceFromMean(s_cur, i));
+      }
+      // Squares and normalizes the weights such that
+      // \f$weights[x] = D(x)^{2} / \sum_{y} D(y)^{2}\f$
+      weights = blitz::pow2(weights);
+      weights /= blitz::sum(weights);
+
+      // Takes a sample according to the weights distribution
+      // Blitz iterators are fine as the weights array is C-style contiguous (asserted below)
+      bob::core::array::assertCContiguous(weights);
+      bob::core::random::discrete_distribution<> die2(weights.begin(), weights.end());
+      blitz::Array<double,1> new_mean = ar(die2(*m_rng),a);
+      kmeans.setMean(m, new_mean);
+    }
+  }
+  // Resize the accumulators
+  m_zeroethOrderStats.resize(kmeans.getNMeans());
+  m_firstOrderStats.resize(kmeans.getNMeans(), kmeans.getNInputs());
+}
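+
+// Training sketch (comment only): one EM-style pass with this trainer,
+// assuming `kmeans` and a (n_samples x n_inputs) array `data` exist:
+//
+//   trainer.initialize(kmeans, data);  // seed the means (random or k-means++)
+//   trainer.eStep(kmeans, data);       // accumulate zeroth/first order stats
+//   trainer.mStep(kmeans);             // means = first order / zeroth order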
+
+void bob::learn::em::KMeansTrainer::eStep(bob::learn::em::KMeansMachine& kmeans,
+  const blitz::Array<double,2>& ar)
+{
+  // initialise the accumulators
+  resetAccumulators(kmeans);
+
+  // iterate over data samples
+  blitz::Range a = blitz::Range::all();
+  for(int i=0; i<ar.extent(0); ++i) {
+    // get example
+    blitz::Array<double, 1> x(ar(i,a));
+
+    // find closest mean, and distance from that mean
+    size_t closest_mean = 0;
+    double min_distance = 0;
+    kmeans.getClosestMean(x,closest_mean,min_distance);
+
+    // accumulate the stats
+    m_average_min_distance += min_distance;
+    ++m_zeroethOrderStats(closest_mean);
+    m_firstOrderStats(closest_mean,blitz::Range::all()) += x;
+  }
+  m_average_min_distance /= static_cast<double>(ar.extent(0));
+}
+
+void bob::learn::em::KMeansTrainer::mStep(bob::learn::em::KMeansMachine& kmeans)
+{
+  blitz::Array<double,2>& means = kmeans.updateMeans();
+  for(size_t i=0; i<kmeans.getNMeans(); ++i)
+  {
+    means(i,blitz::Range::all()) =
+      m_firstOrderStats(i,blitz::Range::all()) / m_zeroethOrderStats(i);
+  }
+}
+
+double bob::learn::em::KMeansTrainer::computeLikelihood(bob::learn::em::KMeansMachine& kmeans)
+{
+  return m_average_min_distance;
+}
+
+
+bool bob::learn::em::KMeansTrainer::resetAccumulators(bob::learn::em::KMeansMachine& kmeans)
+{
+  m_average_min_distance = 0;
+  m_zeroethOrderStats = 0;
+  m_firstOrderStats = 0;
+  return true;
+}
+
+void bob::learn::em::KMeansTrainer::setZeroethOrderStats(const blitz::Array<double,1>& zeroethOrderStats)
+{
+  bob::core::array::assertSameShape(m_zeroethOrderStats, zeroethOrderStats);
+  m_zeroethOrderStats = zeroethOrderStats;
+}
+
+void bob::learn::em::KMeansTrainer::setFirstOrderStats(const blitz::Array<double,2>& firstOrderStats)
+{
+  bob::core::array::assertSameShape(m_firstOrderStats, firstOrderStats);
+  m_firstOrderStats = firstOrderStats;
+}
+
diff --git a/bob/learn/em/cpp/LinearScoring.cpp b/bob/learn/em/cpp/LinearScoring.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..63fdb1cce18f97686948b7ed0cfb0db3413a530b
--- /dev/null
+++ b/bob/learn/em/cpp/LinearScoring.cpp
@@ -0,0 +1,168 @@
+/**
+ * @date Wed Jul 13 16:00:04 2011 +0200
+ * @author Francois Moulin <Francois.Moulin@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+#include <bob.learn.em/LinearScoring.h>
+#include <bob.math/linear.h>
+
+
+static void _linearScoring(const std::vector<blitz::Array<double,1> >& models,
+                   const blitz::Array<double,1>& ubm_mean,
+                   const blitz::Array<double,1>& ubm_variance,
+                   const std::vector<boost::shared_ptr<const bob::learn::em::GMMStats> >& test_stats,
+                   const std::vector<blitz::Array<double,1> >* test_channelOffset,
+                   const bool frame_length_normalisation,
+                   blitz::Array<double,2>& scores)
+{
+  int C = test_stats[0]->sumPx.extent(0);
+  int D = test_stats[0]->sumPx.extent(1);
+  int CD = C*D;
+  int Tt = test_stats.size();
+  int Tm = models.size();
+
+  // Check output size
+  bob::core::array::assertSameDimensionLength(scores.extent(0), models.size());
+  bob::core::array::assertSameDimensionLength(scores.extent(1), test_stats.size());
+
+  blitz::Array<double,2> A(Tm, CD);
+  blitz::Array<double,2> B(CD, Tt);
+
+  // 1) Compute A
+  for(int t=0; t<Tm; ++t) {
+    blitz::Array<double, 1> tmp = A(t, blitz::Range::all());
+    tmp = (models[t] - ubm_mean) / ubm_variance;
+  }
+
+  // 2) Compute B
+  if(test_channelOffset == 0) {
+    for(int t=0; t<Tt; ++t)
+      for(int s=0; s<CD; ++s)
+        B(s, t) = test_stats[t]->sumPx(s/D, s%D) - (ubm_mean(s) * test_stats[t]->n(s/D));
+  }
+  else {
+    bob::core::array::assertSameDimensionLength((*test_channelOffset).size(), Tt);
+
+    for(int t=0; t<Tt; ++t) {
+      bob::core::array::assertSameDimensionLength((*test_channelOffset)[t].extent(0), CD);
+      for(int s=0; s<CD; ++s)
+        B(s, t) = test_stats[t]->sumPx(s/D, s%D) - (test_stats[t]->n(s/D) * (ubm_mean(s) + (*test_channelOffset)[t](s)));
+    }
+  }
+
+  // Apply the normalisation if needed
+  if(frame_length_normalisation) {
+    for(int t=0; t<Tt; ++t) {
+      double sum_N = test_stats[t]->T;
+      blitz::Array<double, 1> v_t = B(blitz::Range::all(),t);
+
+      if (sum_N <= std::numeric_limits<double>::epsilon() && sum_N >= -std::numeric_limits<double>::epsilon())
+        v_t = 0;
+      else
+        v_t /= sum_N;
+    }
+  }
+
+  // 3) Compute LLR
+  bob::math::prod(A, B, scores);
+}
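+
+// Recap (comment only): the product A.B computed above yields, for model m
+// and test segment t,
+//   score(m,t) = sum_s (model_m(s) - ubm_mean(s)) / ubm_variance(s)
+//                      * (sumPx_t(s) - n_t(s) * (ubm_mean(s) + offset_t(s)))
+// (offset_t = 0 when no channel offset is given), i.e. the usual linear
+// scoring approximation of the GMM log-likelihood ratio, optionally divided
+// by the frame count T when frame-length normalisation is requested.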
+
+
+void bob::learn::em::linearScoring(const std::vector<blitz::Array<double,1> >& models,
+                   const blitz::Array<double,1>& ubm_mean, const blitz::Array<double,1>& ubm_variance,
+                   const std::vector<boost::shared_ptr<const bob::learn::em::GMMStats> >& test_stats,
+                   const std::vector<blitz::Array<double,1> >& test_channelOffset,
+                   const bool frame_length_normalisation,
+                   blitz::Array<double, 2>& scores)
+{
+  _linearScoring(models, ubm_mean, ubm_variance, test_stats, &test_channelOffset, frame_length_normalisation, scores);
+}
+
+void bob::learn::em::linearScoring(const std::vector<blitz::Array<double,1> >& models,
+                   const blitz::Array<double,1>& ubm_mean, const blitz::Array<double,1>& ubm_variance,
+                   const std::vector<boost::shared_ptr<const bob::learn::em::GMMStats> >& test_stats,
+                   const bool frame_length_normalisation,
+                   blitz::Array<double, 2>& scores)
+{
+  _linearScoring(models, ubm_mean, ubm_variance, test_stats, 0, frame_length_normalisation, scores);
+}
+
+void bob::learn::em::linearScoring(const std::vector<boost::shared_ptr<const bob::learn::em::GMMMachine> >& models,
+                   const bob::learn::em::GMMMachine& ubm,
+                   const std::vector<boost::shared_ptr<const bob::learn::em::GMMStats> >& test_stats,
+                   const bool frame_length_normalisation,
+                   blitz::Array<double, 2>& scores)
+{
+  int C = test_stats[0]->sumPx.extent(0);
+  int D = test_stats[0]->sumPx.extent(1);
+  int CD = C*D;
+  std::vector<blitz::Array<double,1> > models_b;
+  // Allocate and get the mean supervector
+  for(size_t i=0; i<models.size(); ++i) {
+    blitz::Array<double,1> mod(CD);
+    mod = models[i]->getMeanSupervector();
+    models_b.push_back(mod);
+  }
+  const blitz::Array<double,1>& ubm_mean = ubm.getMeanSupervector();
+  const blitz::Array<double,1>& ubm_variance = ubm.getVarianceSupervector();
+  _linearScoring(models_b, ubm_mean, ubm_variance, test_stats, 0, frame_length_normalisation, scores);
+}
+
+void bob::learn::em::linearScoring(const std::vector<boost::shared_ptr<const bob::learn::em::GMMMachine> >& models,
+                   const bob::learn::em::GMMMachine& ubm,
+                   const std::vector<boost::shared_ptr<const bob::learn::em::GMMStats> >& test_stats,
+                   const std::vector<blitz::Array<double,1> >& test_channelOffset,
+                   const bool frame_length_normalisation,
+                   blitz::Array<double, 2>& scores)
+{
+  int C = test_stats[0]->sumPx.extent(0);
+  int D = test_stats[0]->sumPx.extent(1);
+  int CD = C*D;
+  std::vector<blitz::Array<double,1> > models_b;
+  // Allocate and get the mean supervector
+  for(size_t i=0; i<models.size(); ++i) {
+    blitz::Array<double,1> mod(CD);
+    mod = models[i]->getMeanSupervector();
+    models_b.push_back(mod);
+  }
+  const blitz::Array<double,1>& ubm_mean = ubm.getMeanSupervector();
+  const blitz::Array<double,1>& ubm_variance = ubm.getVarianceSupervector();
+  _linearScoring(models_b, ubm_mean, ubm_variance, test_stats, &test_channelOffset, frame_length_normalisation, scores);
+}
+
+
+
+double bob::learn::em::linearScoring(const blitz::Array<double,1>& models,
+                     const blitz::Array<double,1>& ubm_mean, const blitz::Array<double,1>& ubm_variance,
+                     const bob::learn::em::GMMStats& test_stats,
+                     const blitz::Array<double,1>& test_channelOffset,
+                     const bool frame_length_normalisation)
+{
+  int C = test_stats.sumPx.extent(0);
+  int D = test_stats.sumPx.extent(1);
+  int CD = C*D;
+
+  blitz::Array<double,1> A(CD);
+  blitz::Array<double,1> B(CD);
+
+  // 1) Compute A
+  A = (models - ubm_mean) / ubm_variance;
+
+  // 2) Compute B
+  for (int s=0; s<CD; ++s)
+    B(s) = test_stats.sumPx(s/D, s%D) - (test_stats.n(s/D) * (ubm_mean(s) + test_channelOffset(s)));
+
+  // Apply the normalisation if needed
+  if (frame_length_normalisation) {
+    double sum_N = test_stats.T;
+    if (sum_N == 0)
+      B = 0;
+    else
+      B /= sum_N;
+  }
+
+  return blitz::sum(A * B);
+}
+
diff --git a/bob/learn/em/cpp/MAP_GMMTrainer.cpp b/bob/learn/em/cpp/MAP_GMMTrainer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..64779c5f1b863f23a9ba3009dc2de73b16cbc9c4
--- /dev/null
+++ b/bob/learn/em/cpp/MAP_GMMTrainer.cpp
@@ -0,0 +1,199 @@
+/**
+ * @date Tue May 10 11:35:58 2011 +0200
+ * @author Francois Moulin <Francois.Moulin@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include <bob.learn.em/MAP_GMMTrainer.h>
+#include <bob.core/check.h>
+
+bob::learn::em::MAP_GMMTrainer::MAP_GMMTrainer(
+   const bool update_means,
+   const bool update_variances,
+   const bool update_weights,
+   const double mean_var_update_responsibilities_threshold,
+
+   const bool reynolds_adaptation, 
+   const double relevance_factor, 
+   const double alpha,
+   boost::shared_ptr<bob::learn::em::GMMMachine> prior_gmm):
+
+  m_gmm_base_trainer(update_means, update_variances, update_weights, mean_var_update_responsibilities_threshold),
+  m_prior_gmm(prior_gmm)
+{
+  m_reynolds_adaptation = reynolds_adaptation;
+  m_relevance_factor    = relevance_factor;
+  m_alpha               = alpha;
+}
+
+
+bob::learn::em::MAP_GMMTrainer::MAP_GMMTrainer(const bob::learn::em::MAP_GMMTrainer& b):
+  m_gmm_base_trainer(b.m_gmm_base_trainer),
+  m_prior_gmm(b.m_prior_gmm)
+{
+  m_relevance_factor    = b.m_relevance_factor;
+  m_alpha               = b.m_alpha; 
+  m_reynolds_adaptation = b.m_reynolds_adaptation;
+}
+
+bob::learn::em::MAP_GMMTrainer::~MAP_GMMTrainer()
+{}
+
+void bob::learn::em::MAP_GMMTrainer::initialize(bob::learn::em::GMMMachine& gmm)
+{
+  // Check that the prior GMM has been specified
+  if (!m_prior_gmm)
+    throw std::runtime_error("MAP_GMMTrainer: Prior GMM distribution has not been set");
+
+  // Allocate memory for the sufficient statistics and initialise
+  m_gmm_base_trainer.initialize(gmm);
+
+  const size_t n_gaussians = gmm.getNGaussians();
+  // TODO: check size?
+  gmm.setWeights(m_prior_gmm->getWeights());
+  for(size_t i=0; i<n_gaussians; ++i)
+  {
+    gmm.getGaussian(i)->updateMean() = m_prior_gmm->getGaussian(i)->getMean();
+    gmm.getGaussian(i)->updateVariance() = m_prior_gmm->getGaussian(i)->getVariance();
+    gmm.getGaussian(i)->applyVarianceThresholds();
+  }
+  // Initializes cache
+  m_cache_alpha.resize(n_gaussians);
+  m_cache_ml_weights.resize(n_gaussians);
+}
+
+bool bob::learn::em::MAP_GMMTrainer::setPriorGMM(boost::shared_ptr<bob::learn::em::GMMMachine> prior_gmm)
+{
+  if (!prior_gmm) return false;
+  m_prior_gmm = prior_gmm;
+  return true;
+}
+
+
+void bob::learn::em::MAP_GMMTrainer::mStep(bob::learn::em::GMMMachine& gmm)
+{
+  // Read options and variables
+  const size_t n_gaussians = gmm.getNGaussians();
+
+  // Check that the prior GMM has been specified
+  if (!m_prior_gmm)
+    throw std::runtime_error("MAP_GMMTrainer: Prior GMM distribution has not been set");
+
+  blitz::firstIndex i;
+  blitz::secondIndex j;
+
+  // Calculate the "data-dependent adaptation coefficient", alpha_i
+  // TODO: check if required // m_cache_alpha.resize(n_gaussians);
+  if (!m_reynolds_adaptation)
+    m_cache_alpha = m_alpha;
+  else
+    m_cache_alpha = m_gmm_base_trainer.getGMMStats().n(i) / (m_gmm_base_trainer.getGMMStats().n(i) + m_relevance_factor);
+
+  // - Update weights if requested
+  //   Equation 11 of Reynolds et al., "Speaker Verification Using Adapted Gaussian Mixture Models", Digital Signal Processing, 2000
+  if (m_gmm_base_trainer.getUpdateWeights()) {
+    // Calculate the maximum likelihood weights
+    m_cache_ml_weights = m_gmm_base_trainer.getGMMStats().n / static_cast<double>(m_gmm_base_trainer.getGMMStats().T); //cast req. for linux/32-bits & osx
+
+    // Get the prior weights
+    const blitz::Array<double,1>& prior_weights = m_prior_gmm->getWeights();
+    blitz::Array<double,1>& new_weights = gmm.updateWeights();
+
+    // Calculate the new weights
+    new_weights = m_cache_alpha * m_cache_ml_weights + (1-m_cache_alpha) * prior_weights;
+
+    // Apply the scale factor, gamma, to ensure the new weights sum to unity
+    double gamma = blitz::sum(new_weights);
+    new_weights /= gamma;
+
+    // Recompute the log weights in the cache of the GMMMachine
+    gmm.recomputeLogWeights();
+  }
+
+  // Update GMM parameters
+  // - Update means if requested
+  //   Equation 12 of Reynolds et al., "Speaker Verification Using Adapted Gaussian Mixture Models", Digital Signal Processing, 2000
+  if (m_gmm_base_trainer.getUpdateMeans()) {
+    // Calculate new means
+    for (size_t i=0; i<n_gaussians; ++i) {
+      const blitz::Array<double,1>& prior_means = m_prior_gmm->getGaussian(i)->getMean();
+      blitz::Array<double,1>& means = gmm.getGaussian(i)->updateMean();
+      if (m_gmm_base_trainer.getGMMStats().n(i) < m_gmm_base_trainer.getMeanVarUpdateResponsibilitiesThreshold()) {
+        means = prior_means;
+      }
+      else {
+        // Use the maximum likelihood means
+        means = m_cache_alpha(i) * (m_gmm_base_trainer.getGMMStats().sumPx(i,blitz::Range::all()) / m_gmm_base_trainer.getGMMStats().n(i)) + (1-m_cache_alpha(i)) * prior_means;
+      }
+    }
+  }
+
+  // - Update variance if requested
+  //   Equation 13 of Reynolds et al., "Speaker Verification Using Adapted Gaussian Mixture Models", Digital Signal Processing, 2000
+  if (m_gmm_base_trainer.getUpdateVariances()) {
+    // Calculate new variances (equation 13)
+    for (size_t i=0; i<n_gaussians; ++i) {
+      const blitz::Array<double,1>& prior_means = m_prior_gmm->getGaussian(i)->getMean();
+      blitz::Array<double,1>& means = gmm.getGaussian(i)->updateMean();
+      const blitz::Array<double,1>& prior_variances = m_prior_gmm->getGaussian(i)->getVariance();
+      blitz::Array<double,1>& variances = gmm.getGaussian(i)->updateVariance();
+      if (m_gmm_base_trainer.getGMMStats().n(i) < m_gmm_base_trainer.getMeanVarUpdateResponsibilitiesThreshold()) {
+        // prior second moment (sigma^2 + mu^2) minus the new squared mean, cf. Eq 13
+        variances = (prior_variances + blitz::pow2(prior_means)) - blitz::pow2(means);
+      }
+      else {
+        variances = m_cache_alpha(i) * m_gmm_base_trainer.getGMMStats().sumPxx(i,blitz::Range::all()) / m_gmm_base_trainer.getGMMStats().n(i) + (1-m_cache_alpha(i)) * (prior_variances + blitz::pow2(prior_means)) - blitz::pow2(means);
+      }
+      gmm.getGaussian(i)->applyVarianceThresholds();
+    }
+  }
+}
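+
+// Summary (comment only): each parameter updated above is an alpha-weighted
+// blend of its maximum-likelihood estimate and the prior (UBM) value,
+// following Eqs. (11)-(13) of Reynolds et al., 2000, with
+//   alpha_i = n_i / (n_i + relevance_factor)   (Eq. (14), Reynolds mode)
+// or a user-fixed alpha when Reynolds adaptation is disabled.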
+
+
+
+bob::learn::em::MAP_GMMTrainer& bob::learn::em::MAP_GMMTrainer::operator=
+  (const bob::learn::em::MAP_GMMTrainer &other)
+{
+  if (this != &other)
+  {
+    m_gmm_base_trainer    = other.m_gmm_base_trainer;
+    m_relevance_factor    = other.m_relevance_factor;
+    m_prior_gmm           = other.m_prior_gmm;
+    m_alpha               = other.m_alpha;
+    m_reynolds_adaptation = other.m_reynolds_adaptation;
+    m_cache_alpha.resize(other.m_cache_alpha.extent(0));
+    m_cache_ml_weights.resize(other.m_cache_ml_weights.extent(0));
+  }
+  return *this;
+}
+
+
+bool bob::learn::em::MAP_GMMTrainer::operator==
+  (const bob::learn::em::MAP_GMMTrainer &other) const
+{
+  return m_gmm_base_trainer    == other.m_gmm_base_trainer &&
+         m_relevance_factor    == other.m_relevance_factor &&
+         m_prior_gmm           == other.m_prior_gmm &&
+         m_alpha               == other.m_alpha &&
+         m_reynolds_adaptation == other.m_reynolds_adaptation;
+}
+
+
+bool bob::learn::em::MAP_GMMTrainer::operator!=
+  (const bob::learn::em::MAP_GMMTrainer &other) const
+{
+  return !(this->operator==(other));
+}
+
+
+bool bob::learn::em::MAP_GMMTrainer::is_similar_to
+  (const bob::learn::em::MAP_GMMTrainer &other, const double r_epsilon,
+   const double a_epsilon) const
+{
+  return //m_gmm_base_trainer.is_similar_to(other.m_gmm_base_trainer, r_epsilon, a_epsilon) &&
+         bob::core::isClose(m_relevance_factor, other.m_relevance_factor, r_epsilon, a_epsilon) &&
+         m_prior_gmm == other.m_prior_gmm &&
+         bob::core::isClose(m_alpha, other.m_alpha, r_epsilon, a_epsilon) &&
+         m_reynolds_adaptation == other.m_reynolds_adaptation;
+}
+
diff --git a/bob/learn/em/cpp/ML_GMMTrainer.cpp b/bob/learn/em/cpp/ML_GMMTrainer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..1c5bc84b292eebcd5e95f35b54c60f177c404260
--- /dev/null
+++ b/bob/learn/em/cpp/ML_GMMTrainer.cpp
@@ -0,0 +1,112 @@
+/**
+ * @date Tue May 10 11:35:58 2011 +0200
+ * @author Francois Moulin <Francois.Moulin@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include <bob.learn.em/ML_GMMTrainer.h>
+#include <algorithm>
+
+bob::learn::em::ML_GMMTrainer::ML_GMMTrainer(
+   const bool update_means,
+   const bool update_variances, 
+   const bool update_weights,
+   const double mean_var_update_responsibilities_threshold
+):
+  m_gmm_base_trainer(update_means, update_variances, update_weights, mean_var_update_responsibilities_threshold)
+{}
+
+
+
+bob::learn::em::ML_GMMTrainer::ML_GMMTrainer(const bob::learn::em::ML_GMMTrainer& b):
+  m_gmm_base_trainer(b.m_gmm_base_trainer)
+{}
+
+bob::learn::em::ML_GMMTrainer::~ML_GMMTrainer()
+{}
+
+void bob::learn::em::ML_GMMTrainer::initialize(bob::learn::em::GMMMachine& gmm)
+{
+  m_gmm_base_trainer.initialize(gmm);
+  
+  // Allocate cache
+  size_t n_gaussians = gmm.getNGaussians();
+  m_cache_ss_n_thresholded.resize(n_gaussians);
+}
+
+
+void bob::learn::em::ML_GMMTrainer::mStep(bob::learn::em::GMMMachine& gmm)
+{
+  // Read options and variables
+  const size_t n_gaussians = gmm.getNGaussians();
+
+  // - Update weights if requested
+  //   Equation 9.26 of Bishop, "Pattern recognition and machine learning", 2006
+  if (m_gmm_base_trainer.getUpdateWeights()) {
+    blitz::Array<double,1>& weights = gmm.updateWeights();
+    weights = m_gmm_base_trainer.getGMMStats().n / static_cast<double>(m_gmm_base_trainer.getGMMStats().T); //cast req. for linux/32-bits & osx
+    // Recompute the log weights in the cache of the GMMMachine
+    gmm.recomputeLogWeights();
+  }
+
+  // Generate a thresholded version of m_ss.n
+  for(size_t i=0; i<n_gaussians; ++i)
+    m_cache_ss_n_thresholded(i) = std::max(m_gmm_base_trainer.getGMMStats().n(i), m_gmm_base_trainer.getMeanVarUpdateResponsibilitiesThreshold());
+
+  // Update GMM parameters using the sufficient statistics (m_ss)
+  // - Update means if requested
+  //   Equation 9.24 of Bishop, "Pattern recognition and machine learning", 2006
+  if (m_gmm_base_trainer.getUpdateMeans()) {
+    for(size_t i=0; i<n_gaussians; ++i) {
+      blitz::Array<double,1>& means = gmm.getGaussian(i)->updateMean();
+      means = m_gmm_base_trainer.getGMMStats().sumPx(i, blitz::Range::all()) / m_cache_ss_n_thresholded(i);
+    }
+  }
+
+  // - Update variance if requested
+  //   See Equation 9.25 of Bishop, "Pattern recognition and machine learning", 2006
+  //   ...but we use the "computational formula for the variance", i.e.
+  //   var = 1/n * sum (P(x-mean)(x-mean))
+  //       = 1/n * sum (Pxx) - mean^2
+  if (m_gmm_base_trainer.getUpdateVariances()) {
+    for(size_t i=0; i<n_gaussians; ++i) {
+      const blitz::Array<double,1>& means = gmm.getGaussian(i)->getMean();
+      blitz::Array<double,1>& variances = gmm.getGaussian(i)->updateVariance();
+      variances = m_gmm_base_trainer.getGMMStats().sumPxx(i, blitz::Range::all()) / m_cache_ss_n_thresholded(i) - blitz::pow2(means);
+      gmm.getGaussian(i)->applyVarianceThresholds();
+    }
+  }
+}
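+
+// Worked form (comment only): with responsibility-weighted statistics
+//   n_i = sum_t P(i|x_t), sumPx_i = sum_t P(i|x_t).x_t,
+//   sumPxx_i = sum_t P(i|x_t).x_t^2,
+// the updates above read w_i = n_i / T, mu_i = sumPx_i / n_i and
+// var_i = sumPxx_i / n_i - mu_i^2 (Bishop 2006, Eqs. 9.24-9.26).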
+
+bob::learn::em::ML_GMMTrainer& bob::learn::em::ML_GMMTrainer::operator=
+  (const bob::learn::em::ML_GMMTrainer &other)
+{
+  if (this != &other)
+  {
+    m_gmm_base_trainer = other.m_gmm_base_trainer;
+    m_cache_ss_n_thresholded.resize(other.m_cache_ss_n_thresholded.extent(0));
+  }
+  return *this;
+}
+
+bool bob::learn::em::ML_GMMTrainer::operator==
+  (const bob::learn::em::ML_GMMTrainer &other) const
+{
+  return m_gmm_base_trainer == other.m_gmm_base_trainer;
+}
+
+bool bob::learn::em::ML_GMMTrainer::operator!=
+  (const bob::learn::em::ML_GMMTrainer &other) const
+{
+  return !(this->operator==(other));
+}
+
+/*
+bool bob::learn::em::ML_GMMTrainer::is_similar_to
+  (const bob::learn::em::ML_GMMTrainer &other, const double r_epsilon,
+   const double a_epsilon) const
+{
+  return m_gmm_base_trainer.is_similar_to(other, r_epsilon, a_epsilon);
+}
+*/
diff --git a/bob/learn/em/cpp/PLDAMachine.cpp b/bob/learn/em/cpp/PLDAMachine.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a390cb1c19f82d7875f3714134430a7442db383a
--- /dev/null
+++ b/bob/learn/em/cpp/PLDAMachine.cpp
@@ -0,0 +1,960 @@
+/**
+ * @date Fri Oct 14 18:07:56 2011 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * @brief Machines that implement the PLDA model
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include <bob.core/assert.h>
+#include <bob.core/check.h>
+#include <bob.core/array_copy.h>
+#include <bob.learn.em/PLDAMachine.h>
+#include <bob.math/linear.h>
+#include <bob.math/det.h>
+#include <bob.math/inv.h>
+
+#include <cmath>
+#include <boost/lexical_cast.hpp>
+#include <string>
+
+bob::learn::em::PLDABase::PLDABase():
+  m_variance_threshold(0.)
+{
+  resizeNoInit(0, 0, 0);
+}
+
+bob::learn::em::PLDABase::PLDABase(const size_t dim_d, const size_t dim_f,
+    const size_t dim_g, const double variance_threshold):
+  m_variance_threshold(variance_threshold)
+{
+  resize(dim_d, dim_f, dim_g);
+}
+
+
+bob::learn::em::PLDABase::PLDABase(const bob::learn::em::PLDABase& other):
+  m_dim_d(other.m_dim_d),
+  m_dim_f(other.m_dim_f),
+  m_dim_g(other.m_dim_g),
+  m_F(bob::core::array::ccopy(other.m_F)),
+  m_G(bob::core::array::ccopy(other.m_G)),
+  m_sigma(bob::core::array::ccopy(other.m_sigma)),
+  m_mu(bob::core::array::ccopy(other.m_mu)),
+  m_variance_threshold(other.m_variance_threshold),
+  m_cache_isigma(bob::core::array::ccopy(other.m_cache_isigma)),
+  m_cache_alpha(bob::core::array::ccopy(other.m_cache_alpha)),
+  m_cache_beta(bob::core::array::ccopy(other.m_cache_beta)),
+  m_cache_gamma(),
+  m_cache_Ft_beta(bob::core::array::ccopy(other.m_cache_Ft_beta)),
+  m_cache_Gt_isigma(bob::core::array::ccopy(other.m_cache_Gt_isigma)),
+  m_cache_logdet_alpha(other.m_cache_logdet_alpha),
+  m_cache_logdet_sigma(other.m_cache_logdet_sigma),
+  m_cache_loglike_constterm(other.m_cache_loglike_constterm)
+{
+  bob::core::array::ccopy(other.m_cache_gamma, m_cache_gamma);
+  resizeTmp();
+}
+
+bob::learn::em::PLDABase::PLDABase(bob::io::base::HDF5File& config) {
+  load(config);
+}
+
+bob::learn::em::PLDABase::~PLDABase() {
+}
+
+bob::learn::em::PLDABase& bob::learn::em::PLDABase::operator=
+    (const bob::learn::em::PLDABase& other)
+{
+  if (this != &other)
+  {
+    m_dim_d = other.m_dim_d;
+    m_dim_f = other.m_dim_f;
+    m_dim_g = other.m_dim_g;
+    m_F.reference(bob::core::array::ccopy(other.m_F));
+    m_G.reference(bob::core::array::ccopy(other.m_G));
+    m_sigma.reference(bob::core::array::ccopy(other.m_sigma));
+    m_mu.reference(bob::core::array::ccopy(other.m_mu));
+    m_variance_threshold = other.m_variance_threshold;
+    m_cache_isigma.reference(bob::core::array::ccopy(other.m_cache_isigma));
+    m_cache_alpha.reference(bob::core::array::ccopy(other.m_cache_alpha));
+    m_cache_beta.reference(bob::core::array::ccopy(other.m_cache_beta));
+    bob::core::array::ccopy(other.m_cache_gamma, m_cache_gamma);
+    m_cache_Ft_beta.reference(bob::core::array::ccopy(other.m_cache_Ft_beta));
+    m_cache_Gt_isigma.reference(bob::core::array::ccopy(other.m_cache_Gt_isigma));
+    m_cache_logdet_alpha = other.m_cache_logdet_alpha;
+    m_cache_logdet_sigma = other.m_cache_logdet_sigma;
+    m_cache_loglike_constterm = other.m_cache_loglike_constterm;
+    resizeTmp();
+  }
+  return *this;
+}
+
+bool bob::learn::em::PLDABase::operator==
+    (const bob::learn::em::PLDABase& b) const
+{
+  if (!(m_dim_d == b.m_dim_d && m_dim_f == b.m_dim_f &&
+        m_dim_g == b.m_dim_g &&
+        bob::core::array::isEqual(m_F, b.m_F) &&
+        bob::core::array::isEqual(m_G, b.m_G) &&
+        bob::core::array::isEqual(m_sigma, b.m_sigma) &&
+        bob::core::array::isEqual(m_mu, b.m_mu) &&
+        m_variance_threshold == b.m_variance_threshold &&
+        bob::core::array::isEqual(m_cache_isigma, b.m_cache_isigma) &&
+        bob::core::array::isEqual(m_cache_alpha, b.m_cache_alpha) &&
+        bob::core::array::isEqual(m_cache_beta, b.m_cache_beta) &&
+        bob::core::array::isEqual(m_cache_gamma, b.m_cache_gamma) &&
+        bob::core::array::isEqual(m_cache_Ft_beta, b.m_cache_Ft_beta) &&
+        bob::core::array::isEqual(m_cache_Gt_isigma, b.m_cache_Gt_isigma) &&
+        m_cache_logdet_alpha == b.m_cache_logdet_alpha &&
+        m_cache_logdet_sigma == b.m_cache_logdet_sigma))
+    return false;
+
+  // m_cache_loglike_constterm
+  if (this->m_cache_loglike_constterm.size() != b.m_cache_loglike_constterm.size())
+    return false;  // differing sizes, they are not the same
+  std::map<size_t, double>::const_iterator i, j;
+  for (i = this->m_cache_loglike_constterm.begin(), j = b.m_cache_loglike_constterm.begin();
+    i != this->m_cache_loglike_constterm.end(); ++i, ++j)
+  {
+    if (i->first != j->first || i->second != j->second)
+      return false;
+  }
+
+  return true;
+}
+
+bool bob::learn::em::PLDABase::operator!=
+    (const bob::learn::em::PLDABase& b) const
+{
+  return !(this->operator==(b));
+}
+
+bool bob::learn::em::PLDABase::is_similar_to(const bob::learn::em::PLDABase& b,
+  const double r_epsilon, const double a_epsilon) const
+{
+  return (m_dim_d == b.m_dim_d && m_dim_f == b.m_dim_f &&
+          m_dim_g == b.m_dim_g &&
+          bob::core::array::isClose(m_F, b.m_F, r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(m_G, b.m_G, r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(m_sigma, b.m_sigma, r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(m_mu, b.m_mu, r_epsilon, a_epsilon) &&
+          bob::core::isClose(m_variance_threshold, b.m_variance_threshold, r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(m_cache_isigma, b.m_cache_isigma, r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(m_cache_alpha, b.m_cache_alpha, r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(m_cache_beta, b.m_cache_beta, r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(m_cache_gamma, b.m_cache_gamma, r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(m_cache_Ft_beta, b.m_cache_Ft_beta, r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(m_cache_Gt_isigma, b.m_cache_Gt_isigma, r_epsilon, a_epsilon) &&
+          bob::core::isClose(m_cache_logdet_alpha, b.m_cache_logdet_alpha, r_epsilon, a_epsilon) &&
+          bob::core::isClose(m_cache_logdet_sigma, b.m_cache_logdet_sigma, r_epsilon, a_epsilon) &&
+          bob::core::isClose(m_cache_loglike_constterm, b.m_cache_loglike_constterm, r_epsilon, a_epsilon));
+}
+
+void bob::learn::em::PLDABase::load(bob::io::base::HDF5File& config)
+{
+  if (!config.contains("dim_d"))
+  {
+    // Then the model was saved using bob < 1.2.0
+    //reads all data directly into the member variables
+    m_F.reference(config.readArray<double,2>("F"));
+    m_G.reference(config.readArray<double,2>("G"));
+    m_dim_d = m_F.extent(0);
+    m_dim_f = m_F.extent(1);
+    m_dim_g = m_G.extent(1);
+    m_sigma.reference(config.readArray<double,1>("sigma"));
+    m_mu.reference(config.readArray<double,1>("mu"));
+    m_cache_isigma.resize(m_dim_d);
+    precomputeISigma();
+    m_variance_threshold = 0.;
+    m_cache_alpha.reference(config.readArray<double,2>("alpha"));
+    m_cache_beta.reference(config.readArray<double,2>("beta"));
+    // gamma and log like constant term (a-dependent terms)
+    if (config.contains("a_indices"))
+    {
+      blitz::Array<uint32_t, 1> a_indices;
+      a_indices.reference(config.readArray<uint32_t,1>("a_indices"));
+      for (int i=0; i<a_indices.extent(0); ++i)
+      {
+        std::string str1 = "gamma_" + boost::lexical_cast<std::string>(a_indices(i));
+        m_cache_gamma[a_indices(i)].reference(config.readArray<double,2>(str1));
+        std::string str2 = "loglikeconstterm_" + boost::lexical_cast<std::string>(a_indices(i));
+        m_cache_loglike_constterm[a_indices(i)] = config.read<double>(str2);
+      }
+    }
+    m_cache_Ft_beta.reference(config.readArray<double,2>("Ft_beta"));
+    m_cache_Gt_isigma.reference(config.readArray<double,2>("Gt_isigma"));
+    m_cache_logdet_alpha = config.read<double>("logdet_alpha");
+    m_cache_logdet_sigma = config.read<double>("logdet_sigma");
+  }
+  else
+  {
+    // Then the model was saved using bob >= 1.2.0
+    //reads all data directly into the member variables
+    m_F.reference(config.readArray<double,2>("F"));
+    m_G.reference(config.readArray<double,2>("G"));
+    // Reads the dimensions, which versions prior to 1.2.0 did not store
+    m_dim_d = config.read<uint64_t>("dim_d");
+    m_dim_f = config.read<uint64_t>("dim_f");
+    m_dim_g = config.read<uint64_t>("dim_g");
+    m_sigma.reference(config.readArray<double,1>("sigma"));
+    m_mu.reference(config.readArray<double,1>("mu"));
+    m_cache_isigma.resize(m_dim_d);
+    precomputeISigma();
+    if (config.contains("variance_threshold"))
+      m_variance_threshold = config.read<double>("variance_threshold");
+    else if (config.contains("variance_thresholds")) // In case a 1.2.0 alpha/beta version was used
+    {
+      blitz::Array<double,1> tmp;
+      tmp.reference(config.readArray<double,1>("variance_thresholds"));
+      m_variance_threshold = tmp(0);
+    }
+    m_cache_alpha.reference(config.readArray<double,2>("alpha"));
+    m_cache_beta.reference(config.readArray<double,2>("beta"));
+    // gamma's (a-dependent terms)
+    if(config.contains("a_indices_gamma"))
+    {
+      blitz::Array<uint32_t, 1> a_indices;
+      a_indices.reference(config.readArray<uint32_t,1>("a_indices_gamma"));
+      for(int i=0; i<a_indices.extent(0); ++i)
+      {
+        std::string str = "gamma_" + boost::lexical_cast<std::string>(a_indices(i));
+        m_cache_gamma[a_indices(i)].reference(config.readArray<double,2>(str));
+      }
+    }
+    // log likelihood constant term's (a-dependent terms)
+    if(config.contains("a_indices_loglikeconstterm"))
+    {
+      blitz::Array<uint32_t, 1> a_indices;
+      a_indices.reference(config.readArray<uint32_t,1>("a_indices_loglikeconstterm"));
+      for(int i=0; i<a_indices.extent(0); ++i)
+      {
+        std::string str = "loglikeconstterm_" + boost::lexical_cast<std::string>(a_indices(i));
+        m_cache_loglike_constterm[a_indices(i)] = config.read<double>(str);
+      }
+    }
+    m_cache_Ft_beta.reference(config.readArray<double,2>("Ft_beta"));
+    m_cache_Gt_isigma.reference(config.readArray<double,2>("Gt_isigma"));
+    m_cache_logdet_alpha = config.read<double>("logdet_alpha");
+    m_cache_logdet_sigma = config.read<double>("logdet_sigma");
+  }
+  resizeTmp();
+}
+
+void bob::learn::em::PLDABase::save(bob::io::base::HDF5File& config) const
+{
+  config.set("dim_d", (uint64_t)m_dim_d);
+  config.set("dim_f", (uint64_t)m_dim_f);
+  config.set("dim_g", (uint64_t)m_dim_g);
+  config.setArray("F", m_F);
+  config.setArray("G", m_G);
+  config.setArray("sigma", m_sigma);
+  config.setArray("mu", m_mu);
+  config.set("variance_threshold", m_variance_threshold);
+  config.setArray("alpha", m_cache_alpha);
+  config.setArray("beta", m_cache_beta);
+  // gamma's
+  if(m_cache_gamma.size() > 0)
+  {
+    blitz::Array<uint32_t, 1> a_indices(m_cache_gamma.size());
+    int i = 0;
+    for(std::map<size_t,blitz::Array<double,2> >::const_iterator
+        it=m_cache_gamma.begin(); it!=m_cache_gamma.end(); ++it)
+    {
+      a_indices(i) = it->first;
+      std::string str = "gamma_" + boost::lexical_cast<std::string>(it->first);
+      config.setArray(str, it->second);
+      ++i;
+    }
+    config.setArray("a_indices_gamma", a_indices);
+  }
+  // log likelihood constant terms
+  if(m_cache_loglike_constterm.size() > 0)
+  {
+    blitz::Array<uint32_t, 1> a_indices(m_cache_loglike_constterm.size());
+    int i = 0;
+    for(std::map<size_t,double>::const_iterator
+        it=m_cache_loglike_constterm.begin(); it!=m_cache_loglike_constterm.end(); ++it)
+    {
+      a_indices(i) = it->first;
+      std::string str = "loglikeconstterm_" + boost::lexical_cast<std::string>(it->first);
+      config.set(str, it->second);
+      ++i;
+    }
+    config.setArray("a_indices_loglikeconstterm", a_indices);
+  }
+
+  config.setArray("Ft_beta", m_cache_Ft_beta);
+  config.setArray("Gt_isigma", m_cache_Gt_isigma);
+  config.set("logdet_alpha", m_cache_logdet_alpha);
+  config.set("logdet_sigma", m_cache_logdet_sigma);
+}
+
+void bob::learn::em::PLDABase::resizeNoInit(const size_t dim_d, const size_t dim_f,
+    const size_t dim_g)
+{
+  m_dim_d = dim_d;
+  m_dim_f = dim_f;
+  m_dim_g = dim_g;
+  m_F.resize(dim_d, dim_f);
+  m_G.resize(dim_d, dim_g);
+  m_sigma.resize(dim_d);
+  m_mu.resize(dim_d);
+  m_cache_alpha.resize(dim_g, dim_g);
+  m_cache_beta.resize(dim_d, dim_d);
+  m_cache_Ft_beta.resize(dim_f, dim_d);
+  m_cache_Gt_isigma.resize(dim_g, dim_d);
+  m_cache_gamma.clear();
+  m_cache_isigma.resize(dim_d);
+  m_cache_loglike_constterm.clear();
+  resizeTmp();
+}
+
+void bob::learn::em::PLDABase::resizeTmp()
+{
+  m_tmp_d_1.resize(m_dim_d);
+  m_tmp_d_2.resize(m_dim_d);
+  m_tmp_d_ng_1.resize(m_dim_d, m_dim_g);
+  m_tmp_nf_nf_1.resize(m_dim_f, m_dim_f);
+  m_tmp_ng_ng_1.resize(m_dim_g, m_dim_g);
+}
+
+void bob::learn::em::PLDABase::resize(const size_t dim_d, const size_t dim_f,
+    const size_t dim_g)
+{
+  resizeNoInit(dim_d, dim_f, dim_g);
+  initMuFGSigma();
+}
+
+void bob::learn::em::PLDABase::setF(const blitz::Array<double,2>& F)
+{
+  bob::core::array::assertSameShape(F, m_F);
+  m_F.reference(bob::core::array::ccopy(F));
+  // Precomputes useful matrices
+  precompute();
+}
+
+void bob::learn::em::PLDABase::setG(const blitz::Array<double,2>& G)
+{
+  bob::core::array::assertSameShape(G, m_G);
+  m_G.reference(bob::core::array::ccopy(G));
+  // Precomputes useful matrices and values
+  precompute();
+  precomputeLogDetAlpha();
+}
+
+void bob::learn::em::PLDABase::setSigma(const blitz::Array<double,1>& sigma)
+{
+  bob::core::array::assertSameShape(sigma, m_sigma);
+  m_sigma.reference(bob::core::array::ccopy(sigma));
+  // Apply variance flooring threshold: This will also
+  // call the precompute() and precomputeLogLike() methods!
+  applyVarianceThreshold();
+}
+
+void bob::learn::em::PLDABase::setMu(const blitz::Array<double,1>& mu)
+{
+  bob::core::array::assertSameShape(mu, m_mu);
+  m_mu.reference(bob::core::array::ccopy(mu));
+}
+
+void bob::learn::em::PLDABase::setVarianceThreshold(const double value)
+{
+  // Variance flooring
+  m_variance_threshold = value;
+  // Apply variance flooring thresholds: This will also
+  // call the precompute() and precomputeLogLike() methods!
+  applyVarianceThreshold();
+}
+
+void bob::learn::em::PLDABase::applyVarianceThreshold()
+{
+   // Apply variance flooring threshold
+  m_sigma = blitz::where( m_sigma < m_variance_threshold, m_variance_threshold, m_sigma);
+  // Re-compute constants, because m_sigma has changed
+  precompute();
+  precomputeLogLike();
+}
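+
+// E.g. with a threshold of 1e-5, a sigma of {2.0, 1e-9, 0.5} is floored to
+// {2.0, 1e-5, 0.5} before the sigma-dependent caches are rebuilt. A minimal
+// sketch of the behaviour (illustrative only, not compiled):
+#if 0
+bob::learn::em::PLDABase m(3, 2, 1, 1e-5); // D=3, n_F=2, n_G=1, threshold=1e-5
+blitz::Array<double,1> s(3);
+s = 2.0, 1e-9, 0.5;
+m.setSigma(s); // copies sigma, then applies the floor internally
+#endif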
+
+const blitz::Array<double,2>& bob::learn::em::PLDABase::getGamma(const size_t a) const
+{
+  if(!hasGamma(a))
+    throw std::runtime_error("Gamma for this number of samples is not currently in cache. You could use the getAddGamma() method instead");
+  return (m_cache_gamma.find(a))->second;
+}
+
+const blitz::Array<double,2>& bob::learn::em::PLDABase::getAddGamma(const size_t a)
+{
+  if(!hasGamma(a)) precomputeGamma(a);
+  return m_cache_gamma[a];
+}
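+
+// Illustrative sketch of the cache behaviour above (not compiled; assumes
+// the accessors declared in PLDAMachine.h):
+#if 0
+bob::learn::em::PLDABase base(64, 8, 4, 0.);
+const blitz::Array<double,2>& g3 = base.getAddGamma(3); // computes and caches
+const blitz::Array<double,2>& g3b = base.getGamma(3);   // cache hit, no throw
+#endif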
+
+void bob::learn::em::PLDABase::initMuFGSigma()
+{
+  // To avoid problems related to precomputation
+  m_mu = 0.;
+  bob::math::eye(m_F);
+  bob::math::eye(m_G);
+  m_sigma = 1.;
+  // Precompute variables
+  precompute();
+  precomputeLogLike();
+}
+
+void bob::learn::em::PLDABase::precompute()
+{
+  precomputeISigma();
+  precomputeGtISigma();
+  precomputeAlpha();
+  precomputeBeta();
+  m_cache_gamma.clear();
+  precomputeFtBeta();
+  m_cache_loglike_constterm.clear();
+}
+
+void bob::learn::em::PLDABase::precomputeLogLike()
+{
+  precomputeLogDetAlpha();
+  precomputeLogDetSigma();
+}
+
+void bob::learn::em::PLDABase::precomputeISigma()
+{
+  // Updates inverse of sigma
+  m_cache_isigma = 1. / m_sigma;
+}
+
+void bob::learn::em::PLDABase::precomputeGtISigma()
+{
+  // m_cache_Gt_isigma = G^T \Sigma^{-1}
+  blitz::firstIndex i;
+  blitz::secondIndex j;
+  blitz::Array<double,2> Gt = m_G.transpose(1,0);
+  m_cache_Gt_isigma = Gt(i,j) * m_cache_isigma(j);
+}
+
+void bob::learn::em::PLDABase::precomputeAlpha()
+{
+  // alpha = (Id + G^T.sigma^-1.G)^-1
+
+  // m_tmp_ng_ng_1 = G^T.sigma^-1.G
+  bob::math::prod(m_cache_Gt_isigma, m_G, m_tmp_ng_ng_1);
+  // m_tmp_ng_ng_1 = Id + G^T.sigma^-1.G
+  for(int i=0; i<m_tmp_ng_ng_1.extent(0); ++i) m_tmp_ng_ng_1(i,i) += 1;
+  // m_cache_alpha = (Id + G^T.sigma^-1.G)^-1
+  bob::math::inv(m_tmp_ng_ng_1, m_cache_alpha);
+}
+
+void bob::learn::em::PLDABase::precomputeBeta()
+{
+  // beta = (sigma + G.G^T)^-1
+  // BUT, there is a more efficient computation (Woodbury identity):
+  // beta = sigma^-1 - sigma^-1.G.(Id + G^T.sigma^-1.G)^-1.G^T.sigma^-1
+  // beta =  sigma^-1 - sigma^-1.G.alpha.G^T.sigma^-1
+
+  blitz::Array<double,2> GtISigmaT = m_cache_Gt_isigma.transpose(1,0);
+  // m_tmp_d_ng_1 = sigma^-1.G.alpha
+  bob::math::prod(GtISigmaT, m_cache_alpha, m_tmp_d_ng_1);
+  // m_cache_beta = -sigma^-1.G.alpha.G^T.sigma^-1
+  bob::math::prod(m_tmp_d_ng_1, m_cache_Gt_isigma, m_cache_beta);
+  m_cache_beta = -m_cache_beta;
+  // m_cache_beta = sigma^-1 - sigma^-1.G.alpha.G^T.sigma^-1
+  for(int i=0; i<m_cache_beta.extent(0); ++i) m_cache_beta(i,i) += m_cache_isigma(i);
+}
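+
+// A quick scalar sanity check of the Woodbury shortcut above (D = n_g = 1,
+// sigma = s, G = g; illustrative only):
+//   direct:   beta = (s + g^2)^-1
+//   Woodbury: 1/s - (1/s).g.(1 + g.(1/s).g)^-1.g.(1/s)
+//           = 1/s - g^2/(s.(s + g^2)) = (s + g^2)^-1
+// Only the small n_g x n_g inverse computed in alpha is needed, instead of
+// the full D x D inverse.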
+
+void bob::learn::em::PLDABase::precomputeGamma(const size_t a)
+{
+  blitz::Array<double,2> gamma_a(getDimF(),getDimF());
+  m_cache_gamma[a].reference(gamma_a);
+  computeGamma(a, gamma_a);
+}
+
+void bob::learn::em::PLDABase::precomputeFtBeta()
+{
+  // m_cache_Ft_beta = F^T.beta = F^T.(sigma + G.G^T)^-1
+  blitz::Array<double,2> Ft = m_F.transpose(1,0);
+  bob::math::prod(Ft, m_cache_beta, m_cache_Ft_beta);
+}
+
+void bob::learn::em::PLDABase::computeGamma(const size_t a,
+  blitz::Array<double,2> res) const
+{
+  // gamma = (Id + a.F^T.beta.F)^-1
+
+  // Checks destination size
+  bob::core::array::assertSameShape(res, m_tmp_nf_nf_1);
+  // m_tmp_nf_nf_1 = F^T.beta.F
+  bob::math::prod(m_cache_Ft_beta, m_F, m_tmp_nf_nf_1);
+   // m_tmp_nf_nf_1 = a.F^T.beta.F
+  m_tmp_nf_nf_1 *= static_cast<double>(a);
+  // m_tmp_nf_nf_1 = Id + a.F^T.beta.F
+  for(int i=0; i<m_tmp_nf_nf_1.extent(0); ++i) m_tmp_nf_nf_1(i,i) += 1;
+
+  // res = (Id + a.F^T.beta.F)^-1
+  bob::math::inv(m_tmp_nf_nf_1, res);
+}
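+
+// Written out, the quantity computed above is gamma_a = (I + a.F^T.beta.F)^-1,
+// which can be read as the posterior covariance of the identity variable h
+// given `a' i.i.d. samples; this is why it is cached per value of `a'.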
+
+void bob::learn::em::PLDABase::precomputeLogDetAlpha()
+{
+  int sign;
+  m_cache_logdet_alpha = bob::math::slogdet(m_cache_alpha, sign);
+}
+
+void bob::learn::em::PLDABase::precomputeLogDetSigma()
+{
+  m_cache_logdet_sigma = blitz::sum(blitz::log(m_sigma));
+}
+
+double bob::learn::em::PLDABase::computeLogLikeConstTerm(const size_t a,
+  const blitz::Array<double,2>& gamma_a) const
+{
+  // loglike_constterm[a] = -a*D/2*log(2*pi) - a/2*log|sigma|
+  //                        + a/2*log|alpha| + 1/2*log|gamma_a|
+  int sign;
+  double logdet_gamma_a = bob::math::slogdet(gamma_a, sign);
+  double ah = static_cast<double>(a)/2.;
+  double res = ( -ah*((double)m_dim_d)*log(2*M_PI) -
+      ah*m_cache_logdet_sigma + ah*m_cache_logdet_alpha + logdet_gamma_a/2.);
+  return res;
+}
+
+double bob::learn::em::PLDABase::computeLogLikeConstTerm(const size_t a)
+{
+  const blitz::Array<double,2>& gamma_a = getAddGamma(a);
+  return computeLogLikeConstTerm(a, gamma_a);
+}
+
+void bob::learn::em::PLDABase::precomputeLogLikeConstTerm(const size_t a)
+{
+  double val = computeLogLikeConstTerm(a);
+  m_cache_loglike_constterm[a] = val;
+}
+
+double bob::learn::em::PLDABase::getLogLikeConstTerm(const size_t a) const
+{
+  if(!hasLogLikeConstTerm(a))
+    throw std::runtime_error("The LogLikelihood constant term for this number of samples is not currently in cache. You could use the getAddLogLikeConstTerm() method instead");
+  return (m_cache_loglike_constterm.find(a))->second;
+}
+
+double bob::learn::em::PLDABase::getAddLogLikeConstTerm(const size_t a)
+{
+  if(!hasLogLikeConstTerm(a)) precomputeLogLikeConstTerm(a);
+  return m_cache_loglike_constterm[a];
+}
+
+void bob::learn::em::PLDABase::clearMaps()
+{
+  m_cache_gamma.clear();
+  m_cache_loglike_constterm.clear();
+}
+
+double bob::learn::em::PLDABase::computeLogLikelihoodPointEstimate(
+  const blitz::Array<double,1>& xij, const blitz::Array<double,1>& hi,
+  const blitz::Array<double,1>& wij) const
+{
+  // Check inputs
+  bob::core::array::assertSameDimensionLength(xij.extent(0), getDimD());
+  bob::core::array::assertSameDimensionLength(hi.extent(0), getDimF());
+  bob::core::array::assertSameDimensionLength(wij.extent(0), getDimG());
+  // Computes: -D/2 log(2pi) -1/2 log(det(\Sigma))
+  //   -1/2 {(x_{ij}-(\mu+Fh_{i}+Gw_{ij}))^{T}\Sigma^{-1}(x_{ij}-(\mu+Fh_{i}+Gw_{ij}))}
+  double res = -0.5*((double)m_dim_d)*log(2*M_PI) - 0.5*m_cache_logdet_sigma;
+  // m_tmp_d_1 = (x_{ij} - (\mu+Fh_{i}+Gw_{ij}))
+  m_tmp_d_1 = xij - m_mu;
+  bob::math::prod(m_F, hi, m_tmp_d_2);
+  m_tmp_d_1 -= m_tmp_d_2;
+  bob::math::prod(m_G, wij, m_tmp_d_2);
+  m_tmp_d_1 -= m_tmp_d_2;
+  // add third term to res
+  res += -0.5*blitz::sum(blitz::pow2(m_tmp_d_1) * m_cache_isigma);
+  return res;
+}
+
+namespace bob { namespace learn { namespace em {
+  /**
+   * @brief Prints a PLDABase in the output stream. This will print
+   * the values of the parameters \f$\mu\f$, \f$F\f$, \f$G\f$ and
+   * \f$\Sigma\f$ of the PLDA model.
+   */
+  std::ostream& operator<<(std::ostream& os, const PLDABase& m) {
+    os << "mu = " << m.m_mu << std::endl;
+    os << "sigma = " << m.m_sigma << std::endl;
+    os << "F = " << m.m_F << std::endl;
+    os << "G = " << m.m_G << std::endl;
+    return os;
+  }
+} } }
+
+
+bob::learn::em::PLDAMachine::PLDAMachine():
+  m_plda_base(),
+  m_n_samples(0), m_nh_sum_xit_beta_xi(0), m_weighted_sum(0),
+  m_loglikelihood(0), m_cache_gamma(), m_cache_loglike_constterm(),
+  m_tmp_d_1(0), m_tmp_d_2(0), m_tmp_nf_1(0), m_tmp_nf_2(0), m_tmp_nf_nf_1(0,0)
+{
+}
+
+bob::learn::em::PLDAMachine::PLDAMachine(const boost::shared_ptr<bob::learn::em::PLDABase> plda_base):
+  m_plda_base(plda_base),
+  m_n_samples(0), m_nh_sum_xit_beta_xi(0), m_weighted_sum(plda_base->getDimF()),
+  m_loglikelihood(0), m_cache_gamma(), m_cache_loglike_constterm()
+{
+  resizeTmp();
+}
+
+
+bob::learn::em::PLDAMachine::PLDAMachine(const bob::learn::em::PLDAMachine& other):
+  m_plda_base(other.m_plda_base),
+  m_n_samples(other.m_n_samples),
+  m_nh_sum_xit_beta_xi(other.m_nh_sum_xit_beta_xi),
+  m_weighted_sum(bob::core::array::ccopy(other.m_weighted_sum)),
+  m_loglikelihood(other.m_loglikelihood), m_cache_gamma(),
+  m_cache_loglike_constterm(other.m_cache_loglike_constterm)
+{
+  bob::core::array::ccopy(other.m_cache_gamma, m_cache_gamma);
+  resizeTmp();
+}
+
+bob::learn::em::PLDAMachine::PLDAMachine(bob::io::base::HDF5File& config,
+    const boost::shared_ptr<bob::learn::em::PLDABase> plda_base):
+  m_plda_base(plda_base)
+{
+  load(config);
+}
+
+bob::learn::em::PLDAMachine::~PLDAMachine() {
+}
+
+bob::learn::em::PLDAMachine& bob::learn::em::PLDAMachine::operator=
+(const bob::learn::em::PLDAMachine& other)
+{
+  if(this!=&other)
+  {
+    m_plda_base = other.m_plda_base;
+    m_n_samples = other.m_n_samples;
+    m_nh_sum_xit_beta_xi = other.m_nh_sum_xit_beta_xi;
+    m_weighted_sum.reference(bob::core::array::ccopy(other.m_weighted_sum));
+    m_loglikelihood = other.m_loglikelihood;
+    bob::core::array::ccopy(other.m_cache_gamma, m_cache_gamma);
+    m_cache_loglike_constterm = other.m_cache_loglike_constterm;
+    resizeTmp();
+  }
+  return *this;
+}
+
+bool bob::learn::em::PLDAMachine::operator==
+    (const bob::learn::em::PLDAMachine& b) const
+{
+  if (!(( (!m_plda_base && !b.m_plda_base) ||
+          ((m_plda_base && b.m_plda_base) && *(m_plda_base) == *(b.m_plda_base))) &&
+        m_n_samples == b.m_n_samples &&
+        m_nh_sum_xit_beta_xi == b.m_nh_sum_xit_beta_xi &&
+        bob::core::array::isEqual(m_weighted_sum, b.m_weighted_sum) &&
+        m_loglikelihood == b.m_loglikelihood &&
+        bob::core::array::isEqual(m_cache_gamma, b.m_cache_gamma)))
+    return false;
+
+  // m_cache_loglike_constterm
+  if (this->m_cache_loglike_constterm.size() != b.m_cache_loglike_constterm.size())
+    return false;  // differing sizes, they are not the same
+  std::map<size_t, double>::const_iterator i, j;
+  for (i = this->m_cache_loglike_constterm.begin(), j = b.m_cache_loglike_constterm.begin();
+    i != this->m_cache_loglike_constterm.end(); ++i, ++j)
+  {
+    if (i->first != j->first || i->second != j->second)
+      return false;
+  }
+
+  return true;
+}
+
+bool bob::learn::em::PLDAMachine::operator!=
+    (const bob::learn::em::PLDAMachine& b) const
+{
+  return !(this->operator==(b));
+}
+
+bool bob::learn::em::PLDAMachine::is_similar_to(
+  const bob::learn::em::PLDAMachine& b, const double r_epsilon,
+  const double a_epsilon) const
+{
+  return (( (!m_plda_base && !b.m_plda_base) ||
+            ((m_plda_base && b.m_plda_base) &&
+             m_plda_base->is_similar_to(*(b.m_plda_base), r_epsilon, a_epsilon))) &&
+          m_n_samples == b.m_n_samples &&
+          bob::core::isClose(m_nh_sum_xit_beta_xi, b.m_nh_sum_xit_beta_xi, r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(m_weighted_sum, b.m_weighted_sum, r_epsilon, a_epsilon) &&
+          bob::core::isClose(m_loglikelihood, b.m_loglikelihood, r_epsilon, a_epsilon) &&
+          bob::core::array::isClose(m_cache_gamma, b.m_cache_gamma, r_epsilon, a_epsilon) &&
+          bob::core::isClose(m_cache_loglike_constterm, b.m_cache_loglike_constterm, r_epsilon, a_epsilon));
+}
+
+void bob::learn::em::PLDAMachine::load(bob::io::base::HDF5File& config)
+{
+  //reads all data directly into the member variables
+  m_n_samples = config.read<uint64_t>("n_samples");
+  m_nh_sum_xit_beta_xi = config.read<double>("nh_sum_xit_beta_xi");
+  m_weighted_sum.reference(config.readArray<double,1>("weighted_sum"));
+  m_loglikelihood = config.read<double>("loglikelihood");
+  // gamma and log like constant term (a-dependent terms)
+  clearMaps();
+  if(config.contains("a_indices"))
+  {
+    blitz::Array<uint32_t, 1> a_indices;
+    a_indices.reference(config.readArray<uint32_t,1>("a_indices"));
+    for(int i=0; i<a_indices.extent(0); ++i)
+    {
+      std::string str1 = "gamma_" + boost::lexical_cast<std::string>(a_indices(i));
+      m_cache_gamma[a_indices(i)].reference(config.readArray<double,2>(str1));
+      std::string str2 = "loglikeconstterm_" + boost::lexical_cast<std::string>(a_indices(i));
+      m_cache_loglike_constterm[a_indices(i)] = config.read<double>(str2);
+    }
+  }
+  resizeTmp();
+}
+
+void bob::learn::em::PLDAMachine::save(bob::io::base::HDF5File& config) const
+{
+  config.set("n_samples", m_n_samples);
+  config.set("nh_sum_xit_beta_xi", m_nh_sum_xit_beta_xi);
+  config.setArray("weighted_sum", m_weighted_sum);
+  config.set("loglikelihood", m_loglikelihood);
+  // Gamma
+  if(m_cache_gamma.size() > 0)
+  {
+    blitz::Array<uint32_t, 1> a_indices(m_cache_gamma.size());
+    int i = 0;
+    for(std::map<size_t,blitz::Array<double,2> >::const_iterator
+        it=m_cache_gamma.begin(); it!=m_cache_gamma.end(); ++it)
+    {
+      a_indices(i) = it->first;
+      std::string str1 = "gamma_" + boost::lexical_cast<std::string>(it->first);
+      config.setArray(str1, it->second);
+      std::string str2 = "loglikeconstterm_" + boost::lexical_cast<std::string>(it->first);
+      double v = m_cache_loglike_constterm.find(it->first)->second;
+      config.set(str2, v);
+      ++i;
+    }
+    config.setArray("a_indices", a_indices);
+  }
+}
+
+void bob::learn::em::PLDAMachine::setPLDABase(const boost::shared_ptr<bob::learn::em::PLDABase> plda_base)
+{
+  m_plda_base = plda_base;
+  m_weighted_sum.resizeAndPreserve(getDimF());
+  clearMaps();
+  resizeTmp();
+}
+
+
+void bob::learn::em::PLDAMachine::setWeightedSum(const blitz::Array<double,1>& ws)
+{
+  if(ws.extent(0) != m_weighted_sum.extent(0)) {
+    boost::format m("size of parameter `ws' (%d) does not match the expected size (%d)");
+    m % ws.extent(0) % m_weighted_sum.extent(0);
+    throw std::runtime_error(m.str());
+  }
+  m_weighted_sum.reference(bob::core::array::ccopy(ws));
+}
+
+const blitz::Array<double,2>& bob::learn::em::PLDAMachine::getGamma(const size_t a) const
+{
+  // Checks in both base machine and this machine
+  if (m_plda_base->hasGamma(a)) return m_plda_base->getGamma(a);
+  else if (!hasGamma(a))
+    throw std::runtime_error("Gamma for this number of samples is not currently in cache. You could use the getAddGamma() method instead");
+  return (m_cache_gamma.find(a))->second;
+}
+
+const blitz::Array<double,2>& bob::learn::em::PLDAMachine::getAddGamma(const size_t a)
+{
+  if (m_plda_base->hasGamma(a)) return m_plda_base->getGamma(a);
+  else if (hasGamma(a)) return m_cache_gamma[a];
+  // else computes it and adds it to this machine
+  blitz::Array<double,2> gamma_a(getDimF(),getDimF());
+  m_cache_gamma[a].reference(gamma_a);
+  m_plda_base->computeGamma(a, gamma_a);
+  return m_cache_gamma[a];
+}
+
+double bob::learn::em::PLDAMachine::getLogLikeConstTerm(const size_t a) const
+{
+  // Checks in both base machine and this machine
+  if (!m_plda_base) throw std::runtime_error("No PLDABase set to this machine");
+  if (m_plda_base->hasLogLikeConstTerm(a)) return m_plda_base->getLogLikeConstTerm(a);
+  else if (!hasLogLikeConstTerm(a))
+    throw std::runtime_error("The LogLikelihood constant term for this number of samples is not currently in cache. You could use the getAddLogLikeConstTerm() method instead");
+  return (m_cache_loglike_constterm.find(a))->second;
+}
+
+double bob::learn::em::PLDAMachine::getAddLogLikeConstTerm(const size_t a)
+{
+  if (!m_plda_base) throw std::runtime_error("No PLDABase set to this machine");
+  if (m_plda_base->hasLogLikeConstTerm(a)) return m_plda_base->getLogLikeConstTerm(a);
+  else if (hasLogLikeConstTerm(a)) return m_cache_loglike_constterm[a];
+  // else computes it and adds it to this machine
+  m_cache_loglike_constterm[a] =
+        m_plda_base->computeLogLikeConstTerm(a, getAddGamma(a));
+  return m_cache_loglike_constterm[a];
+}
+
+void bob::learn::em::PLDAMachine::clearMaps()
+{
+  m_cache_gamma.clear();
+  m_cache_loglike_constterm.clear();
+}
+
+double bob::learn::em::PLDAMachine::forward(const blitz::Array<double,1>& sample)
+{
+  return forward_(sample);
+}
+
+double bob::learn::em::PLDAMachine::forward_(const blitz::Array<double,1>& sample)
+{
+  // Computes the log likelihood ratio
+  return computeLogLikelihood(sample, true) - // match
+          (computeLogLikelihood(sample, false) + m_loglikelihood); // no match
+}
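+
+// Spelled out: with X the enrollment samples and x the probe,
+//   forward_(x) = log p(x, X) - (log p(x) + log p(X))
+// where log p(X) is the cached m_loglikelihood of the enrolled samples. A
+// positive score thus favours the hypothesis that x and X share the same
+// identity variable h.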
+
+double bob::learn::em::PLDAMachine::forward(const blitz::Array<double,2>& samples)
+{
+  // Computes the log likelihood ratio
+  return computeLogLikelihood(samples, true) - // match
+          (computeLogLikelihood(samples, false) + m_loglikelihood); // no match
+}
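+
+// A minimal scoring sketch (illustrative only, not compiled; a real pipeline
+// first fills m_n_samples, m_weighted_sum and m_loglikelihood through
+// enrollment, which is not part of this file):
+#if 0
+boost::shared_ptr<bob::learn::em::PLDABase> base(
+    new bob::learn::em::PLDABase(64, 8, 4, 0.));
+bob::learn::em::PLDAMachine machine(base);
+blitz::Array<double,1> probe(64);
+probe = 0.;
+double llr = machine.forward(probe); // log-likelihood ratio, see forward_()
+#endif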
+
+double bob::learn::em::PLDAMachine::computeLogLikelihood(const blitz::Array<double,1>& sample,
+  bool enrol) const
+{
+  if (!m_plda_base) throw std::runtime_error("No PLDABase set to this machine");
+  // Check dimensionality
+  bob::core::array::assertSameDimensionLength(sample.extent(0), getDimD());
+
+  int n_samples = 1 + (enrol?m_n_samples:0);
+
+  // 3/ Third term of the likelihood: -1/2*X^T*(SIGMA+A.A^T)^-1*X
+  //    Efficient way: -1/2*sum_i(xi^T.sigma^-1.xi - xi^T.sigma^-1*G*(I+G^T.sigma^-1.G)^-1*G^T*sigma^-1.xi)
+  //      -1/2*sumWeighted^T*(I+aF^T.(sigma^-1-sigma^-1*G*(I+G^T.sigma^-1.G)^-1*G^T*sigma^-1).F)^-1*sumWeighted
+  //      where sumWeighted = sum_i(F^T*(sigma^-1-sigma^-1*G*(I+G^T.sigma^-1.G)^-1*G^T*sigma^-1)*xi)
+  const blitz::Array<double,2>& beta = getPLDABase()->getBeta();
+  const blitz::Array<double,2>& Ft_beta = getPLDABase()->getFtBeta();
+  const blitz::Array<double,1>& mu = getPLDABase()->getMu();
+  double terma = (enrol?m_nh_sum_xit_beta_xi:0.);
+  // sumWeighted
+  if (enrol && m_n_samples > 0) m_tmp_nf_1 = m_weighted_sum;
+  else m_tmp_nf_1 = 0;
+
+  // terma += -1 / 2. * (xi^t*beta*xi)
+  m_tmp_d_1 = sample - mu;
+  bob::math::prod(beta, m_tmp_d_1, m_tmp_d_2);
+  terma += -1 / 2. * (blitz::sum(m_tmp_d_1*m_tmp_d_2));
+
+  // sumWeighted
+  bob::math::prod(Ft_beta, m_tmp_d_1, m_tmp_nf_2);
+  m_tmp_nf_1 += m_tmp_nf_2;
+  blitz::Array<double,2> gamma_a;
+  if (hasGamma(n_samples) || m_plda_base->hasGamma(n_samples))
+    gamma_a.reference(getGamma(n_samples));
+  else
+  {
+    gamma_a.reference(m_tmp_nf_nf_1);
+    m_plda_base->computeGamma(n_samples, gamma_a);
+  }
+  bob::math::prod(gamma_a, m_tmp_nf_1, m_tmp_nf_2);
+  double termb = 1 / 2. * (blitz::sum(m_tmp_nf_1*m_tmp_nf_2));
+
+  // 1/2/ Constant term of the log likelihood:
+  //      1/ First term of the likelihood: -Nsamples*D/2*log(2*PI)
+  //      2/ Second term of the likelihood: -1/2*log(det(SIGMA+A.A^T))
+  //        Efficient way: -Nsamples/2*log(det(sigma))-Nsamples/2*log(det(I+G^T.sigma^-1.G))
+  //       -1/2*log(det(I+aF^T.(sigma^-1-sigma^-1*G*(I+G^T.sigma^-1.G)^-1*G^T*sigma^-1).F))
+  double log_likelihood; // = getAddLogLikeConstTerm(static_cast<size_t>(n_samples));
+  if (hasLogLikeConstTerm(n_samples) || m_plda_base->hasLogLikeConstTerm(n_samples))
+    log_likelihood = getLogLikeConstTerm(n_samples);
+  else
+    log_likelihood = m_plda_base->computeLogLikeConstTerm(n_samples, gamma_a);
+
+  log_likelihood += terma + termb;
+  return log_likelihood;
+}
+
+double bob::learn::em::PLDAMachine::computeLogLikelihood(const blitz::Array<double,2>& samples,
+  bool enrol) const
+{
+  if (!m_plda_base) throw std::runtime_error("No PLDABase set to this machine");
+  // Check dimensionality
+  bob::core::array::assertSameDimensionLength(samples.extent(1), getDimD());
+
+  int n_samples = samples.extent(0) + (enrol?m_n_samples:0);
+  // 3/ Third term of the likelihood: -1/2*X^T*(SIGMA+A.A^T)^-1*X
+  //    Efficient way: -1/2*sum_i(xi^T.sigma^-1.xi - xi^T.sigma^-1*G*(I+G^T.sigma^-1.G)^-1*G^T*sigma^-1.xi)
+  //      -1/2*sumWeighted^T*(I+aF^T.(sigma^-1-sigma^-1*G*(I+G^T.sigma^-1.G)^-1*G^T*sigma^-1).F)^-1*sumWeighted
+  //      where sumWeighted = sum_i(F^T*(sigma^-1-sigma^-1*G*(I+G^T.sigma^-1.G)^-1*G^T*sigma^-1)*xi)
+  const blitz::Array<double,2>& beta = getPLDABase()->getBeta();
+  const blitz::Array<double,2>& Ft_beta = getPLDABase()->getFtBeta();
+  const blitz::Array<double,1>& mu = getPLDABase()->getMu();
+  double terma = (enrol?m_nh_sum_xit_beta_xi:0.);
+  // sumWeighted
+  if (enrol && m_n_samples > 0) m_tmp_nf_1 = m_weighted_sum;
+  else m_tmp_nf_1 = 0;
+  for (int k=0; k<samples.extent(0); ++k)
+  {
+    blitz::Array<double,1> samp = samples(k,blitz::Range::all());
+    m_tmp_d_1 = samp - mu;
+    // terma += -1 / 2. * (xi^t*beta*xi)
+    bob::math::prod(beta, m_tmp_d_1, m_tmp_d_2);
+    terma += -1 / 2. * (blitz::sum(m_tmp_d_1*m_tmp_d_2));
+
+    // sumWeighted
+    bob::math::prod(Ft_beta, m_tmp_d_1, m_tmp_nf_2);
+    m_tmp_nf_1 += m_tmp_nf_2;
+  }
+
+  blitz::Array<double,2> gamma_a;
+  if (hasGamma(n_samples) || m_plda_base->hasGamma(n_samples))
+    gamma_a.reference(getGamma(n_samples));
+  else
+  {
+    gamma_a.reference(m_tmp_nf_nf_1);
+    m_plda_base->computeGamma(n_samples, gamma_a);
+  }
+  bob::math::prod(gamma_a, m_tmp_nf_1, m_tmp_nf_2);
+  double termb = 1 / 2. * (blitz::sum(m_tmp_nf_1*m_tmp_nf_2));
+
+  // 1/2/ Constant term of the log likelihood:
+  //      1/ First term of the likelihood: -Nsamples*D/2*log(2*PI)
+  //      2/ Second term of the likelihood: -1/2*log(det(SIGMA+A.A^T))
+  //        Efficient way: -Nsamples/2*log(det(sigma))-Nsamples/2*log(det(I+G^T.sigma^-1.G))
+  //       -1/2*log(det(I+aF^T.(sigma^-1-sigma^-1*G*(I+G^T.sigma^-1.G)^-1*G^T*sigma^-1).F))
+  double log_likelihood; // = getAddLogLikeConstTerm(static_cast<size_t>(n_samples));
+  if (hasLogLikeConstTerm(n_samples) || m_plda_base->hasLogLikeConstTerm(n_samples))
+    log_likelihood = getLogLikeConstTerm(n_samples);
+  else
+    log_likelihood = m_plda_base->computeLogLikeConstTerm(n_samples, gamma_a);
+
+  log_likelihood += terma + termb;
+  return log_likelihood;
+}
+
+void bob::learn::em::PLDAMachine::resize(const size_t dim_d, const size_t dim_f,
+  const size_t dim_g)
+{
+  m_weighted_sum.resizeAndPreserve(dim_f);
+  clearMaps();
+  resizeTmp();
+}
+
+void bob::learn::em::PLDAMachine::resizeTmp()
+{
+  if (m_plda_base)
+  {
+    m_tmp_d_1.resize(getDimD());
+    m_tmp_d_2.resize(getDimD());
+    m_tmp_nf_1.resize(getDimF());
+    m_tmp_nf_2.resize(getDimF());
+    m_tmp_nf_nf_1.resize(getDimF(), getDimF());
+  }
+}
diff --git a/bob/learn/em/cpp/PLDATrainer.cpp b/bob/learn/em/cpp/PLDATrainer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..f04e1ea669486952eb4d1c095bfd845309e8c696
--- /dev/null
+++ b/bob/learn/em/cpp/PLDATrainer.cpp
@@ -0,0 +1,800 @@
+/**
+ * @date Fri Oct 14 18:07:56 2011 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * @brief Probabilistic Linear Discriminant Analysis
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+
+#include <bob.learn.em/PLDATrainer.h>
+#include <bob.core/check.h>
+#include <bob.core/array_copy.h>
+#include <bob.core/array_random.h>
+#include <bob.math/inv.h>
+#include <bob.math/svd.h>
+#include <bob.core/array_repmat.h>
+#include <algorithm>
+#include <limits>
+#include <vector>
+
+#include <bob.math/linear.h>
+#include <bob.math/linsolve.h>
+
+bob::learn::em::PLDATrainer::PLDATrainer(const bool use_sum_second_order):
+  m_rng(new boost::mt19937()),
+  m_dim_d(0), m_dim_f(0), m_dim_g(0),
+  m_use_sum_second_order(use_sum_second_order),
+  m_initF_method(bob::learn::em::PLDATrainer::RANDOM_F), m_initF_ratio(1.),
+  m_initG_method(bob::learn::em::PLDATrainer::RANDOM_G), m_initG_ratio(1.),
+  m_initSigma_method(bob::learn::em::PLDATrainer::RANDOM_SIGMA),
+  m_initSigma_ratio(1.),
+  m_cache_S(0,0),
+  m_cache_z_first_order(0), m_cache_sum_z_second_order(0,0), m_cache_z_second_order(0),
+  m_cache_n_samples_per_id(0), m_cache_n_samples_in_training(), m_cache_B(0,0),
+  m_cache_Ft_isigma_G(0,0), m_cache_eta(0,0), m_cache_zeta(), m_cache_iota(),
+  m_tmp_nf_1(0), m_tmp_nf_2(0), m_tmp_ng_1(0),
+  m_tmp_D_1(0), m_tmp_D_2(0),
+  m_tmp_nfng_nfng(0,0), m_tmp_D_nfng_1(0,0), m_tmp_D_nfng_2(0,0)
+{
+}
+
+bob::learn::em::PLDATrainer::PLDATrainer(const bob::learn::em::PLDATrainer& other):
+  m_rng(other.m_rng),
+  m_dim_d(other.m_dim_d), m_dim_f(other.m_dim_f), m_dim_g(other.m_dim_g),
+  m_use_sum_second_order(other.m_use_sum_second_order),
+  m_initF_method(other.m_initF_method), m_initF_ratio(other.m_initF_ratio),
+  m_initG_method(other.m_initG_method), m_initG_ratio(other.m_initG_ratio),
+  m_initSigma_method(other.m_initSigma_method), m_initSigma_ratio(other.m_initSigma_ratio),
+  m_cache_S(bob::core::array::ccopy(other.m_cache_S)),
+  m_cache_z_first_order(),
+  m_cache_sum_z_second_order(bob::core::array::ccopy(other.m_cache_sum_z_second_order)),
+  m_cache_z_second_order(),
+  m_cache_n_samples_per_id(other.m_cache_n_samples_per_id),
+  m_cache_n_samples_in_training(other.m_cache_n_samples_in_training),
+  m_cache_B(bob::core::array::ccopy(other.m_cache_B)),
+  m_cache_Ft_isigma_G(bob::core::array::ccopy(other.m_cache_Ft_isigma_G)),
+  m_cache_eta(bob::core::array::ccopy(other.m_cache_eta))
+{
+  bob::core::array::ccopy(other.m_cache_z_first_order, m_cache_z_first_order);
+  bob::core::array::ccopy(other.m_cache_z_second_order, m_cache_z_second_order);
+  bob::core::array::ccopy(other.m_cache_zeta, m_cache_zeta);
+  bob::core::array::ccopy(other.m_cache_iota, m_cache_iota);
+  // Resize working arrays
+  resizeTmp();
+}
+
+bob::learn::em::PLDATrainer::~PLDATrainer() {}
+
+bob::learn::em::PLDATrainer& bob::learn::em::PLDATrainer::operator=
+(const bob::learn::em::PLDATrainer& other)
+{
+  if(this != &other)
+  {
+    m_rng = other.m_rng;
+    m_dim_d = other.m_dim_d;
+    m_dim_f = other.m_dim_f;
+    m_dim_g = other.m_dim_g;
+    m_use_sum_second_order = other.m_use_sum_second_order;
+    m_initF_method = other.m_initF_method;
+    m_initF_ratio = other.m_initF_ratio;
+    m_initG_method = other.m_initG_method;
+    m_initG_ratio = other.m_initG_ratio;
+    m_initSigma_method = other.m_initSigma_method;
+    m_initSigma_ratio = other.m_initSigma_ratio;
+    m_cache_S = bob::core::array::ccopy(other.m_cache_S);
+    bob::core::array::ccopy(other.m_cache_z_first_order, m_cache_z_first_order);
+    m_cache_sum_z_second_order = bob::core::array::ccopy(other.m_cache_sum_z_second_order);
+    bob::core::array::ccopy(other.m_cache_z_second_order, m_cache_z_second_order);
+    m_cache_n_samples_per_id = other.m_cache_n_samples_per_id;
+    m_cache_n_samples_in_training = other.m_cache_n_samples_in_training;
+    m_cache_B = bob::core::array::ccopy(other.m_cache_B);
+    m_cache_Ft_isigma_G = bob::core::array::ccopy(other.m_cache_Ft_isigma_G);
+    m_cache_eta = bob::core::array::ccopy(other.m_cache_eta);
+    bob::core::array::ccopy(other.m_cache_iota, m_cache_iota);
+    // Resize working arrays
+    resizeTmp();
+  }
+  return *this;
+}
+
+bool bob::learn::em::PLDATrainer::operator==
+  (const bob::learn::em::PLDATrainer& other) const
+{
+  return m_rng == other.m_rng &&
+         m_dim_d == other.m_dim_d &&
+         m_dim_f == other.m_dim_f &&
+         m_dim_g == other.m_dim_g &&
+         m_use_sum_second_order == other.m_use_sum_second_order &&
+         m_initF_method == other.m_initF_method &&
+         m_initF_ratio == other.m_initF_ratio &&
+         m_initG_method == other.m_initG_method &&
+         m_initG_ratio == other.m_initG_ratio &&
+         m_initSigma_method == other.m_initSigma_method &&
+         m_initSigma_ratio == other.m_initSigma_ratio &&
+         bob::core::array::isEqual(m_cache_S, other.m_cache_S) &&
+         bob::core::array::isEqual(m_cache_z_first_order, other.m_cache_z_first_order) &&
+         bob::core::array::isEqual(m_cache_sum_z_second_order, other.m_cache_sum_z_second_order) &&
+         bob::core::array::isEqual(m_cache_z_second_order, other.m_cache_z_second_order) &&
+         m_cache_n_samples_per_id.size() == other.m_cache_n_samples_per_id.size() &&
+         std::equal(m_cache_n_samples_per_id.begin(), m_cache_n_samples_per_id.end(), other.m_cache_n_samples_per_id.begin()) &&
+         m_cache_n_samples_in_training.size() == other.m_cache_n_samples_in_training.size() &&
+         std::equal(m_cache_n_samples_in_training.begin(), m_cache_n_samples_in_training.end(), other.m_cache_n_samples_in_training.begin()) &&
+         bob::core::array::isEqual(m_cache_B, other.m_cache_B) &&
+         bob::core::array::isEqual(m_cache_Ft_isigma_G, other.m_cache_Ft_isigma_G) &&
+         bob::core::array::isEqual(m_cache_eta, other.m_cache_eta) &&
+         bob::core::array::isEqual(m_cache_zeta, other.m_cache_zeta) &&
+         bob::core::array::isEqual(m_cache_iota, other.m_cache_iota);
+}
+
+bool bob::learn::em::PLDATrainer::operator!=
+  (const bob::learn::em::PLDATrainer &other) const
+{
+  return !(this->operator==(other));
+}
+
+bool bob::learn::em::PLDATrainer::is_similar_to
+  (const bob::learn::em::PLDATrainer &other, const double r_epsilon,
+   const double a_epsilon) const
+{
+  return m_rng == other.m_rng &&
+         m_dim_d == other.m_dim_d &&
+         m_dim_f == other.m_dim_f &&
+         m_dim_g == other.m_dim_g &&
+         m_use_sum_second_order == other.m_use_sum_second_order &&
+         m_initF_method == other.m_initF_method &&
+         bob::core::isClose(m_initF_ratio, other.m_initF_ratio, r_epsilon, a_epsilon) &&
+         m_initG_method == other.m_initG_method &&
+         bob::core::isClose(m_initG_ratio, other.m_initG_ratio, r_epsilon, a_epsilon) &&
+         m_initSigma_method == other.m_initSigma_method &&
+         bob::core::isClose(m_initSigma_ratio, other.m_initSigma_ratio, r_epsilon, a_epsilon) &&
+         bob::core::array::isClose(m_cache_S, other.m_cache_S, r_epsilon, a_epsilon) &&
+         bob::core::array::isClose(m_cache_z_first_order, other.m_cache_z_first_order, r_epsilon, a_epsilon) &&
+         bob::core::array::isClose(m_cache_sum_z_second_order, other.m_cache_sum_z_second_order, r_epsilon, a_epsilon) &&
+         bob::core::array::isClose(m_cache_z_second_order, other.m_cache_z_second_order, r_epsilon, a_epsilon) &&
+         m_cache_n_samples_per_id.size() == other.m_cache_n_samples_per_id.size() &&
+         std::equal(m_cache_n_samples_per_id.begin(), m_cache_n_samples_per_id.end(), other.m_cache_n_samples_per_id.begin()) &&
+         m_cache_n_samples_in_training.size() == other.m_cache_n_samples_in_training.size() &&
+         std::equal(m_cache_n_samples_in_training.begin(), m_cache_n_samples_in_training.end(), other.m_cache_n_samples_in_training.begin()) &&
+         bob::core::array::isClose(m_cache_B, other.m_cache_B, r_epsilon, a_epsilon) &&
+         bob::core::array::isClose(m_cache_Ft_isigma_G, other.m_cache_Ft_isigma_G, r_epsilon, a_epsilon) &&
+         bob::core::array::isClose(m_cache_eta, other.m_cache_eta, r_epsilon, a_epsilon) &&
+         bob::core::array::isClose(m_cache_zeta, other.m_cache_zeta, r_epsilon, a_epsilon) &&
+         bob::core::array::isClose(m_cache_iota, other.m_cache_iota, r_epsilon, a_epsilon);
+}
+
+void bob::learn::em::PLDATrainer::initialize(bob::learn::em::PLDABase& machine,
+  const std::vector<blitz::Array<double,2> >& v_ar)
+{
+  // Checks training data
+  checkTrainingData(v_ar);
+
+  // Gets dimension (first Arrayset)
+  size_t n_features = v_ar[0].extent(1);
+  m_dim_d = machine.getDimD();
+  // Get dimensionalities from the PLDABase
+  bob::core::array::assertSameDimensionLength(n_features, m_dim_d);
+  m_dim_f = machine.getDimF();
+  m_dim_g = machine.getDimG();
+
+  // Reinitializes array members
+  initMembers(v_ar);
+
+  // Computes the mean and the covariance if required
+  computeMeanVariance(machine, v_ar);
+
+  // Initialization (e.g. using scatter)
+  initFGSigma(machine, v_ar);
+}
+
+void bob::learn::em::PLDATrainer::finalize(bob::learn::em::PLDABase& machine,
+  const std::vector<blitz::Array<double,2> >& v_ar)
+{
+  // Precomputes constant parts of the log likelihood and (gamma_a)
+  precomputeLogLike(machine, v_ar);
+  // Adds the one-sample case if not already cached (it is always used for scoring)
+  machine.getAddGamma(1);
+  machine.getAddLogLikeConstTerm(1);
+}
+
+void bob::learn::em::PLDATrainer::checkTrainingData(const std::vector<blitz::Array<double,2> >& v_ar)
+{
+  // Checks that the vector of Arraysets is not empty
+  if (v_ar.size() == 0) {
+    throw std::runtime_error("input training set is empty");
+  }
+
+  // Gets dimension (first Arrayset)
+  int n_features = v_ar[0].extent(1);
+  // Checks dimension consistency
+  for (size_t i=0; i<v_ar.size(); ++i) {
+    if (v_ar[i].extent(1) != n_features) {
+      boost::format m("number of features (columns) of array for class %u (%d) does not match that of array for class 0 (%d)");
+      m % i % v_ar[i].extent(1) % n_features;
+      throw std::runtime_error(m.str());
+    }
+  }
+}
+
+void bob::learn::em::PLDATrainer::initMembers(const std::vector<blitz::Array<double,2> >& v_ar)
+{
+  // Gets dimension (first Arrayset)
+  const size_t n_features = v_ar[0].extent(1); // dimensionality of the data
+  const size_t n_identities = v_ar.size();
+
+  m_cache_S.resize(n_features, n_features);
+  m_cache_sum_z_second_order.resize(m_dim_f+m_dim_g, m_dim_f+m_dim_g);
+
+  // Loops over the identities
+  for (size_t i=0; i<n_identities; ++i)
+  {
+    // Number of training samples for this identity
+    const size_t n_i = v_ar[i].extent(0);
+    // m_cache_z_first_order
+    blitz::Array<double,2> z_i(n_i, m_dim_f+m_dim_g);
+    m_cache_z_first_order.push_back(z_i);
+    // m_z_second_order
+    if (!m_use_sum_second_order)
+    {
+      blitz::Array<double,3> z2_i(n_i, m_dim_f+m_dim_g, m_dim_f+m_dim_g);
+      m_cache_z_second_order.push_back(z2_i);
+    }
+
+    // m_cache_n_samples_per_id
+    m_cache_n_samples_per_id.push_back(n_i);
+
+    // Maps dependent on the number of samples per identity
+    std::map<size_t,bool>::iterator it;
+    it = m_cache_n_samples_in_training.find(n_i);
+    if (it == m_cache_n_samples_in_training.end())
+    {
+      // Indicates if there are identities with n_i training samples and if
+      // corresponding matrices are up to date.
+      m_cache_n_samples_in_training[n_i] = false;
+      // Allocates arrays for identities with n_i training samples
+      m_cache_zeta[n_i].reference(blitz::Array<double,2>(m_dim_g, m_dim_g));
+      m_cache_iota[n_i].reference(blitz::Array<double,2>(m_dim_f, m_dim_g));
+    }
+  }
+
+  m_cache_B.resize(n_features, m_dim_f+m_dim_g);
+  m_cache_Ft_isigma_G.resize(m_dim_f, m_dim_g);
+  m_cache_eta.resize(m_dim_f, m_dim_g);
+
+  // Working arrays
+  resizeTmp();
+}
+
+void bob::learn::em::PLDATrainer::resizeTmp()
+{
+  m_tmp_nf_1.resize(m_dim_f);
+  m_tmp_nf_2.resize(m_dim_f);
+  m_tmp_ng_1.resize(m_dim_g);
+  m_tmp_D_1.resize(m_dim_d);
+  m_tmp_D_2.resize(m_dim_d);
+  m_tmp_nfng_nfng.resize(m_dim_f+m_dim_g, m_dim_f+m_dim_g);
+  m_tmp_D_nfng_1.resize(m_dim_d, m_dim_f+m_dim_g);
+  m_tmp_D_nfng_2.resize(m_dim_d, m_dim_f+m_dim_g);
+}
+
+void bob::learn::em::PLDATrainer::computeMeanVariance(bob::learn::em::PLDABase& machine,
+  const std::vector<blitz::Array<double,2> >& v_ar)
+{
+  blitz::Array<double,1>& mu = machine.updateMu();
+  blitz::Range all = blitz::Range::all();
+  // TODO: Uncomment variance computation if required
+  /*  if(m_compute_likelihood)
+  {
+    // loads all the data in a single shot - required for scatter
+    blitz::Array<double,2> data(n_features, n_samples);
+    for (size_t i=0; i<n_samples; ++i)
+      data(all,i) = ar(i,all);
+    // Mean and scatter computation
+    bob::math::scatter(data, m_cache_S, mu);
+    // divides scatter by N-1
+    m_cache_S /= static_cast<double>(n_samples-1);
+  }
+  else */
+  {
+    // Computes the mean and updates mu
+    mu = 0.;
+    size_t n_samples = 0;
+    for (size_t j=0; j<v_ar.size(); ++j) {
+      n_samples += v_ar[j].extent(0);
+      for (int i=0; i<v_ar[j].extent(0); ++i)
+        mu += v_ar[j](i,all);
+    }
+    mu /= static_cast<double>(n_samples);
+    m_cache_S = 0.;
+  }
+}
+
+void bob::learn::em::PLDATrainer::initFGSigma(bob::learn::em::PLDABase& machine,
+  const std::vector<blitz::Array<double,2> >& v_ar)
+{
+  // Initializes F, G and sigma
+  initF(machine, v_ar);
+  initG(machine, v_ar);
+  initSigma(machine, v_ar);
+
+  // Precomputes values using new F, G and sigma
+  machine.precompute();
+}
+
+void bob::learn::em::PLDATrainer::initF(bob::learn::em::PLDABase& machine,
+  const std::vector<blitz::Array<double,2> >& v_ar)
+{
+  blitz::Array<double,2>& F = machine.updateF();
+  blitz::Range a = blitz::Range::all();
+
+  // 1: between-class scatter
+  if (m_initF_method == bob::learn::em::PLDATrainer::BETWEEN_SCATTER)
+  {
+    if (machine.getDimF() > v_ar.size()) {
+      boost::format m("The rank of the matrix F ('%ld') can't be larger than the number of classes in the training set ('%ld')");
+      m % machine.getDimF() % v_ar.size();
+      throw std::runtime_error(m.str());
+    }
+
+    // a/ Computes between-class scatter matrix
+    blitz::firstIndex bi;
+    blitz::secondIndex bj;
+    blitz::Array<double,2> S(machine.getDimD(), v_ar.size());
+    S = 0.;
+    m_tmp_D_1 = 0.;
+    for (size_t i=0; i<v_ar.size(); ++i)
+    {
+      blitz::Array<double,1> Si = S(blitz::Range::all(),i);
+      Si = 0.;
+      for (int j=0; j<v_ar[i].extent(0); ++j)
+      {
+        // Si += x_ij
+        Si += v_ar[i](j,a);
+      }
+      // Si = mean of the samples of class i
+      Si /= static_cast<double>(v_ar[i].extent(0));
+      m_tmp_D_1 += Si;
+    }
+    m_tmp_D_1 /= static_cast<double>(v_ar.size());
+
+    // b/ Removes the mean
+    S = S(bi,bj) - m_tmp_D_1(bi);
+
+    // c/ SVD of the between-class scatter matrix
+    const size_t n_singular = std::min(machine.getDimD(),v_ar.size());
+    blitz::Array<double,2> U(machine.getDimD(), n_singular);
+    blitz::Array<double,1> sigma(n_singular);
+    bob::math::svd(S, U, sigma);
+
+    // d/ Updates F
+    blitz::Array<double,2> Uslice = U(a, blitz::Range(0,m_dim_f-1));
+    blitz::Array<double,1> sigma_slice = sigma(blitz::Range(0,m_dim_f-1));
+    sigma_slice = blitz::sqrt(sigma_slice);
+    F = Uslice(bi,bj) / sigma_slice(bj);
+  }
+  // otherwise: random initialization
+  else {
+    // F initialization
+    bob::core::array::randn(*m_rng, F);
+    F *= m_initF_ratio;
+  }
+}
+
+void bob::learn::em::PLDATrainer::initG(bob::learn::em::PLDABase& machine,
+  const std::vector<blitz::Array<double,2> >& v_ar)
+{
+  blitz::Array<double,2>& G = machine.updateG();
+  blitz::Range a = blitz::Range::all();
+
+  // 1: within-class scatter
+  if (m_initG_method == bob::learn::em::PLDATrainer::WITHIN_SCATTER)
+  {
+    // a/ Computes within-class scatter matrix
+    blitz::firstIndex bi;
+    blitz::secondIndex bj;
+    size_t Nsamples=0;
+    for (size_t i=0; i<v_ar.size(); ++i)
+      Nsamples += v_ar[i].extent(0);
+
+    blitz::Array<double,2> S(machine.getDimD(), Nsamples);
+    S = 0.;
+    m_tmp_D_1 = 0.;
+    int counter = 0;
+    for (size_t i=0; i<v_ar.size(); ++i)
+    {
+      // Computes the mean of the samples of class i
+      m_tmp_D_2 = 0.;
+      for (int j=0; j<v_ar[i].extent(0); ++j)
+      {
+        // m_tmp_D_2 += x_ij
+        m_tmp_D_2 += v_ar[i](j,a);
+      }
+      // m_tmp_D_2 = mean of the samples of class i
+      m_tmp_D_2 /= static_cast<double>(v_ar[i].extent(0));
+
+      // Generates the scatter
+      for (int j=0; j<v_ar[i].extent(0); ++j)
+      {
+        blitz::Array<double,1> Si = S(a, counter);
+        // Si = x_ij - mean_i
+        Si = v_ar[i](j,a) - m_tmp_D_2;
+        // mean of the within class
+        m_tmp_D_1 += Si;
+        ++counter;
+      }
+    }
+    m_tmp_D_1 /= static_cast<double>(Nsamples);
+
+    // b/ Removes the mean
+    S = S(bi,bj) - m_tmp_D_1(bi);
+
+    // c/ SVD of the within-class scatter matrix
+    blitz::Array<double,2> U(m_dim_d, std::min(m_dim_d, Nsamples));
+    blitz::Array<double,1> sigma(std::min(m_dim_d, Nsamples));
+    bob::math::svd(S, U, sigma);
+
+    // d/ Updates G
+    blitz::Array<double,2> Uslice = U(blitz::Range::all(), blitz::Range(0,m_dim_g-1));
+    blitz::Array<double,1> sigma_slice = sigma(blitz::Range(0,m_dim_g-1));
+    sigma_slice = blitz::sqrt(sigma_slice);
+    G = Uslice(bi,bj) / sigma_slice(bj);
+  }
+  // otherwise: random initialization
+  else {
+    // G initialization
+    bob::core::array::randn(*m_rng, G);
+    G *= m_initG_ratio;
+  }
+}
+
+void bob::learn::em::PLDATrainer::initSigma(bob::learn::em::PLDABase& machine,
+  const std::vector<blitz::Array<double,2> >& v_ar)
+{
+  blitz::Array<double,1>& sigma = machine.updateSigma();
+  blitz::Range a = blitz::Range::all();
+
+  // 1: percentage of the variance of G
+  if (m_initSigma_method == bob::learn::em::PLDATrainer::VARIANCE_G) {
+    const blitz::Array<double,2>& G = machine.getG();
+    blitz::secondIndex bj;
+    m_tmp_D_1 = blitz::mean(G, bj);
+    // Updates sigma
+    sigma = blitz::fabs(m_tmp_D_1) * m_initSigma_ratio;
+  }
+  // 2: constant value
+  else if (m_initSigma_method == bob::learn::em::PLDATrainer::CONSTANT) {
+    sigma = m_initSigma_ratio;
+  }
+  // 3: percentage of the variance of the data
+  else if (m_initSigma_method == bob::learn::em::PLDATrainer::VARIANCE_DATA) {
+    // a/ Computes the global mean
+    //    m_tmp_D_1 = 1/N sum_i x_i
+    m_tmp_D_1 = 0.;
+    size_t Ns = 0;
+    for (size_t i=0; i<v_ar.size(); ++i)
+    {
+      for (int j=0; j<v_ar[i].extent(0); ++j)
+        m_tmp_D_1 += v_ar[i](j,a);
+      Ns += v_ar[i].extent(0);
+    }
+    m_tmp_D_1 /= static_cast<double>(Ns);
+
+    // b/ Computes the variance:
+    m_tmp_D_2 = 0.;
+    for (size_t i=0; i<v_ar.size(); ++i)
+      for (int j=0; j<v_ar[i].extent(0); ++j)
+        m_tmp_D_2 += blitz::pow2(v_ar[i](j,a) - m_tmp_D_1);
+    sigma = m_initSigma_ratio * m_tmp_D_2 / static_cast<double>(Ns-1);
+  }
+  // otherwise: random initialization
+  else {
+    // sigma initialization
+    bob::core::array::randn(*m_rng, sigma);
+    sigma = blitz::fabs(sigma) * m_initSigma_ratio;
+  }
+  // Apply variance threshold
+  machine.applyVarianceThreshold();
+}
+
+void bob::learn::em::PLDATrainer::eStep(bob::learn::em::PLDABase& machine,
+  const std::vector<blitz::Array<double,2> >& v_ar)
+{
+  // Precomputes useful variables using current estimates of F,G, and sigma
+  precomputeFromFGSigma(machine);
+  // Gets the mean mu from the machine
+  const blitz::Array<double,1>& mu = machine.getMu();
+  const blitz::Array<double,2>& alpha = machine.getAlpha();
+  const blitz::Array<double,2>& F = machine.getF();
+  const blitz::Array<double,2>& FtBeta = machine.getFtBeta();
+  const blitz::Array<double,2>& GtISigma = machine.getGtISigma();
+  blitz::Range a = blitz::Range::all();
+
+  // blitz indices
+  blitz::firstIndex bi;
+  blitz::secondIndex bj;
+  // Initializes sum of z second order statistics to 0
+  m_cache_sum_z_second_order = 0.;
+  for (size_t i=0; i<v_ar.size(); ++i)
+  {
+    // Computes expectation of z_ij = [h_i w_ij]
+    // 1/a/ Computes expectation of h_i
+    // Loop over the samples
+    m_tmp_nf_1 = 0.;
+    for (int j=0; j<v_ar[i].extent(0); ++j)
+    {
+      // m_tmp_D_1 = x_sj-mu
+      m_tmp_D_1 = v_ar[i](j,a) - mu;
+
+      // m_tmp_nf_2 = F^T.beta.(x_sj-mu)
+      bob::math::prod(FtBeta, m_tmp_D_1, m_tmp_nf_2);
+      // m_tmp_nf_1 = sum_j F^T.beta.(x_sj-mu)
+      m_tmp_nf_1 += m_tmp_nf_2;
+    }
+    const blitz::Array<double,2>& gamma_a = machine.getAddGamma(v_ar[i].extent(0));
+    blitz::Range r_hi(0, m_dim_f-1);
+    // m_tmp_nf_2 = E{h_i} = gamma_a . sum_j F^T.beta.(x_sj-mu)
+    bob::math::prod(gamma_a, m_tmp_nf_1, m_tmp_nf_2);
+
+    // 1/b/ Precomputes: m_tmp_D_2 = F.E{h_i}
+    bob::math::prod(F, m_tmp_nf_2, m_tmp_D_2);
+
+    // 2/ First and second order statistics of z
+    // Precomputed values
+    blitz::Array<double,2>& zeta_a = m_cache_zeta[v_ar[i].extent(0)];
+    blitz::Array<double,2>& iota_a = m_cache_iota[v_ar[i].extent(0)];
+    blitz::Array<double,2> iotat_a = iota_a.transpose(1,0);
+
+    // Extracts statistics of z_ij = [h_i w_ij] from y_i = [h_i w_i1 ... w_iJ]
+    blitz::Range r1(0, m_dim_f-1);
+    blitz::Range r2(m_dim_f, m_dim_f+m_dim_g-1);
+    for (int j=0; j<v_ar[i].extent(0); ++j)
+    {
+      // 1/ First order statistics of z
+      blitz::Array<double,1> z_first_order_ij_1 = m_cache_z_first_order[i](j,r1);
+      z_first_order_ij_1 = m_tmp_nf_2; // E{h_i}
+      // m_tmp_D_1 = x_sj - mu - F.E{h_i}
+      m_tmp_D_1 = v_ar[i](j,a) - mu - m_tmp_D_2;
+      // m_tmp_ng_1 = G^T.sigma^-1.(x_sj-mu-fhi)
+      bob::math::prod(GtISigma, m_tmp_D_1, m_tmp_ng_1);
+      // z_first_order_ij_2 = (Id+G^T.sigma^-1.G)^-1.G^T.sigma^-1.(x_sj-mu-F.E{h_i}) = E{w_ij}
+      blitz::Array<double,1> z_first_order_ij_2 = m_cache_z_first_order[i](j,r2);
+      bob::math::prod(alpha, m_tmp_ng_1, z_first_order_ij_2);
+
+      // 2/ Second order statistics of z
+      blitz::Array<double,2> z_sum_so_11 = m_cache_sum_z_second_order(r1,r1);
+      blitz::Array<double,2> z_sum_so_12 = m_cache_sum_z_second_order(r1,r2);
+      blitz::Array<double,2> z_sum_so_21 = m_cache_sum_z_second_order(r2,r1);
+      blitz::Array<double,2> z_sum_so_22 = m_cache_sum_z_second_order(r2,r2);
+      if (m_use_sum_second_order)
+      {
+        z_sum_so_11 += gamma_a + z_first_order_ij_1(bi) * z_first_order_ij_1(bj);
+        z_sum_so_12 += iota_a + z_first_order_ij_1(bi) * z_first_order_ij_2(bj);
+        z_sum_so_21 += iotat_a + z_first_order_ij_2(bi) * z_first_order_ij_1(bj);
+        z_sum_so_22 += zeta_a + z_first_order_ij_2(bi) * z_first_order_ij_2(bj);
+      }
+      else
+      {
+        blitz::Array<double,2> z_so_11 = m_cache_z_second_order[i](j,r1,r1);
+        z_so_11 = gamma_a + z_first_order_ij_1(bi) * z_first_order_ij_1(bj);
+        z_sum_so_11 += z_so_11;
+        blitz::Array<double,2> z_so_12 = m_cache_z_second_order[i](j,r1,r2);
+        z_so_12 = iota_a + z_first_order_ij_1(bi) * z_first_order_ij_2(bj);
+        z_sum_so_12 += z_so_12;
+        blitz::Array<double,2> z_so_21 = m_cache_z_second_order[i](j,r2,r1);
+        z_so_21 = iotat_a + z_first_order_ij_2(bi) * z_first_order_ij_1(bj);
+        z_sum_so_21 += z_so_21;
+        blitz::Array<double,2> z_so_22 = m_cache_z_second_order[i](j,r2,r2);
+        z_so_22 = zeta_a + z_first_order_ij_2(bi) * z_first_order_ij_2(bj);
+        z_sum_so_22 += z_so_22;
+      }
+    }
+  }
+}
+
+void bob::learn::em::PLDATrainer::precomputeFromFGSigma(bob::learn::em::PLDABase& machine)
+{
+  // Blitz compatibility: ugly fix (const_cast, as old blitz version does not
+  // provide a non-const version of transpose())
+  const blitz::Array<double,2>& F = machine.getF();
+  const blitz::Array<double,2> Ft = const_cast<blitz::Array<double,2>&>(F).transpose(1,0);
+  const blitz::Array<double,2>& Gt_isigma = machine.getGtISigma();
+  const blitz::Array<double,2> Gt_isigma_t = const_cast<blitz::Array<double,2>&>(Gt_isigma).transpose(1,0);
+  const blitz::Array<double,2>& alpha = machine.getAlpha();
+
+  // Precomputes F, G and sigma-based expressions
+  bob::math::prod(Ft, Gt_isigma_t, m_cache_Ft_isigma_G);
+  bob::math::prod(m_cache_Ft_isigma_G, alpha, m_cache_eta);
+  blitz::Array<double,2> etat = m_cache_eta.transpose(1,0);
+
+  // Reinitializes all the zeta_a and iota_a
+  std::map<size_t,bool>::iterator it;
+  for (it=m_cache_n_samples_in_training.begin(); it!=m_cache_n_samples_in_training.end();
+      ++it)
+    it->second = false;
+
+  for (it=m_cache_n_samples_in_training.begin(); it!=m_cache_n_samples_in_training.end();
+      ++it)
+  {
+    size_t n_i = it->first;
+    // Precomputes zeta and iota for identities with q_i training samples,
+    // if not already done
+    if (!it->second)
+    {
+      const blitz::Array<double,2>& gamma_a = machine.getAddGamma(n_i);
+      blitz::Array<double,2>& zeta_a = m_cache_zeta[n_i];
+      blitz::Array<double,2>& iota_a = m_cache_iota[n_i];
+      bob::math::prod(gamma_a, m_cache_eta, iota_a);
+      bob::math::prod(etat, iota_a, zeta_a);
+      zeta_a += alpha;
+      iota_a = - iota_a;
+      // Now up to date
+      it->second = true;
+    }
+  }
+}
+
+void bob::learn::em::PLDATrainer::precomputeLogLike(bob::learn::em::PLDABase& machine,
+  const std::vector<blitz::Array<double,2> >& v_ar)
+{
+  // Precomputes the log determinant of alpha and sigma
+  machine.precomputeLogLike();
+
+  // Precomputes the log likelihood constant term
+  std::map<size_t,bool>::iterator it;
+  for (it=m_cache_n_samples_in_training.begin();
+       it!=m_cache_n_samples_in_training.end(); ++it)
+  {
+    // Precomputes the log likelihood constant term for identities with q_i
+    // training samples, if not already done
+    machine.getAddLogLikeConstTerm(it->first);
+  }
+}
+
+
+void bob::learn::em::PLDATrainer::mStep(bob::learn::em::PLDABase& machine,
+  const std::vector<blitz::Array<double,2> >& v_ar)
+{
+  // 1/ New estimate of B = [F G]
+  updateFG(machine, v_ar);
+
+  // 2/ New estimate of Sigma
+  updateSigma(machine, v_ar);
+
+  // 3/ Precomputes new values after updating F, G and sigma
+  machine.precompute();
+  // Precomputes useful variables using current estimates of F,G, and sigma
+  precomputeFromFGSigma(machine);
+}
+
+void bob::learn::em::PLDATrainer::updateFG(bob::learn::em::PLDABase& machine,
+  const std::vector<blitz::Array<double,2> >& v_ar)
+{
+  /// Computes the B matrix (B = [F G])
+  /// B = (sum_ij (x_ij-mu).E{z_i}^T).(sum_ij E{z_i.z_i^T})^-1
+
+  // 1/ Computes the numerator (sum_ij (x_ij-mu).E{z_i}^T)
+  // Gets the mean mu from the machine
+  const blitz::Array<double,1>& mu = machine.getMu();
+  blitz::Range a = blitz::Range::all();
+  m_tmp_D_nfng_2 = 0.;
+  for (size_t i=0; i<v_ar.size(); ++i)
+  {
+    // Loop over the samples
+    for (int j=0; j<v_ar[i].extent(0); ++j)
+    {
+      // m_tmp_D_1 = x_sj-mu
+      m_tmp_D_1 = v_ar[i](j,a) - mu;
+      // z_first_order_ij = E{z_ij}
+      blitz::Array<double,1> z_first_order_ij = m_cache_z_first_order[i](j, a);
+      // m_tmp_D_nfng_1 = (x_sj-mu).E{z_ij}^T
+      bob::math::prod(m_tmp_D_1, z_first_order_ij, m_tmp_D_nfng_1);
+      m_tmp_D_nfng_2 += m_tmp_D_nfng_1;
+    }
+  }
+
+  // 2/ Computes the denominator inv(sum_ij E{z_i.z_i^T})
+  bob::math::inv(m_cache_sum_z_second_order, m_tmp_nfng_nfng);
+
+  // 3/ Computes numerator / denominator
+  bob::math::prod(m_tmp_D_nfng_2, m_tmp_nfng_nfng, m_cache_B);
+
+  // 4/ Updates the machine
+  blitz::Array<double, 2>& F = machine.updateF();
+  blitz::Array<double, 2>& G = machine.updateG();
+  F = m_cache_B(a, blitz::Range(0, m_dim_f-1));
+  G = m_cache_B(a, blitz::Range(m_dim_f, m_dim_f+m_dim_g-1));
+}
+
+void bob::learn::em::PLDATrainer::updateSigma(bob::learn::em::PLDABase& machine,
+  const std::vector<blitz::Array<double,2> >& v_ar)
+{
+  /// Computes the Sigma matrix
+  /// Sigma = 1/IJ sum_ij Diag{(x_ij-mu).(x_ij-mu)^T - B.E{z_i}.(x_ij-mu)^T}
+
+  // Gets the mean mu and the matrix sigma from the machine
+  blitz::Array<double,1>& sigma = machine.updateSigma();
+  const blitz::Array<double,1>& mu = machine.getMu();
+  blitz::Range a = blitz::Range::all();
+
+  sigma = 0.;
+  size_t n_IJ=0; /// counts the number of samples
+  for (size_t i=0; i<v_ar.size(); ++i)
+  {
+    // Loop over the samples
+    for (int j=0; j<v_ar[i].extent(0); ++j)
+    {
+      // m_tmp_D_1 = x_ij-mu
+      m_tmp_D_1 = v_ar[i](j,a) - mu;
+      // sigma += Diag{(x_ij-mu).(x_ij-mu)^T}
+      sigma += blitz::pow2(m_tmp_D_1);
+
+      // z_first_order_ij = E{z_ij}
+      blitz::Array<double,1> z_first_order_ij = m_cache_z_first_order[i](j,a);
+      // m_tmp_D_2 = B.E{z_ij}
+      bob::math::prod(m_cache_B, z_first_order_ij, m_tmp_D_2);
+      // sigma -= Diag{B.E{z_ij}.(x_ij-mu)^T}
+      sigma -= (m_tmp_D_1 * m_tmp_D_2);
+      ++n_IJ;
+    }
+  }
+  // Normalizes by the number of samples
+  sigma /= static_cast<double>(n_IJ);
+  // Apply variance threshold
+  machine.applyVarianceThreshold();
+}
+
+
+void bob::learn::em::PLDATrainer::enrol(bob::learn::em::PLDAMachine& plda_machine,
+  const blitz::Array<double,2>& ar) const
+{
+  // Gets dimension
+  const size_t dim_d = ar.extent(1);
+  const int n_samples = ar.extent(0);
+  // Compare the dimensionality from the base trainer/machine with the one
+  // of the enrollment samples
+  if (plda_machine.getDimD() != dim_d) {
+    boost::format m("the extent of the D dimension of the input machine (%u) does not match the input sample (%u)");
+    m % plda_machine.getDimD() % dim_d;
+    throw std::runtime_error(m.str());
+  }
+  const size_t dim_f = plda_machine.getDimF();
+
+  // Resize working arrays
+  m_tmp_D_1.resize(dim_d);
+  m_tmp_D_2.resize(dim_d);
+  m_tmp_nf_1.resize(dim_f);
+
+  // Useful values from the base machine
+  blitz::Array<double,1>& weighted_sum = plda_machine.updateWeightedSum();
+  const blitz::Array<double,1>& mu = plda_machine.getPLDABase()->getMu();
+  const blitz::Array<double,2>& beta = plda_machine.getPLDABase()->getBeta();
+  const blitz::Array<double,2>& FtBeta = plda_machine.getPLDABase()->getFtBeta();
+
+  // Updates the PLDA machine
+  plda_machine.setNSamples(n_samples);
+  double terma = 0.;
+  weighted_sum = 0.;
+  blitz::Range a = blitz::Range::all();
+  for (int i=0; i<n_samples; ++i) {
+    m_tmp_D_1 =  ar(i,a) - mu;
+    // a/ weighted sum
+    bob::math::prod(FtBeta, m_tmp_D_1, m_tmp_nf_1);
+    weighted_sum += m_tmp_nf_1;
+    // b/ first xi dependent term of the log likelihood
+    bob::math::prod(beta, m_tmp_D_1, m_tmp_D_2);
+    terma += -1 / 2. * blitz::sum(m_tmp_D_1 * m_tmp_D_2);
+  }
+  plda_machine.setWSumXitBetaXi(terma);
+
+  // Adds the precomputed values for the cases N and N+1 if not already
+  // in the base machine (used by the forward function, 1 already added)
+  plda_machine.getAddGamma(n_samples);
+  plda_machine.getAddLogLikeConstTerm(n_samples);
+  plda_machine.getAddGamma(n_samples+1);
+  plda_machine.getAddLogLikeConstTerm(n_samples+1);
+  plda_machine.setLogLikelihood(plda_machine.computeLogLikelihood(
+                                  blitz::Array<double,2>(0,dim_d),true));
+}
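
For reference, the E- and M-step updates implemented by eStep, updateFG and updateSigma above, transcribed from the in-code comments into equation form (x_ij is sample j of identity i, and z_ij = [h_i, w_ij] collects the latent variables):

$$E\{h_i\} = \gamma_a \sum_j F^T \beta\,(x_{ij}-\mu), \qquad E\{w_{ij}\} = \alpha\, G^T \Sigma^{-1} (x_{ij}-\mu-F\,E\{h_i\})$$

$$B = [F\; G] = \Big(\sum_{i,j} (x_{ij}-\mu)\,E\{z_{ij}\}^T\Big) \Big(\sum_{i,j} E\{z_{ij} z_{ij}^T\}\Big)^{-1}, \qquad \Sigma = \frac{1}{IJ} \sum_{i,j} \mathrm{diag}\Big\{(x_{ij}-\mu)(x_{ij}-\mu)^T - B\,E\{z_{ij}\}(x_{ij}-\mu)^T\Big\}$$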
diff --git a/bob/learn/em/cpp/ZTNorm.cpp b/bob/learn/em/cpp/ZTNorm.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..707b9a1c93b1a18c3726674a49b4382446a5e7cb
--- /dev/null
+++ b/bob/learn/em/cpp/ZTNorm.cpp
@@ -0,0 +1,182 @@
+/**
+ * @date Tue Jul 19 15:33:20 2011 +0200
+ * @author Francois Moulin <Francois.Moulin@idiap.ch>
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include <bob.learn.em/ZTNorm.h>
+#include <bob.core/assert.h>
+#include <limits>
+
+
+static void _ztNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
+            const blitz::Array<double,2>* rawscores_zprobes_vs_models,
+            const blitz::Array<double,2>* rawscores_probes_vs_tmodels,
+            const blitz::Array<double,2>* rawscores_zprobes_vs_tmodels,
+            const blitz::Array<bool,2>* mask_zprobes_vs_tmodels_istruetrial,
+            blitz::Array<double,2>& scores)
+{
+  // Rename variables
+  const blitz::Array<double,2>& A = rawscores_probes_vs_models;
+  const blitz::Array<double,2>* B = rawscores_zprobes_vs_models;
+  const blitz::Array<double,2>* C = rawscores_probes_vs_tmodels;
+  const blitz::Array<double,2>* D = rawscores_zprobes_vs_tmodels;
+
+  // Compute the sizes
+  int size_eval  = A.extent(0);
+  int size_enrol = A.extent(1);
+  int size_tnorm = (C ? C->extent(0) : 0);
+  int size_znorm = (B ? B->extent(1) : 0);
+
+  // Check the inputs
+  bob::core::array::assertSameDimensionLength(A.extent(0), size_eval);
+  bob::core::array::assertSameDimensionLength(A.extent(1), size_enrol);
+
+  if (B) {
+    bob::core::array::assertSameDimensionLength(B->extent(1), size_znorm);
+    if (size_znorm > 0)
+      bob::core::array::assertSameDimensionLength(B->extent(0), size_eval);
+  }
+
+  if (C) {
+    bob::core::array::assertSameDimensionLength(C->extent(0), size_tnorm);
+    if (size_tnorm > 0)
+      bob::core::array::assertSameDimensionLength(C->extent(1), size_enrol);
+  }
+
+  if (D && size_znorm > 0 && size_tnorm > 0) {
+    bob::core::array::assertSameDimensionLength(D->extent(0), size_tnorm);
+    bob::core::array::assertSameDimensionLength(D->extent(1), size_znorm);
+  }
+
+  if (mask_zprobes_vs_tmodels_istruetrial) {
+    bob::core::array::assertSameDimensionLength(mask_zprobes_vs_tmodels_istruetrial->extent(0), size_tnorm);
+    bob::core::array::assertSameDimensionLength(mask_zprobes_vs_tmodels_istruetrial->extent(1), size_znorm);
+  }
+
+  bob::core::array::assertSameDimensionLength(scores.extent(0), size_eval);
+  bob::core::array::assertSameDimensionLength(scores.extent(1), size_enrol);
+
+  // Declare needed IndexPlaceholder
+  blitz::firstIndex ii;
+  blitz::secondIndex jj;
+
+  // Constant to check if the std is close to 0.
+  const double eps = std::numeric_limits<double>::min();
+
+  // zA
+  blitz::Array<double,2> zA(A.shape());
+  if (B && size_znorm > 0) {
+    // Znorm  -->      zA  = (A - mean(B)) / std(B)    [znorm on original scores]
+    // mean(B)
+    blitz::Array<double,1> mean_B(blitz::mean(*B, jj));
+    // std(B)
+    blitz::Array<double,2> B2n(B->shape());
+    B2n = blitz::pow2((*B)(ii, jj) - mean_B(ii));
+    blitz::Array<double,1> std_B(B->extent(0));
+    if(size_znorm>1)
+      std_B = blitz::sqrt(blitz::sum(B2n, jj) / (size_znorm - 1));
+    else // 1 single value -> std = 0
+      std_B = 0;
+    std_B = blitz::where( std_B <= eps, 1., std_B);
+
+    zA = (A(ii, jj) - mean_B(ii)) / std_B(ii);
+  }
+  else
+    zA = A;
+
+  blitz::Array<double,2> zC(size_tnorm, size_enrol);
+  if (D && size_tnorm > 0 && size_znorm > 0) {
+    blitz::Array<double,1> mean_Dimp(size_tnorm);
+    blitz::Array<double,1> std_Dimp(size_tnorm);
+
+    // Compute mean_Dimp and std_Dimp over D, using impostor trials only
+    for (int i = 0; i < size_tnorm; ++i) {
+      double sum = 0;
+      double sumsq = 0;
+      double count = 0;
+      for (int j = 0; j < size_znorm; ++j) {
+        bool keep;
+        // The second part is never executed if mask_zprobes_vs_tmodels_istruetrial==NULL
+        keep = (mask_zprobes_vs_tmodels_istruetrial == NULL) || !(*mask_zprobes_vs_tmodels_istruetrial)(i, j); //tnorm_models_spk_ids(i) != znorm_tests_spk_ids(j);
+
+        double value = keep * (*D)(i, j);
+        sum += value;
+        sumsq += value*value;
+        count += keep;
+      }
+
+      double mean = sum / count;
+      mean_Dimp(i) = mean;
+      if (count > 1)
+        std_Dimp(i) = sqrt((sumsq - count * mean * mean) / (count -1));
+      else // 1 single value -> std = 0
+        std_Dimp(i) = 0;
+    }
+
+    // zC  = (C - mean(D)) / std(D)     [znorm the tnorm scores]
+    std_Dimp = blitz::where( std_Dimp <= eps, 1., std_Dimp);
+    zC = ((*C)(ii, jj) - mean_Dimp(ii)) / std_Dimp(ii);
+  }
+  else if (C && size_tnorm > 0)
+    zC = *C;
+
+  if (C && size_tnorm > 0)
+  {
+    blitz::Array<double,1> mean_zC(size_enrol);
+    blitz::Array<double,1> std_zC(size_enrol);
+
+    // ztA = (zA - mean(zC)) / std(zC)  [ztnorm on eval scores]
+    mean_zC = blitz::mean(zC(jj, ii), jj);
+    if (size_tnorm > 1)
+      std_zC = sqrt(blitz::sum(pow(zC(jj, ii) - mean_zC(ii), 2) , jj) / (size_tnorm - 1));
+    else // 1 single value -> std = 0
+      std_zC = 0;
+    std_zC = blitz::where( std_zC <= eps, 1., std_zC);
+
+    // Normalised scores
+    scores = (zA(ii, jj) - mean_zC(jj)) /  std_zC(jj);
+  }
+  else
+    scores = zA;
+}
+
+void bob::learn::em::ztNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
+            const blitz::Array<double,2>& rawscores_zprobes_vs_models,
+            const blitz::Array<double,2>& rawscores_probes_vs_tmodels,
+            const blitz::Array<double,2>& rawscores_zprobes_vs_tmodels,
+            const blitz::Array<bool,2>& mask_zprobes_vs_tmodels_istruetrial,
+            blitz::Array<double,2>& scores)
+{
+  _ztNorm(rawscores_probes_vs_models, &rawscores_zprobes_vs_models, &rawscores_probes_vs_tmodels,
+                 &rawscores_zprobes_vs_tmodels, &mask_zprobes_vs_tmodels_istruetrial, scores);
+}
+
+void bob::learn::em::ztNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
+            const blitz::Array<double,2>& rawscores_zprobes_vs_models,
+            const blitz::Array<double,2>& rawscores_probes_vs_tmodels,
+            const blitz::Array<double,2>& rawscores_zprobes_vs_tmodels,
+            blitz::Array<double,2>& scores)
+{
+  _ztNorm(rawscores_probes_vs_models, &rawscores_zprobes_vs_models, &rawscores_probes_vs_tmodels,
+                 &rawscores_zprobes_vs_tmodels, NULL, scores);
+}
+
+void bob::learn::em::tNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
+           const blitz::Array<double,2>& rawscores_probes_vs_tmodels,
+           blitz::Array<double,2>& scores)
+{
+  _ztNorm(rawscores_probes_vs_models, NULL, &rawscores_probes_vs_tmodels,
+                 NULL, NULL, scores);
+}
+
+void bob::learn::em::zNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
+           const blitz::Array<double,2>& rawscores_zprobes_vs_models,
+           blitz::Array<double,2>& scores)
+{
+  _ztNorm(rawscores_probes_vs_models, &rawscores_zprobes_vs_models, NULL,
+                 NULL, NULL, scores);
+}
+
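As a quick, self-contained illustration of the Z-norm formula used above (zA = (A - mean(B)) / std(B), with the unbiased standard deviation and the same epsilon guard as in _ztNorm), here is a minimal plain C++ sketch that normalizes a single raw score against a cohort of impostor scores. It uses no blitz or bob types and only mirrors the per-model computation:

#include <cmath>
#include <iostream>
#include <limits>
#include <vector>

// Z-normalizes one raw score against a cohort of impostor scores.
double znorm_score(double raw, const std::vector<double>& cohort) {
  double sum = 0., sumsq = 0.;
  for (double s : cohort) { sum += s; sumsq += s * s; }
  const double n = static_cast<double>(cohort.size());
  const double mean = sum / n;
  // unbiased standard deviation; a single cohort score yields std = 0
  double std_dev = (n > 1.) ? std::sqrt((sumsq - n * mean * mean) / (n - 1.)) : 0.;
  // guard against a (near-)zero std, as done with `eps` in _ztNorm above
  if (std_dev <= std::numeric_limits<double>::min()) std_dev = 1.;
  return (raw - mean) / std_dev;
}

int main() {
  const std::vector<double> cohort = {0.1, 0.2, 0.15, 0.05};
  std::cout << znorm_score(0.9, cohort) << std::endl;  // prints ~12.0
  return 0;
}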
diff --git a/bob/learn/em/data/data.hdf5 b/bob/learn/em/data/data.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..7c406233e19c49f5ab6e2c26d32257fc4e47e54f
Binary files /dev/null and b/bob/learn/em/data/data.hdf5 differ
diff --git a/bob/learn/em/data/dataNormalized.hdf5 b/bob/learn/em/data/dataNormalized.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..ac8d0302ebb312d35a8ee43c1c6195b899643733
Binary files /dev/null and b/bob/learn/em/data/dataNormalized.hdf5 differ
diff --git a/bob/learn/em/data/dataforMAP.hdf5 b/bob/learn/em/data/dataforMAP.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..9cd7bfe8533daab0a21ae20d342281ecf1afa977
Binary files /dev/null and b/bob/learn/em/data/dataforMAP.hdf5 differ
diff --git a/bob/learn/em/data/faithful.torch3.hdf5 b/bob/learn/em/data/faithful.torch3.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..a508318e6e8bcc528674ab7f9e3594f73ddb8367
Binary files /dev/null and b/bob/learn/em/data/faithful.torch3.hdf5 differ
diff --git a/bob/learn/em/data/faithful.torch3_f64.hdf5 b/bob/learn/em/data/faithful.torch3_f64.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..fe7f81b2bce427f6ab367cb6c7a2a6c1524e0528
Binary files /dev/null and b/bob/learn/em/data/faithful.torch3_f64.hdf5 differ
diff --git a/bob/learn/em/data/gmm.init_means.hdf5 b/bob/learn/em/data/gmm.init_means.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..0b88738f3883e9b20c8eea20e2b278bf364498b4
Binary files /dev/null and b/bob/learn/em/data/gmm.init_means.hdf5 differ
diff --git a/bob/learn/em/data/gmm.init_variances.hdf5 b/bob/learn/em/data/gmm.init_variances.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..d0687a2ffc6bab5ea8b111c60cd112730af9b758
Binary files /dev/null and b/bob/learn/em/data/gmm.init_variances.hdf5 differ
diff --git a/bob/learn/em/data/gmm.init_weights.hdf5 b/bob/learn/em/data/gmm.init_weights.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..558faa66b67f5deb0550d2543372667ff45f1e70
Binary files /dev/null and b/bob/learn/em/data/gmm.init_weights.hdf5 differ
diff --git a/bob/learn/em/data/gmm_MAP.hdf5 b/bob/learn/em/data/gmm_MAP.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..91c5e69141e3042ef5d211fc4098a8d59649d62d
Binary files /dev/null and b/bob/learn/em/data/gmm_MAP.hdf5 differ
diff --git a/bob/learn/em/data/gmm_ML.hdf5 b/bob/learn/em/data/gmm_ML.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..238cd7e14f5e4ab92e505221f200cdba368cb593
Binary files /dev/null and b/bob/learn/em/data/gmm_ML.hdf5 differ
diff --git a/bob/learn/em/data/gmm_ML_32bit_debug.hdf5 b/bob/learn/em/data/gmm_ML_32bit_debug.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..275381b7e7573e060009a15220f092cfa323a1eb
Binary files /dev/null and b/bob/learn/em/data/gmm_ML_32bit_debug.hdf5 differ
diff --git a/bob/learn/em/data/gmm_ML_32bit_release.hdf5 b/bob/learn/em/data/gmm_ML_32bit_release.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..438e9932cecf179d1b834e2f5c19d39a7c906cf3
Binary files /dev/null and b/bob/learn/em/data/gmm_ML_32bit_release.hdf5 differ
diff --git a/bob/learn/em/data/means.hdf5 b/bob/learn/em/data/means.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..060afde0fb2777065d02c85baf8a34ec1d509fea
Binary files /dev/null and b/bob/learn/em/data/means.hdf5 differ
diff --git a/bob/learn/em/data/meansAfterKMeans.hdf5 b/bob/learn/em/data/meansAfterKMeans.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..9552dd832998ee19062e4c0b28b335691af25269
Binary files /dev/null and b/bob/learn/em/data/meansAfterKMeans.hdf5 differ
diff --git a/bob/learn/em/data/meansAfterMAP.hdf5 b/bob/learn/em/data/meansAfterMAP.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..ac4cb9383d10c223b26d16d686910c430cf71197
Binary files /dev/null and b/bob/learn/em/data/meansAfterMAP.hdf5 differ
diff --git a/bob/learn/em/data/meansAfterML.hdf5 b/bob/learn/em/data/meansAfterML.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..857bbe39c29cbb76f18aed3798ce484ed2bcb67d
Binary files /dev/null and b/bob/learn/em/data/meansAfterML.hdf5 differ
diff --git a/bob/learn/em/data/new_adapted_mean.hdf5 b/bob/learn/em/data/new_adapted_mean.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..fc4a8ee30af0d8531302133b2bd2595df07139b8
Binary files /dev/null and b/bob/learn/em/data/new_adapted_mean.hdf5 differ
diff --git a/bob/learn/em/data/samplesFrom2G_f64.hdf5 b/bob/learn/em/data/samplesFrom2G_f64.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..9ef47fd649fde13d36a15a6ebde122c31047b31b
Binary files /dev/null and b/bob/learn/em/data/samplesFrom2G_f64.hdf5 differ
diff --git a/bob/learn/em/data/stats.hdf5 b/bob/learn/em/data/stats.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..c4a13700ec20079fdaacbd3841e8289910e9dd82
Binary files /dev/null and b/bob/learn/em/data/stats.hdf5 differ
diff --git a/bob/learn/em/data/variances.hdf5 b/bob/learn/em/data/variances.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..c9d6d17bcf73be3bb7800d14604a0201b16f4ada
Binary files /dev/null and b/bob/learn/em/data/variances.hdf5 differ
diff --git a/bob/learn/em/data/variancesAfterKMeans.hdf5 b/bob/learn/em/data/variancesAfterKMeans.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..2aee23c0ef021e383d34ef2ca47175ecf165a6e9
Binary files /dev/null and b/bob/learn/em/data/variancesAfterKMeans.hdf5 differ
diff --git a/bob/learn/em/data/variancesAfterMAP.hdf5 b/bob/learn/em/data/variancesAfterMAP.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..47bd4d5f823882eb7de61f3b67946c81acc0e82f
Binary files /dev/null and b/bob/learn/em/data/variancesAfterMAP.hdf5 differ
diff --git a/bob/learn/em/data/variancesAfterML.hdf5 b/bob/learn/em/data/variancesAfterML.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..472229290b53eb34728dac334a7addb635d314a0
Binary files /dev/null and b/bob/learn/em/data/variancesAfterML.hdf5 differ
diff --git a/bob/learn/em/data/weights.hdf5 b/bob/learn/em/data/weights.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..41b22801f28b4afc8b3a81daf5e594e85100f29f
Binary files /dev/null and b/bob/learn/em/data/weights.hdf5 differ
diff --git a/bob/learn/em/data/weightsAfterKMeans.hdf5 b/bob/learn/em/data/weightsAfterKMeans.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..b241207eac61c8f47dcb0fafed293748108ba6d8
Binary files /dev/null and b/bob/learn/em/data/weightsAfterKMeans.hdf5 differ
diff --git a/bob/learn/em/data/weightsAfterMAP.hdf5 b/bob/learn/em/data/weightsAfterMAP.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..b6e1f0497f91dfc26e137fc021f02431023db1a7
Binary files /dev/null and b/bob/learn/em/data/weightsAfterMAP.hdf5 differ
diff --git a/bob/learn/em/data/weightsAfterML.hdf5 b/bob/learn/em/data/weightsAfterML.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..0b3fc2551fc9d1eff310c7cd7c0a5e33d926f0e7
Binary files /dev/null and b/bob/learn/em/data/weightsAfterML.hdf5 differ
diff --git a/bob/learn/em/data/ztnorm_eval_eval.hdf5 b/bob/learn/em/data/ztnorm_eval_eval.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..bc5771861bd444b1ba3d89c6c949e91e912136cf
Binary files /dev/null and b/bob/learn/em/data/ztnorm_eval_eval.hdf5 differ
diff --git a/bob/learn/em/data/ztnorm_eval_tnorm.hdf5 b/bob/learn/em/data/ztnorm_eval_tnorm.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..d98b4d656bbe0a8c75a675ccc09834b30bc4bd00
Binary files /dev/null and b/bob/learn/em/data/ztnorm_eval_tnorm.hdf5 differ
diff --git a/bob/learn/em/data/ztnorm_result.hdf5 b/bob/learn/em/data/ztnorm_result.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..877c72c77fedb11fe7d39791823f42e58cce1e1c
Binary files /dev/null and b/bob/learn/em/data/ztnorm_result.hdf5 differ
diff --git a/bob/learn/em/data/ztnorm_znorm_eval.hdf5 b/bob/learn/em/data/ztnorm_znorm_eval.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..5d670ae0b3bd410c40e70b4697f0531fdc7bfb87
Binary files /dev/null and b/bob/learn/em/data/ztnorm_znorm_eval.hdf5 differ
diff --git a/bob/learn/em/data/ztnorm_znorm_tnorm.hdf5 b/bob/learn/em/data/ztnorm_znorm_tnorm.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..e2f709ed9a33a014f64e6c69f15c3e549dc7e3ca
Binary files /dev/null and b/bob/learn/em/data/ztnorm_znorm_tnorm.hdf5 differ
diff --git a/bob/learn/em/empca_trainer.cpp b/bob/learn/em/empca_trainer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..127a1d78add2e4214b68c0b8e02ae3794863c83c
--- /dev/null
+++ b/bob/learn/em/empca_trainer.cpp
@@ -0,0 +1,378 @@
+/**
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ * @date Tue 03 Feb 11:22:00 2015
+ *
+ * @brief Python API for bob::learn::em
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "main.h"
+
+/******************************************************************/
+/************ Constructor Section *********************************/
+/******************************************************************/
+
+static auto EMPCATrainer_doc = bob::extension::ClassDoc(
+  BOB_EXT_MODULE_PREFIX "._EMPCATrainer",
+  ""
+
+).add_constructor(
+  bob::extension::FunctionDoc(
+    "__init__",
+    "Creates a EMPCATrainer",
+    "",
+    true
+  )
+  .add_prototype("convergence_threshold","")
+  .add_prototype("other","")
+  .add_prototype("","")
+
+  .add_parameter("other", ":py:class:`bob.learn.em.EMPCATrainer`", "A EMPCATrainer object to be copied.")
+  .add_parameter("convergence_threshold", "double", "")
+
+);
+
+
+static int PyBobLearnEMEMPCATrainer_init_copy(PyBobLearnEMEMPCATrainerObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = EMPCATrainer_doc.kwlist(1);
+  PyBobLearnEMEMPCATrainerObject* tt;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMEMPCATrainer_Type, &tt)){
+    EMPCATrainer_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::EMPCATrainer(*tt->cxx));
+  return 0;
+}
+
+static int PyBobLearnEMEMPCATrainer_init_number(PyBobLearnEMEMPCATrainerObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = EMPCATrainer_doc.kwlist(0);
+  double convergence_threshold    = 0.0001;
+  //Parsing the input arguments
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "d", kwlist, &convergence_threshold))
+    return -1;
+
+  if(convergence_threshold < 0){
+    PyErr_Format(PyExc_TypeError, "convergence_threshold argument must be greater than to zero");
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::EMPCATrainer(convergence_threshold));
+  return 0;
+}
+
+static int PyBobLearnEMEMPCATrainer_init(PyBobLearnEMEMPCATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
+
+  switch (nargs) {
+
+    case 0:{ //default initializer ()
+      self->cxx.reset(new bob::learn::em::EMPCATrainer());
+      return 0;
+    }
+    case 1:{
+      //Reading the input argument
+      PyObject* arg = 0;
+      if (PyTuple_Size(args))
+        arg = PyTuple_GET_ITEM(args, 0);
+      else {
+        PyObject* tmp = PyDict_Values(kwargs);
+        auto tmp_ = make_safe(tmp);
+        arg = PyList_GET_ITEM(tmp, 0);
+      }
+
+      // If the constructor input is EMPCATrainer object
+      if (PyBobLearnEMEMPCATrainer_Check(arg))
+        return PyBobLearnEMEMPCATrainer_init_copy(self, args, kwargs);
+      // If the constructor input is a number (the convergence threshold)
+      else if(PyNumber_Check(arg))
+        return PyBobLearnEMEMPCATrainer_init_number(self, args, kwargs);
+    }
+    default:{
+      PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires 0 or 1 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
+      EMPCATrainer_doc.print_usage();
+      return -1;
+    }
+  }
+  BOB_CATCH_MEMBER("cannot create EMPCATrainer", 0)
+  return 0;
+}
+
+
+static void PyBobLearnEMEMPCATrainer_delete(PyBobLearnEMEMPCATrainerObject* self) {
+  self->cxx.reset();
+  Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+
+int PyBobLearnEMEMPCATrainer_Check(PyObject* o) {
+  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnEMEMPCATrainer_Type));
+}
+
+
+static PyObject* PyBobLearnEMEMPCATrainer_RichCompare(PyBobLearnEMEMPCATrainerObject* self, PyObject* other, int op) {
+  BOB_TRY
+
+  if (!PyBobLearnEMEMPCATrainer_Check(other)) {
+    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
+    return 0;
+  }
+  auto other_ = reinterpret_cast<PyBobLearnEMEMPCATrainerObject*>(other);
+  switch (op) {
+    case Py_EQ:
+      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+    case Py_NE:
+      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
+    default:
+      Py_INCREF(Py_NotImplemented);
+      return Py_NotImplemented;
+  }
+  BOB_CATCH_MEMBER("cannot compare EMPCATrainer objects", 0)
+}
+
+
+/******************************************************************/
+/************ Variables Section ***********************************/
+/******************************************************************/
+
+
+/***** rng *****/
+static auto rng = bob::extension::VariableDoc(
+  "rng",
+  "str",
+  "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.",
+  ""
+);
+PyObject* PyBobLearnEMEMPCATrainer_getRng(PyBobLearnEMEMPCATrainerObject* self, void*) {
+  BOB_TRY
+  //Allocating the correspondent python object
+  
+  PyBoostMt19937Object* retval =
+    (PyBoostMt19937Object*)PyBoostMt19937_Type.tp_alloc(&PyBoostMt19937_Type, 0);
+
+  retval->rng = self->cxx->getRng().get();
+  return Py_BuildValue("O", retval);
+  BOB_CATCH_MEMBER("Rng method could not be read", 0)
+}
+int PyBobLearnEMEMPCATrainer_setRng(PyBobLearnEMEMPCATrainerObject* self, PyObject* value, void*) {
+  BOB_TRY
+
+  if (!PyBoostMt19937_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects an PyBoostMt19937_Check", Py_TYPE(self)->tp_name, rng.name());
+    return -1;
+  }
+
+  PyBoostMt19937Object* boostObject = 0;
+  PyBoostMt19937_Converter(value, &boostObject);
+  self->cxx->setRng((boost::shared_ptr<boost::mt19937>)boostObject->rng);
+
+  return 0;
+  BOB_CATCH_MEMBER("Rng could not be set", 0)
+}
+
+
+
+static PyGetSetDef PyBobLearnEMEMPCATrainer_getseters[] = { 
+  {
+   rng.name(),
+   (getter)PyBobLearnEMEMPCATrainer_getRng,
+   (setter)PyBobLearnEMEMPCATrainer_setRng,
+   rng.doc(),
+   0
+  },
+  {0}  // Sentinel
+};
+
+
+/******************************************************************/
+/************ Functions Section ***********************************/
+/******************************************************************/
+
+/*** initialize ***/
+static auto initialize = bob::extension::FunctionDoc(
+  "initialize",
+  "",
+  "",
+  true
+)
+.add_prototype("linear_machine,data")
+.add_parameter("linear_machine", ":py:class:`bob.learn.linear.Machine`", "LinearMachine Object")
+.add_parameter("data", "array_like <float, 2D>", "Input data");
+static PyObject* PyBobLearnEMEMPCATrainer_initialize(PyBobLearnEMEMPCATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = initialize.kwlist(0);
+
+  PyBobLearnLinearMachineObject* linear_machine = 0;
+  PyBlitzArrayObject* data                          = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnLinearMachine_Type, &linear_machine,
+                                                                 &PyBlitzArray_Converter, &data)) Py_RETURN_NONE;
+  auto data_ = make_safe(data);
+
+  self->cxx->initialize(*linear_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
+
+  BOB_CATCH_MEMBER("cannot perform the initialize method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** eStep ***/
+static auto eStep = bob::extension::FunctionDoc(
+  "eStep",
+  "",
+  "",
+  true
+)
+.add_prototype("linear_machine,data")
+.add_parameter("linear_machine", ":py:class:`bob.learn.linear.Machine`", "LinearMachine Object")
+.add_parameter("data", "array_like <float, 2D>", "Input data");
+static PyObject* PyBobLearnEMEMPCATrainer_eStep(PyBobLearnEMEMPCATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = eStep.kwlist(0);
+
+  PyBobLearnLinearMachineObject* linear_machine;
+  PyBlitzArrayObject* data = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnLinearMachine_Type, &linear_machine,
+                                                                 &PyBlitzArray_Converter, &data)) Py_RETURN_NONE;
+  auto data_ = make_safe(data);
+
+  self->cxx->eStep(*linear_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
+
+
+  BOB_CATCH_MEMBER("cannot perform the eStep method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** mStep ***/
+static auto mStep = bob::extension::FunctionDoc(
+  "mStep",
+  "",
+  0,
+  true
+)
+.add_prototype("linear_machine,data")
+.add_parameter("linear_machine", ":py:class:`bob.learn.em.LinearMachine`", "LinearMachine Object")
+.add_parameter("data", "array_like <float, 2D>", "Input data");
+static PyObject* PyBobLearnEMEMPCATrainer_mStep(PyBobLearnEMEMPCATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = mStep.kwlist(0);
+
+  PyBobLearnLinearMachineObject* linear_machine;
+  PyBlitzArrayObject* data = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnLinearMachine_Type, &linear_machine,
+                                                                 &PyBlitzArray_Converter, &data)) Py_RETURN_NONE;
+  auto data_ = make_safe(data);
+
+  self->cxx->mStep(*linear_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
+
+
+  BOB_CATCH_MEMBER("cannot perform the mStep method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** computeLikelihood ***/
+static auto compute_likelihood = bob::extension::FunctionDoc(
+  "compute_likelihood",
+  "",
+  0,
+  true
+)
+.add_prototype("linear_machine,data")
+.add_parameter("linear_machine", ":py:class:`bob.learn.em.LinearMachine`", "LinearMachine Object");
+static PyObject* PyBobLearnEMEMPCATrainer_compute_likelihood(PyBobLearnEMEMPCATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = compute_likelihood.kwlist(0);
+
+  PyBobLearnLinearMachineObject* linear_machine;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnLinearMachine_Type, &linear_machine)) Py_RETURN_NONE;
+
+  double value = self->cxx->computeLikelihood(*linear_machine->cxx);
+  return Py_BuildValue("d", value);
+
+  BOB_CATCH_MEMBER("cannot perform the computeLikelihood method", 0)
+}
+
+
+
+static PyMethodDef PyBobLearnEMEMPCATrainer_methods[] = {
+  {
+    initialize.name(),
+    (PyCFunction)PyBobLearnEMEMPCATrainer_initialize,
+    METH_VARARGS|METH_KEYWORDS,
+    initialize.doc()
+  },
+  {
+    eStep.name(),
+    (PyCFunction)PyBobLearnEMEMPCATrainer_eStep,
+    METH_VARARGS|METH_KEYWORDS,
+    eStep.doc()
+  },
+  {
+    mStep.name(),
+    (PyCFunction)PyBobLearnEMEMPCATrainer_mStep,
+    METH_VARARGS|METH_KEYWORDS,
+    mStep.doc()
+  },
+  {
+    compute_likelihood.name(),
+    (PyCFunction)PyBobLearnEMEMPCATrainer_compute_likelihood,
+    METH_VARARGS|METH_KEYWORDS,
+    compute_likelihood.doc()
+  },
+  {0} /* Sentinel */
+};
+
+
+/******************************************************************/
+/************ Module Section **************************************/
+/******************************************************************/
+
+// Define the Gaussian type struct; will be initialized later
+PyTypeObject PyBobLearnEMEMPCATrainer_Type = {
+  PyVarObject_HEAD_INIT(0,0)
+  0
+};
+
+bool init_BobLearnEMEMPCATrainer(PyObject* module)
+{
+  // initialize the type struct
+  PyBobLearnEMEMPCATrainer_Type.tp_name = EMPCATrainer_doc.name();
+  PyBobLearnEMEMPCATrainer_Type.tp_basicsize = sizeof(PyBobLearnEMEMPCATrainerObject);
+  PyBobLearnEMEMPCATrainer_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;//Enable the class inheritance
+  PyBobLearnEMEMPCATrainer_Type.tp_doc = EMPCATrainer_doc.doc();
+
+  // set the functions
+  PyBobLearnEMEMPCATrainer_Type.tp_new = PyType_GenericNew;
+  PyBobLearnEMEMPCATrainer_Type.tp_init = reinterpret_cast<initproc>(PyBobLearnEMEMPCATrainer_init);
+  PyBobLearnEMEMPCATrainer_Type.tp_dealloc = reinterpret_cast<destructor>(PyBobLearnEMEMPCATrainer_delete);
+  PyBobLearnEMEMPCATrainer_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnEMEMPCATrainer_RichCompare);
+  PyBobLearnEMEMPCATrainer_Type.tp_methods = PyBobLearnEMEMPCATrainer_methods;
+  PyBobLearnEMEMPCATrainer_Type.tp_getset = PyBobLearnEMEMPCATrainer_getseters;
+  PyBobLearnEMEMPCATrainer_Type.tp_call = reinterpret_cast<ternaryfunc>(PyBobLearnEMEMPCATrainer_compute_likelihood);
+
+
+  // check that everything is fine
+  if (PyType_Ready(&PyBobLearnEMEMPCATrainer_Type) < 0) return false;
+
+  // add the type to the module
+  Py_INCREF(&PyBobLearnEMEMPCATrainer_Type);
+  return PyModule_AddObject(module, "_EMPCATrainer", (PyObject*)&PyBobLearnEMEMPCATrainer_Type) >= 0;
+}
+
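The bindings above expose initialize, eStep, mStep and compute_likelihood separately; the EM loop itself is driven by the caller. Below is a minimal, self-contained C++ sketch of such a driver. `Trainer` and `Machine` are hypothetical stand-ins (with a dummy likelihood, and the data arguments omitted for brevity); only the loop structure, with the relative-change test against `convergence_threshold` (cf. the constructor parameter of `_EMPCATrainer`), is the point:

#include <cmath>
#include <cstdio>

// Hypothetical stand-ins for a bound machine/trainer pair.
struct Machine {};

struct Trainer {
  double ll = -1000.;  // dummy likelihood that converges geometrically
  void initialize(Machine&) {}
  void eStep(Machine&) {}
  void mStep(Machine&) {}
  double computeLikelihood(Machine&) { ll += (-10. - ll) * 0.5; return ll; }
};

// Generic EM driver: alternate m/e steps until the relative change of the
// log likelihood falls below convergence_threshold or max_iter is reached.
void train(Trainer& t, Machine& m,
           double convergence_threshold = 1e-4, int max_iter = 200) {
  t.initialize(m);
  t.eStep(m);
  double prev = t.computeLikelihood(m);
  for (int it = 0; it < max_iter; ++it) {
    t.mStep(m);
    t.eStep(m);
    const double cur = t.computeLikelihood(m);
    std::printf("iteration %d: log-likelihood = %f\n", it, cur);
    if (std::fabs((prev - cur) / prev) < convergence_threshold) break;
    prev = cur;
  }
}

int main() {
  Machine m;
  Trainer t;
  train(t, m);
  return 0;
}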
diff --git a/bob/learn/em/gaussian.cpp b/bob/learn/em/gaussian.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..86f49301c1121e8afcba2983028897924b676a2e
--- /dev/null
+++ b/bob/learn/em/gaussian.cpp
@@ -0,0 +1,571 @@
+/**
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ * @date Fri 21 Nov 10:38:48 2013
+ *
+ * @brief Python API for bob::learn::em
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "main.h"
+
+/******************************************************************/
+/************ Constructor Section *********************************/
+/******************************************************************/
+
+static auto Gaussian_doc = bob::extension::ClassDoc(
+  BOB_EXT_MODULE_PREFIX ".Gaussian",
+  "This class implements a multivariate diagonal Gaussian distribution"
+).add_constructor(
+  bob::extension::FunctionDoc(
+    "__init__",
+    "Constructs a new multivariate gaussian object",
+    "",
+    true
+  )
+  .add_prototype("n_inputs","")
+  .add_prototype("other","")
+  .add_prototype("hdf5","")
+  .add_prototype("","")
+
+  .add_parameter("n_inputs", "int", "Dimension of the feature vector")
+  .add_parameter("other", ":py:class:`bob.learn.em.GMMStats`", "A GMMStats object to be copied.")
+  .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
+);
+
+
+
+static int PyBobLearnEMGaussian_init_number(PyBobLearnEMGaussianObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = Gaussian_doc.kwlist(0);
+  int n_inputs=1;
+  //Parsing the input arguments
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &n_inputs))
+    return -1;
+
+  if(n_inputs < 0){
+    PyErr_Format(PyExc_TypeError, "input argument must be greater than or equal to zero");
+    Gaussian_doc.print_usage();
+    return -1;
+   }
+
+  self->cxx.reset(new bob::learn::em::Gaussian(n_inputs));
+  return 0;
+}
+
+static int PyBobLearnEMGaussian_init_copy(PyBobLearnEMGaussianObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = Gaussian_doc.kwlist(1);
+  PyBobLearnEMGaussianObject* tt;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGaussian_Type, &tt)){
+    Gaussian_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::Gaussian(*tt->cxx));
+  return 0;
+}
+
+static int PyBobLearnEMGaussian_init_hdf5(PyBobLearnEMGaussianObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = Gaussian_doc.kwlist(2);
+
+  PyBobIoHDF5FileObject* config = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBobIoHDF5File_Converter, &config)){
+    Gaussian_doc.print_usage();
+    return -1;
+  }
+  
+  try {
+    self->cxx.reset(new bob::learn::em::Gaussian(*(config->f)));
+  }
+  catch (std::exception& ex) {
+    PyErr_SetString(PyExc_RuntimeError, ex.what());
+    return -1;
+  }
+  catch (...) {
+    PyErr_Format(PyExc_RuntimeError, "cannot create new object of type `%s' - unknown exception thrown", Py_TYPE(self)->tp_name);
+    return -1;
+  }
+
+  return 0;
+}
+
+
+static int PyBobLearnEMGaussian_init(PyBobLearnEMGaussianObject* self, PyObject* args, PyObject* kwargs) {
+
+  BOB_TRY
+
+  // get the number of command line arguments
+  Py_ssize_t nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
+  if (nargs==0){
+    self->cxx.reset(new bob::learn::em::Gaussian());
+    return 0;
+  }
+
+  //Reading the input argument
+  PyObject* arg = 0;
+  if (PyTuple_Size(args)) 
+    arg = PyTuple_GET_ITEM(args, 0);
+  else {
+    PyObject* tmp = PyDict_Values(kwargs);
+    auto tmp_ = make_safe(tmp);
+    arg = PyList_GET_ITEM(tmp, 0);
+  }
+
+  /**If the constructor input is a number**/
+  if (PyNumber_Check(arg)) 
+    return PyBobLearnEMGaussian_init_number(self, args, kwargs);
+  /**If the constructor input is Gaussian object**/
+  else if (PyBobLearnEMGaussian_Check(arg))
+    return PyBobLearnEMGaussian_init_copy(self, args, kwargs);
+  /**If the constructor input is a HDF5**/
+  else if (PyBobIoHDF5File_Check(arg))
+    return PyBobLearnEMGaussian_init_hdf5(self, args, kwargs);
+  else {
+    PyErr_Format(PyExc_TypeError, "invalid input argument");
+    Gaussian_doc.print_usage();
+    return -1;
+  }
+
+  BOB_CATCH_MEMBER("cannot create Gaussian", -1)
+  return 0;
+}
+
+
+
+static void PyBobLearnEMGaussian_delete(PyBobLearnEMGaussianObject* self) {
+  self->cxx.reset();
+  Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+static PyObject* PyBobLearnEMGaussian_RichCompare(PyBobLearnEMGaussianObject* self, PyObject* other, int op) {
+  BOB_TRY
+
+  if (!PyBobLearnEMGaussian_Check(other)) {
+    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
+    return 0;
+  }
+  auto other_ = reinterpret_cast<PyBobLearnEMGaussianObject*>(other);
+  switch (op) {
+    case Py_EQ:
+      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+    case Py_NE:
+      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
+    default:
+      Py_INCREF(Py_NotImplemented);
+      return Py_NotImplemented;
+  }
+  BOB_CATCH_MEMBER("cannot compare Gaussian objects", 0)
+}
+
+int PyBobLearnEMGaussian_Check(PyObject* o) {
+  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnEMGaussian_Type));
+}
+
+
+/******************************************************************/
+/************ Variables Section ***********************************/
+/******************************************************************/
+
+/***** MEAN *****/
+static auto mean = bob::extension::VariableDoc(
+  "mean",
+  "array_like <double, 1D>",
+  "Mean of the Gaussian",
+  ""
+);
+PyObject* PyBobLearnEMGaussian_getMean(PyBobLearnEMGaussianObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getMean());
+  BOB_CATCH_MEMBER("mean could not be read", 0)
+}
+int PyBobLearnEMGaussian_setMean(PyBobLearnEMGaussianObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, mean.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "mean");
+  if (!b) return -1;
+  self->cxx->setMean(*b);
+  return 0;
+  BOB_CATCH_MEMBER("mean could not be set", -1)
+}
+
+/***** Variance *****/
+static auto variance = bob::extension::VariableDoc(
+  "variance",
+  "array_like <double, 1D>",
+  "Variance of the Gaussian",
+  ""
+);
+PyObject* PyBobLearnEMGaussian_getVariance(PyBobLearnEMGaussianObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getVariance());
+  BOB_CATCH_MEMBER("variance could not be read", 0)
+}
+int PyBobLearnEMGaussian_setVariance(PyBobLearnEMGaussianObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, variance.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "variance");
+  if (!b) return -1;
+  self->cxx->setVariance(*b);
+  return 0;
+  BOB_CATCH_MEMBER("variance could not be set", -1)
+}
+
+
+/***** variance_thresholds *****/
+static auto variance_thresholds = bob::extension::VariableDoc(
+  "variance_thresholds",
+  "array_like <double, 1D>",
+  "The variance flooring thresholds, i.e. the minimum allowed value of variance in each dimension. ",
+  "The variance will be set to this value if an attempt is made to set it to a smaller value."
+);
+PyObject* PyBobLearnEMGaussian_getVarianceThresholds(PyBobLearnEMGaussianObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getVarianceThresholds());
+  BOB_CATCH_MEMBER("variance_thresholds could not be read", 0)
+}
+int PyBobLearnEMGaussian_setVarianceThresholds(PyBobLearnEMGaussianObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, variance_thresholds.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "variance_thresholds");
+  if (!b) return -1;
+  self->cxx->setVarianceThresholds(*b);
+  return 0;
+  BOB_CATCH_MEMBER("variance_thresholds could not be set", -1)  
+}
+
+
+/***** shape *****/
+static auto shape = bob::extension::VariableDoc(
+  "shape",
+  "(int)",
+  "A tuple that represents the dimensionality of the Gaussian ``(dim,)``.",
+  ""
+);
+PyObject* PyBobLearnEMGaussian_getShape(PyBobLearnEMGaussianObject* self, void*) {
+  BOB_TRY
+  return Py_BuildValue("(i)", self->cxx->getNInputs());
+  BOB_CATCH_MEMBER("shape could not be read", 0)
+}
+
+static PyGetSetDef PyBobLearnEMGaussian_getseters[] = {
+    {
+      mean.name(),
+      (getter)PyBobLearnEMGaussian_getMean,
+      (setter)PyBobLearnEMGaussian_setMean,
+      mean.doc(),
+      0
+    },
+    {
+      variance.name(),
+      (getter)PyBobLearnEMGaussian_getVariance,
+      (setter)PyBobLearnEMGaussian_setVariance,
+      variance.doc(),
+     0
+     },
+     {
+      variance_thresholds.name(),
+      (getter)PyBobLearnEMGaussian_getVarianceThresholds,
+      (setter)PyBobLearnEMGaussian_setVarianceThresholds,
+      variance_thresholds.doc(),
+      0
+     },
+     {
+      shape.name(),
+      (getter)PyBobLearnEMGaussian_getShape,
+      0,
+      shape.doc(),
+      0
+     },
+
+    {0}  // Sentinel
+};
+
+
+/******************************************************************/
+/************ Functions Section ***********************************/
+/******************************************************************/
+
+/*** resize ***/
+static auto resize = bob::extension::FunctionDoc(
+  "resize",
+  "Set the input dimensionality, reset the mean to zero and the variance to one."
+)
+.add_prototype("input")
+.add_parameter("input", "int", "Dimensionality of the feature vector");
+static PyObject* PyBobLearnEMGaussian_resize(PyBobLearnEMGaussianObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = resize.kwlist(0);
+
+  int input = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &input)) Py_RETURN_NONE;
+  if (input <= 0){
+    PyErr_Format(PyExc_TypeError, "input must be greater than zero");
+    resize.print_usage();
+    return 0;
+  }
+  self->cxx->setNInputs(input);
+
+  BOB_CATCH_MEMBER("cannot perform the resize method", 0)
+
+  Py_RETURN_NONE;
+}
+
+/*** log_likelihood ***/
+static auto log_likelihood = bob::extension::FunctionDoc(
+  "log_likelihood",
+  "Output the log likelihood of the sample, x. The input size is checked.",
+  ".. note:: The :py:meth:`__call__` function is an alias for this.", 
+  true
+)
+.add_prototype("input","output")
+.add_parameter("input", "array_like <double, 1D>", "Input vector")
+.add_return("output","float","The log likelihood");
+static PyObject* PyBobLearnEMGaussian_loglikelihood(PyBobLearnEMGaussianObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = log_likelihood.kwlist(0);
+
+  PyBlitzArrayObject* input = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBlitzArray_Converter, &input)) Py_RETURN_NONE;
+  //protects acquired resources through this scope
+  auto input_ = make_safe(input);
+
+  double value = self->cxx->logLikelihood(*PyBlitzArrayCxx_AsBlitz<double,1>(input));
+  return Py_BuildValue("d", value);
+
+  BOB_CATCH_MEMBER("cannot compute the likelihood", 0)
+}
+
+
+/*** log_likelihood_ ***/
+static auto log_likelihood_ = bob::extension::FunctionDoc(
+  "log_likelihood_",
+  "Output the log likelihood given a sample. The input size is NOT checked."
+)
+.add_prototype("input","output")
+.add_parameter("input", "array_like <double, 1D>", "Input vector")
+.add_return("output","double","The log likelihood");
+static PyObject* PyBobLearnEMGaussian_loglikelihood_(PyBobLearnEMGaussianObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  char** kwlist = log_likelihood_.kwlist(0);
+
+  PyBlitzArrayObject* input = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBlitzArray_Converter, &input)) Py_RETURN_NONE;
+  //protects acquired resources through this scope
+  auto input_ = make_safe(input);
+
+  double value = self->cxx->logLikelihood_(*PyBlitzArrayCxx_AsBlitz<double,1>(input));
+  return Py_BuildValue("d", value);
+
+  BOB_CATCH_MEMBER("cannot compute the likelihood", 0)
+}
+
+
+/*** save ***/
+static auto save = bob::extension::FunctionDoc(
+  "save",
+  "Save the configuration of the Gassian Machine to a given HDF5 file"
+)
+.add_prototype("hdf5")
+.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing")
+;
+static PyObject* PyBobLearnEMGaussian_Save(PyBobLearnEMGaussianObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  // get list of arguments
+  char** kwlist = save.kwlist(0);  
+  PyBobIoHDF5FileObject* hdf5;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
+
+  auto hdf5_ = make_safe(hdf5);
+
+  self->cxx->save(*hdf5->f);
+
+  BOB_CATCH_MEMBER("cannot save the data", 0)
+  Py_RETURN_NONE;
+}
+
+/*** load ***/
+static auto load = bob::extension::FunctionDoc(
+  "load",
+  "Load the configuration of the Gassian Machine to a given HDF5 file"
+)
+.add_prototype("hdf5")
+.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
+static PyObject* PyBobLearnEMGaussian_Load(PyBobLearnEMGaussianObject* self,  PyObject* args, PyObject* kwargs) {
+
+  BOB_TRY
+  
+  char** kwlist = load.kwlist(0);  
+  PyBobIoHDF5FileObject* hdf5;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
+  
+  auto hdf5_ = make_safe(hdf5);  
+  self->cxx->load(*hdf5->f);
+  
+  BOB_CATCH_MEMBER("cannot load the data", 0)    
+  Py_RETURN_NONE;
+}
+
+
+/*** is_similar_to ***/
+static auto is_similar_to = bob::extension::FunctionDoc(
+  "is_similar_to",
+  
+  "Compares this Gaussian with the ``other`` one to be approximately the same.",
+  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
+  "relative and absolute precision for the ``weights``, ``biases`` and any other values internal to this machine.",
+  true
+)
+.add_prototype("other, [r_epsilon], [a_epsilon]","output")
+.add_parameter("other", ":py:class:`bob.learn.em.Gaussian`", "A gaussian to be compared.")
+.add_parameter("[r_epsilon]", "float", "Relative precision.")
+.add_parameter("[a_epsilon]", "float", "Absolute precision.")
+.add_return("output","bool","True if it is similar, otherwise false.");
+static PyObject* PyBobLearnEMGaussian_IsSimilarTo(PyBobLearnEMGaussianObject* self, PyObject* args, PyObject* kwds) {
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = is_similar_to.kwlist(0);
+
+  PyBobLearnEMGaussianObject* other = 0;
+  double r_epsilon = 1.e-5;
+  double a_epsilon = 1.e-8;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
+        &PyBobLearnEMGaussian_Type, &other,
+        &r_epsilon, &a_epsilon)) return 0;
+
+  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
+    Py_RETURN_TRUE;
+  else
+    Py_RETURN_FALSE;
+}
+
+
+/*** set_variance_thresholds ***/
+static auto set_variance_thresholds = bob::extension::FunctionDoc(
+  "set_variance_thresholds",
+  "Set the variance flooring thresholds equal to the given threshold for all the dimensions."
+)
+.add_prototype("input")
+.add_parameter("input","float","Threshold")
+;
+static PyObject* PyBobLearnEMGaussian_SetVarianceThresholds(PyBobLearnEMGaussianObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = set_variance_thresholds.kwlist(0);
+
+  double input = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "d", kwlist, &input)) return 0;
+
+  self->cxx->setVarianceThresholds(input);
+
+  BOB_CATCH_MEMBER("cannot perform the set_variance_Thresholds method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+static PyMethodDef PyBobLearnEMGaussian_methods[] = {
+  {
+    resize.name(),
+    (PyCFunction)PyBobLearnEMGaussian_resize,
+    METH_VARARGS|METH_KEYWORDS,
+    resize.doc()
+  },
+  {
+    log_likelihood.name(),
+    (PyCFunction)PyBobLearnEMGaussian_loglikelihood,
+    METH_VARARGS|METH_KEYWORDS,
+    log_likelihood.doc()
+  },
+  {
+    log_likelihood_.name(),
+    (PyCFunction)PyBobLearnEMGaussian_loglikelihood_,
+    METH_VARARGS|METH_KEYWORDS,
+    log_likelihood_.doc()
+  },
+  {
+    save.name(),
+    (PyCFunction)PyBobLearnEMGaussian_Save,
+    METH_VARARGS|METH_KEYWORDS,
+    save.doc()
+  },
+  {
+    load.name(),
+    (PyCFunction)PyBobLearnEMGaussian_Load,
+    METH_VARARGS|METH_KEYWORDS,
+    load.doc()
+  },
+  {
+    is_similar_to.name(),
+    (PyCFunction)PyBobLearnEMGaussian_IsSimilarTo,
+    METH_VARARGS|METH_KEYWORDS,
+    is_similar_to.doc()
+  },
+  {
+    set_variance_thresholds.name(),
+    (PyCFunction)PyBobLearnEMGaussian_SetVarianceThresholds,
+    METH_VARARGS|METH_KEYWORDS,
+    set_variance_thresholds.doc()
+  },
+
+  {0} /* Sentinel */
+};
+
+
+/******************************************************************/
+/************ Module Section **************************************/
+/******************************************************************/
+
+// Define the Gaussian type struct; will be initialized later
+PyTypeObject PyBobLearnEMGaussian_Type = {
+  PyVarObject_HEAD_INIT(0,0)
+  0
+};
+
+bool init_BobLearnEMGaussian(PyObject* module)
+{
+  // initialize the type struct
+  PyBobLearnEMGaussian_Type.tp_name = Gaussian_doc.name();
+  PyBobLearnEMGaussian_Type.tp_basicsize = sizeof(PyBobLearnEMGaussianObject);
+  PyBobLearnEMGaussian_Type.tp_flags = Py_TPFLAGS_DEFAULT;
+  PyBobLearnEMGaussian_Type.tp_doc = Gaussian_doc.doc();
+
+  // set the functions
+  PyBobLearnEMGaussian_Type.tp_new = PyType_GenericNew;
+  PyBobLearnEMGaussian_Type.tp_init = reinterpret_cast<initproc>(PyBobLearnEMGaussian_init);
+  PyBobLearnEMGaussian_Type.tp_dealloc = reinterpret_cast<destructor>(PyBobLearnEMGaussian_delete);
+  PyBobLearnEMGaussian_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnEMGaussian_RichCompare);
+  PyBobLearnEMGaussian_Type.tp_methods = PyBobLearnEMGaussian_methods;
+  PyBobLearnEMGaussian_Type.tp_getset = PyBobLearnEMGaussian_getseters;
+  PyBobLearnEMGaussian_Type.tp_call = reinterpret_cast<ternaryfunc>(PyBobLearnEMGaussian_loglikelihood);
+
+  // check that everything is fine
+  if (PyType_Ready(&PyBobLearnEMGaussian_Type) < 0) return false;
+
+  // add the type to the module
+  Py_INCREF(&PyBobLearnEMGaussian_Type);
+  return PyModule_AddObject(module, "Gaussian", (PyObject*)&PyBobLearnEMGaussian_Type) >= 0;
+}
+
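For reference, the log density that a diagonal-covariance Gaussian like the one bound above evaluates is log N(x | mu, sigma) = -1/2 * sum_d [log(2*pi*sigma_d) + (x_d - mu_d)^2 / sigma_d]. The following is a minimal, dependency-free C++ sketch of that standard formula, illustrative only: it is not the bound implementation and skips the variance flooring the class applies:

#include <cmath>
#include <cstdio>
#include <vector>

// Log density of a multivariate Gaussian with diagonal covariance;
// `variance` holds the per-dimension variances sigma_d.
double log_likelihood(const std::vector<double>& x,
                      const std::vector<double>& mean,
                      const std::vector<double>& variance) {
  const double log_2pi = std::log(6.283185307179586);  // log(2*pi)
  double ll = 0.;
  for (std::size_t d = 0; d < x.size(); ++d) {
    const double diff = x[d] - mean[d];
    ll -= 0.5 * (log_2pi + std::log(variance[d]) + diff * diff / variance[d]);
  }
  return ll;
}

int main() {
  // standard normal in 2D, evaluated at its mean: expect -log(2*pi) ~ -1.8379
  const std::vector<double> x = {0., 0.}, mu = {0., 0.}, var = {1., 1.};
  std::printf("%f\n", log_likelihood(x, mu, var));
  return 0;
}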
diff --git a/bob/learn/em/gmm_base_trainer.cpp b/bob/learn/em/gmm_base_trainer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..96dfdc6ac2e4aec319164e6b70f26bb89de0b17c
--- /dev/null
+++ b/bob/learn/em/gmm_base_trainer.cpp
@@ -0,0 +1,437 @@
+/**
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ * @date Wed 21 Jan 12:30:00 2015
+ *
+ * @brief Python API for bob::learn::em
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "main.h"
+#include <boost/make_shared.hpp>
+
+/******************************************************************/
+/************ Constructor Section *********************************/
+/******************************************************************/
+
+static inline bool f(PyObject* o){return o != 0 && PyObject_IsTrue(o) > 0;}  /* converts PyObject to bool and returns false if object is NULL */
+
+static auto GMMBaseTrainer_doc = bob::extension::ClassDoc(
+  BOB_EXT_MODULE_PREFIX ".GMMBaseTrainer",
+  "This class implements the E-step of the expectation-maximisation"
+  "algorithm for a :py:class:`bob.learn.em.GMMMachine`"
+).add_constructor(
+  bob::extension::FunctionDoc(
+    "__init__",
+    "Creates a GMMBaseTrainer",
+    "",
+    true
+  )
+  .add_prototype("update_means, [update_variances], [update_weights], [mean_var_update_responsibilities_threshold]","")
+  .add_prototype("other","")
+  .add_prototype("","")
+
+  .add_parameter("update_means", "bool", "Update means on each iteration")
+  .add_parameter("update_variances", "bool", "Update variances on each iteration")
+  .add_parameter("update_weights", "bool", "Update weights on each iteration")
+  .add_parameter("mean_var_update_responsibilities_threshold", "float", "Threshold over the responsibilities of the Gaussians Equations 9.24, 9.25 of Bishop, `Pattern recognition and machine learning`, 2006 require a division by the responsibilities, which might be equal to zero because of numerical issue. This threshold is used to avoid such divisions.")
+  .add_parameter("other", ":py:class:`bob.learn.em.GMMBaseTrainer`", "A GMMBaseTrainer object to be copied.")
+);
+
+
+
+static int PyBobLearnEMGMMBaseTrainer_init_copy(PyBobLearnEMGMMBaseTrainerObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = GMMBaseTrainer_doc.kwlist(1);
+  PyBobLearnEMGMMBaseTrainerObject* tt;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMBaseTrainer_Type, &tt)){
+    GMMBaseTrainer_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::GMMBaseTrainer(*tt->cxx));
+  return 0;
+}
+
+
+static int PyBobLearnEMGMMBaseTrainer_init_bool(PyBobLearnEMGMMBaseTrainerObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = GMMBaseTrainer_doc.kwlist(0);
+  PyObject* update_means     = 0;
+  PyObject* update_variances = 0;
+  PyObject* update_weights   = 0;
+  double mean_var_update_responsibilities_threshold = std::numeric_limits<double>::epsilon();
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!|O!O!d", kwlist, &PyBool_Type, &update_means, &PyBool_Type, 
+                                                             &update_variances, &PyBool_Type, &update_weights, &mean_var_update_responsibilities_threshold)){
+    GMMBaseTrainer_doc.print_usage();
+    return -1;
+  }
+  self->cxx.reset(new bob::learn::em::GMMBaseTrainer(f(update_means), f(update_variances), f(update_weights), mean_var_update_responsibilities_threshold));
+  return 0;
+}
+
+
+static int PyBobLearnEMGMMBaseTrainer_init(PyBobLearnEMGMMBaseTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
+
+  if (nargs==0){ //default initializer ()
+    self->cxx.reset(new bob::learn::em::GMMBaseTrainer());
+    return 0;
+  }
+  else{
+    //Reading the input argument
+    PyObject* arg = 0;
+    if (PyTuple_Size(args))
+      arg = PyTuple_GET_ITEM(args, 0);
+    else {
+      PyObject* tmp = PyDict_Values(kwargs);
+      auto tmp_ = make_safe(tmp);
+      arg = PyList_GET_ITEM(tmp, 0);
+    }
+
+    // If the constructor input is GMMBaseTrainer object
+    if (PyBobLearnEMGMMBaseTrainer_Check(arg))
+      return PyBobLearnEMGMMBaseTrainer_init_copy(self, args, kwargs);
+    else
+      return PyBobLearnEMGMMBaseTrainer_init_bool(self, args, kwargs);
+  }
+
+  BOB_CATCH_MEMBER("cannot create GMMBaseTrainer_init_bool", 0)
+  return 0;
+}
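From Python, the dispatch above supports three construction styles. A short sketch of each, assuming the module imports as `bob.learn.em`:

```python
import bob.learn.em

# Boolean initializer: only update_means is required, the rest are optional.
trainer = bob.learn.em.GMMBaseTrainer(True, True, False, 1e-10)

# Copy constructor and default constructor, dispatched by the init above.
clone = bob.learn.em.GMMBaseTrainer(trainer)
empty = bob.learn.em.GMMBaseTrainer()
```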
+
+
+static void PyBobLearnEMGMMBaseTrainer_delete(PyBobLearnEMGMMBaseTrainerObject* self) {
+  self->cxx.reset();
+  Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+
+int PyBobLearnEMGMMBaseTrainer_Check(PyObject* o) {
+  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnEMGMMBaseTrainer_Type));
+}
+
+
+static PyObject* PyBobLearnEMGMMBaseTrainer_RichCompare(PyBobLearnEMGMMBaseTrainerObject* self, PyObject* other, int op) {
+  BOB_TRY
+
+  if (!PyBobLearnEMGMMBaseTrainer_Check(other)) {
+    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
+    return 0;
+  }
+  auto other_ = reinterpret_cast<PyBobLearnEMGMMBaseTrainerObject*>(other);
+  switch (op) {
+    case Py_EQ:
+      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+    case Py_NE:
+      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
+    default:
+      Py_INCREF(Py_NotImplemented);
+      return Py_NotImplemented;
+  }
+  BOB_CATCH_MEMBER("cannot compare GMMBaseTrainer objects", 0)
+}
+
+
+/******************************************************************/
+/************ Variables Section ***********************************/
+/******************************************************************/
+
+
+/***** gmm_stats *****/
+static auto gmm_stats = bob::extension::VariableDoc(
+  "gmm_stats",
+  ":py:class:`bob.learn.em.GMMStats`",
+  "Get/Set GMMStats",
+  ""
+);
+PyObject* PyBobLearnEMGMMBaseTrainer_getGMMStats(PyBobLearnEMGMMBaseTrainerObject* self, void*){
+  BOB_TRY
+
+  bob::learn::em::GMMStats stats = self->cxx->getGMMStats();
+  boost::shared_ptr<bob::learn::em::GMMStats> stats_shared = boost::make_shared<bob::learn::em::GMMStats>(stats);
+
+  //Allocating the correspondent python object
+  PyBobLearnEMGMMStatsObject* retval =
+    (PyBobLearnEMGMMStatsObject*)PyBobLearnEMGMMStats_Type.tp_alloc(&PyBobLearnEMGMMStats_Type, 0);
+
+  retval->cxx = stats_shared;
+
+  return Py_BuildValue("O",retval);
+  BOB_CATCH_MEMBER("GMMStats could not be read", 0)
+}
+/*
+int PyBobLearnEMGMMBaseTrainer_setGMMStats(PyBobLearnEMGMMBaseTrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+
+  if (!PyBobLearnEMGMMStats_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a :py:class:`bob.learn.em.GMMStats`", Py_TYPE(self)->tp_name, gmm_stats.name());
+    return -1;
+  }
+
+  PyBobLearnEMGMMStatsObject* stats = 0;
+  PyArg_Parse(value, "O!", &PyBobLearnEMGMMStats_Type,&stats);
+
+  self->cxx->setGMMStats(*stats->cxx);
+
+  return 0;
+  BOB_CATCH_MEMBER("gmm_stats could not be set", -1)  
+}
+*/
+
+
+/***** update_means *****/
+static auto update_means = bob::extension::VariableDoc(
+  "update_means",
+  "bool",
+  "Update means on each iteration",
+  ""
+);
+PyObject* PyBobLearnEMGMMBaseTrainer_getUpdateMeans(PyBobLearnEMGMMBaseTrainerObject* self, void*){
+  BOB_TRY
+  return Py_BuildValue("O",self->cxx->getUpdateMeans()?Py_True:Py_False);
+  BOB_CATCH_MEMBER("update_means could not be read", 0)
+}
+
+/***** update_variances *****/
+static auto update_variances = bob::extension::VariableDoc(
+  "update_variances",
+  "bool",
+  "Update variances on each iteration",
+  ""
+);
+PyObject* PyBobLearnEMGMMBaseTrainer_getUpdateVariances(PyBobLearnEMGMMBaseTrainerObject* self, void*){
+  BOB_TRY
+  return Py_BuildValue("O",self->cxx->getUpdateVariances()?Py_True:Py_False);
+  BOB_CATCH_MEMBER("update_variances could not be read", 0)
+}
+
+
+/***** update_weights *****/
+static auto update_weights = bob::extension::VariableDoc(
+  "update_weights",
+  "bool",
+  "Update weights on each iteration",
+  ""
+);
+PyObject* PyBobLearnEMGMMBaseTrainer_getUpdateWeights(PyBobLearnEMGMMBaseTrainerObject* self, void*){
+  BOB_TRY
+  return Py_BuildValue("O",self->cxx->getUpdateWeights()?Py_True:Py_False);
+  BOB_CATCH_MEMBER("update_weights could not be read", 0)
+}
+
+
+
+/***** mean_var_update_responsibilities_threshold *****/
+static auto mean_var_update_responsibilities_threshold = bob::extension::VariableDoc(
+  "mean_var_update_responsibilities_threshold",
+  "bool",
+  "Threshold over the responsibilities of the Gaussians" 
+  "Equations 9.24, 9.25 of Bishop, \"Pattern recognition and machine learning\", 2006" 
+  "require a division by the responsibilities, which might be equal to zero" 
+  "because of numerical issue. This threshold is used to avoid such divisions.",
+  ""
+);
+PyObject* PyBobLearnEMGMMBaseTrainer_getMeanVarUpdateResponsibilitiesThreshold(PyBobLearnEMGMMBaseTrainerObject* self, void*){
+  BOB_TRY
+  return Py_BuildValue("d",self->cxx->getMeanVarUpdateResponsibilitiesThreshold());
+  BOB_CATCH_MEMBER("update_weights could not be read", 0)
+}
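These getters surface as read-only attributes on the Python side (no setters are bound). For instance:

```python
import bob.learn.em

trainer = bob.learn.em.GMMBaseTrainer(True, True, False)
print(trainer.update_means)       # True
print(trainer.update_variances)   # True
print(trainer.update_weights)     # False
print(trainer.mean_var_update_responsibilities_threshold)
```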
+
+
+static PyGetSetDef PyBobLearnEMGMMBaseTrainer_getseters[] = { 
+  {
+    update_means.name(),
+    (getter)PyBobLearnEMGMMBaseTrainer_getUpdateMeans,
+    0,
+    update_means.doc(),
+    0
+  },
+  {
+    update_variances.name(),
+    (getter)PyBobLearnEMGMMBaseTrainer_getUpdateVariances,
+    0,
+    update_variances.doc(),
+    0
+  },
+  {
+    update_weights.name(),
+    (getter)PyBobLearnEMGMMBaseTrainer_getUpdateWeights,
+    0,
+    update_weights.doc(),
+    0
+  },  
+  {
+    mean_var_update_responsibilities_threshold.name(),
+    (getter)PyBobLearnEMGMMBaseTrainer_getMeanVarUpdateResponsibilitiesThreshold,
+    0,
+    mean_var_update_responsibilities_threshold.doc(),
+    0
+  },  
+  {
+    gmm_stats.name(),
+    (getter)PyBobLearnEMGMMBaseTrainer_getGMMStats,
+    0, //(setter)PyBobLearnEMGMMBaseTrainer_setGMMStats,
+    gmm_stats.doc(),
+    0
+  },  
+  {0}  // Sentinel
+};
+
+
+/******************************************************************/
+/************ Functions Section ***********************************/
+/******************************************************************/
+
+/*** initialize ***/
+static auto initialize = bob::extension::FunctionDoc(
+  "initialize",
+  "Initialization before the EM steps",
+  "Instanciate :py:class:`bob.learn.em.GMMStats`",
+  true
+)
+.add_prototype("gmm_machine")
+.add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object");
+static PyObject* PyBobLearnEMGMMBaseTrainer_initialize(PyBobLearnEMGMMBaseTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = initialize.kwlist(0);
+
+  PyBobLearnEMGMMMachineObject* gmm_machine = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine)) Py_RETURN_NONE;
+
+  self->cxx->initialize(*gmm_machine->cxx);
+
+  BOB_CATCH_MEMBER("cannot perform the initialize method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+
+/*** eStep ***/
+static auto eStep = bob::extension::FunctionDoc(
+  "eStep",
+  "Calculates and saves statistics across the dataset,"
+  "and saves these as m_ss. ",
+
+  "Calculates the average log likelihood of the observations given the GMM,"
+  "and returns this in average_log_likelihood."
+  "The statistics, m_ss, will be used in the mStep() that follows.",
+
+  true
+)
+.add_prototype("gmm_machine,data")
+.add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object")
+.add_parameter("data", "array_like <float, 2D>", "Input data");
+static PyObject* PyBobLearnEMGMMBaseTrainer_eStep(PyBobLearnEMGMMBaseTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = eStep.kwlist(0);
+
+  PyBobLearnEMGMMMachineObject* gmm_machine;
+  PyBlitzArrayObject* data = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine,
+                                                                 &PyBlitzArray_Converter, &data)) Py_RETURN_NONE;
+  auto data_ = make_safe(data);
+
+  self->cxx->eStep(*gmm_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
+
+  BOB_CATCH_MEMBER("cannot perform the eStep method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** computeLikelihood ***/
+static auto compute_likelihood = bob::extension::FunctionDoc(
+  "compute_likelihood",
+  "This functions returns the average min (Square Euclidean) distance (average distance to the closest mean)",
+  0,
+  true
+)
+.add_prototype("gmm_machine")
+.add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object");
+static PyObject* PyBobLearnEMGMMBaseTrainer_compute_likelihood(PyBobLearnEMGMMBaseTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = compute_likelihood.kwlist(0);
+
+  PyBobLearnEMGMMMachineObject* gmm_machine;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine)) Py_RETURN_NONE;
+
+  double value = self->cxx->computeLikelihood(*gmm_machine->cxx);
+  return Py_BuildValue("d", value);
+
+  BOB_CATCH_MEMBER("cannot perform the computeLikelihood method", 0)
+}
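Putting the three functions together, one E-step round looks as follows; a sketch, with the machine constructor and data shapes taken from the bindings elsewhere in this patch:

```python
import numpy
import bob.learn.em

machine = bob.learn.em.GMMMachine(2, 3)        # 2 Gaussians, 3-D features
trainer = bob.learn.em.GMMBaseTrainer(True, True, True)
data = numpy.random.randn(100, 3)

trainer.initialize(machine)                    # allocates the internal GMMStats
trainer.eStep(machine, data)                   # accumulates n, sum_px, sum_pxx
print(trainer.compute_likelihood(machine))     # average log likelihood
print(trainer.gmm_stats.t)                     # 100 samples were accumulated
```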
+
+
+static PyMethodDef PyBobLearnEMGMMBaseTrainer_methods[] = {
+  {
+    initialize.name(),
+    (PyCFunction)PyBobLearnEMGMMBaseTrainer_initialize,
+    METH_VARARGS|METH_KEYWORDS,
+    initialize.doc()
+  },
+  {
+    eStep.name(),
+    (PyCFunction)PyBobLearnEMGMMBaseTrainer_eStep,
+    METH_VARARGS|METH_KEYWORDS,
+    eStep.doc()
+  },
+  {
+    compute_likelihood.name(),
+    (PyCFunction)PyBobLearnEMGMMBaseTrainer_compute_likelihood,
+    METH_VARARGS|METH_KEYWORDS,
+    compute_likelihood.doc()
+  },
+  {0} /* Sentinel */
+};
+
+
+/******************************************************************/
+/************ Module Section **************************************/
+/******************************************************************/
+
+// Define the GMMBaseTrainer type struct; will be initialized later
+PyTypeObject PyBobLearnEMGMMBaseTrainer_Type = {
+  PyVarObject_HEAD_INIT(0,0)
+  0
+};
+
+bool init_BobLearnEMGMMBaseTrainer(PyObject* module)
+{
+  // initialize the type struct
+  PyBobLearnEMGMMBaseTrainer_Type.tp_name      = GMMBaseTrainer_doc.name();
+  PyBobLearnEMGMMBaseTrainer_Type.tp_basicsize = sizeof(PyBobLearnEMGMMBaseTrainerObject);
+  PyBobLearnEMGMMBaseTrainer_Type.tp_flags     = Py_TPFLAGS_DEFAULT;
+  PyBobLearnEMGMMBaseTrainer_Type.tp_doc       = GMMBaseTrainer_doc.doc();
+
+  // set the functions
+  PyBobLearnEMGMMBaseTrainer_Type.tp_new          = PyType_GenericNew;
+  PyBobLearnEMGMMBaseTrainer_Type.tp_init         = reinterpret_cast<initproc>(PyBobLearnEMGMMBaseTrainer_init);
+  PyBobLearnEMGMMBaseTrainer_Type.tp_dealloc      = reinterpret_cast<destructor>(PyBobLearnEMGMMBaseTrainer_delete);
+  PyBobLearnEMGMMBaseTrainer_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnEMGMMBaseTrainer_RichCompare);
+  PyBobLearnEMGMMBaseTrainer_Type.tp_methods      = PyBobLearnEMGMMBaseTrainer_methods;
+  PyBobLearnEMGMMBaseTrainer_Type.tp_getset       = PyBobLearnEMGMMBaseTrainer_getseters;
+  PyBobLearnEMGMMBaseTrainer_Type.tp_call         = reinterpret_cast<ternaryfunc>(PyBobLearnEMGMMBaseTrainer_compute_likelihood);
+
+
+  // check that everything is fine
+  if (PyType_Ready(&PyBobLearnEMGMMBaseTrainer_Type) < 0) return false;
+
+  // add the type to the module
+  Py_INCREF(&PyBobLearnEMGMMBaseTrainer_Type);
+  return PyModule_AddObject(module, "GMMBaseTrainer", (PyObject*)&PyBobLearnEMGMMBaseTrainer_Type) >= 0;
+}
+
diff --git a/bob/learn/em/gmm_machine.cpp b/bob/learn/em/gmm_machine.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..efa9c5ae51e566e40ec527fb090c59fd3bbb2ccf
--- /dev/null
+++ b/bob/learn/em/gmm_machine.cpp
@@ -0,0 +1,834 @@
+/**
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ * @date Wed 11 Dec 18:01:00 2014
+ *
+ * @brief Python API for bob::learn::em
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "main.h"
+
+/******************************************************************/
+/************ Constructor Section *********************************/
+/******************************************************************/
+
+static auto GMMMachine_doc = bob::extension::ClassDoc(
+  BOB_EXT_MODULE_PREFIX ".GMMMachine",
+  "This class implements a multivariate diagonal Gaussian distribution.",
+  "See Section 2.3.9 of Bishop, \"Pattern recognition and machine learning\", 2006"
+).add_constructor(
+  bob::extension::FunctionDoc(
+    "__init__",
+    "Creates a GMMMachine",
+    "",
+    true
+  )
+  .add_prototype("n_gaussians,n_inputs","")
+  .add_prototype("other","")
+  .add_prototype("hdf5","")
+  .add_prototype("","")
+
+  .add_parameter("n_gaussians", "int", "Number of gaussians")
+  .add_parameter("n_inputs", "int", "Dimension of the feature vector")
+  .add_parameter("other", ":py:class:`bob.learn.em.GMMMachine`", "A GMMMachine object to be copied.")
+  .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
+
+);
+
+
+static int PyBobLearnEMGMMMachine_init_number(PyBobLearnEMGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = GMMMachine_doc.kwlist(0);
+  int n_inputs    = 1;
+  int n_gaussians = 1;
+  //Parsing the input arguments
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "ii", kwlist, &n_gaussians, &n_inputs))
+    return -1;
+
+  if(n_gaussians < 0){
+    PyErr_Format(PyExc_TypeError, "gaussians argument must be greater than or equal to zero");
+    return -1;
+  }
+
+  if(n_inputs < 0){
+    PyErr_Format(PyExc_TypeError, "input argument must be greater than or equal to zero");
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::GMMMachine(n_gaussians, n_inputs));
+  return 0;
+}
+
+
+static int PyBobLearnEMGMMMachine_init_copy(PyBobLearnEMGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = GMMMachine_doc.kwlist(1);
+  PyBobLearnEMGMMMachineObject* tt;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMMachine_Type, &tt)){
+    GMMMachine_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::GMMMachine(*tt->cxx));
+  return 0;
+}
+
+
+static int PyBobLearnEMGMMMachine_init_hdf5(PyBobLearnEMGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = GMMMachine_doc.kwlist(2);
+
+  PyBobIoHDF5FileObject* config = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBobIoHDF5File_Converter, &config)){
+    GMMMachine_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::GMMMachine(*(config->f)));
+
+  return 0;
+}
+
+
+
+static int PyBobLearnEMGMMMachine_init(PyBobLearnEMGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // get the number of command line arguments
+  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
+  
+  switch (nargs) {
+
+    case 0: //default initializer ()
+      self->cxx.reset(new bob::learn::em::GMMMachine());
+      return 0;
+
+    case 1:{
+      //Reading the input argument
+      PyObject* arg = 0;
+      if (PyTuple_Size(args))
+        arg = PyTuple_GET_ITEM(args, 0);
+      else {
+        PyObject* tmp = PyDict_Values(kwargs);
+        auto tmp_ = make_safe(tmp);
+        arg = PyList_GET_ITEM(tmp, 0);
+      }
+
+      // If the constructor input is a GMMMachine object
+      if (PyBobLearnEMGMMMachine_Check(arg))
+        return PyBobLearnEMGMMMachine_init_copy(self, args, kwargs);
+      // If the constructor input is an HDF5 file
+      else if (PyBobIoHDF5File_Check(arg))
+        return PyBobLearnEMGMMMachine_init_hdf5(self, args, kwargs);
+    }
+    case 2:
+      return PyBobLearnEMGMMMachine_init_number(self, args, kwargs);
+    default:
+      PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires 0, 1 or 2 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
+      GMMMachine_doc.print_usage();
+      return -1;
+  }
+  BOB_CATCH_MEMBER("cannot create GMMMachine", 0)
+  return 0;
+}
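The three prototypes from the class documentation map onto this dispatch. A sketch of each form (the HDF5 file name is hypothetical):

```python
import bob.learn.em
import bob.io.base

machine = bob.learn.em.GMMMachine(16, 40)   # n_gaussians=16, n_inputs=40
clone = bob.learn.em.GMMMachine(machine)    # copy constructor
# From an HDF5 file previously written with machine.save(...):
# loaded = bob.learn.em.GMMMachine(bob.io.base.HDF5File('gmm.hdf5'))
print(machine.shape)                        # (16, 40)
```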
+
+
+
+static void PyBobLearnEMGMMMachine_delete(PyBobLearnEMGMMMachineObject* self) {
+  self->cxx.reset();
+  Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+static PyObject* PyBobLearnEMGMMMachine_RichCompare(PyBobLearnEMGMMMachineObject* self, PyObject* other, int op) {
+  BOB_TRY
+
+  if (!PyBobLearnEMGMMMachine_Check(other)) {
+    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
+    return 0;
+  }
+  auto other_ = reinterpret_cast<PyBobLearnEMGMMMachineObject*>(other);
+  switch (op) {
+    case Py_EQ:
+      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+    case Py_NE:
+      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
+    default:
+      Py_INCREF(Py_NotImplemented);
+      return Py_NotImplemented;
+  }
+  BOB_CATCH_MEMBER("cannot compare GMMMachine objects", 0)
+}
+
+int PyBobLearnEMGMMMachine_Check(PyObject* o) {
+  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnEMGMMMachine_Type));
+}
+
+
+/******************************************************************/
+/************ Variables Section ***********************************/
+/******************************************************************/
+
+/***** shape *****/
+static auto shape = bob::extension::VariableDoc(
+  "shape",
+  "(int,int)",
+  "A tuple that represents the number of gaussians and dimensionality of each Gaussian ``(n_gaussians, dim)``.",
+  ""
+);
+PyObject* PyBobLearnEMGMMMachine_getShape(PyBobLearnEMGMMMachineObject* self, void*) {
+  BOB_TRY
+  return Py_BuildValue("(i,i)", self->cxx->getNGaussians(), self->cxx->getNInputs());
+  BOB_CATCH_MEMBER("shape could not be read", 0)
+}
+
+/***** MEAN *****/
+
+static auto means = bob::extension::VariableDoc(
+  "means",
+  "array_like <float, 2D>",
+  "The means of the gaussians",
+  ""
+);
+PyObject* PyBobLearnEMGMMMachine_getMeans(PyBobLearnEMGMMMachineObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getMeans());
+  BOB_CATCH_MEMBER("means could not be read", 0)
+}
+int PyBobLearnEMGMMMachine_setMeans(PyBobLearnEMGMMMachineObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, means.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "means");
+  if (!b) return -1;
+  self->cxx->setMeans(*b);
+  return 0;
+  BOB_CATCH_MEMBER("means could not be set", -1)
+}
+
+/***** Variance *****/
+static auto variances = bob::extension::VariableDoc(
+  "variances",
+  "array_like <float, 2D>",
+  "Variances of the gaussians",
+  ""
+);
+PyObject* PyBobLearnEMGMMMachine_getVariances(PyBobLearnEMGMMMachineObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getVariances());
+  BOB_CATCH_MEMBER("variances could not be read", 0)
+}
+int PyBobLearnEMGMMMachine_setVariances(PyBobLearnEMGMMMachineObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, variances.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "variances");
+  if (!b) return -1;
+  self->cxx->setVariances(*b);
+  return 0;
+  BOB_CATCH_MEMBER("variances could not be set", -1)
+}
+
+/***** Weights *****/
+static auto weights = bob::extension::VariableDoc(
+  "weights",
+  "array_like <float, 1D>",
+  "The weights (also known as \"mixing coefficients\")",
+  ""
+);
+PyObject* PyBobLearnEMGMMMachine_getWeights(PyBobLearnEMGMMMachineObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getWeights());
+  BOB_CATCH_MEMBER("weights could not be read", 0)
+}
+int PyBobLearnEMGMMMachine_setWeights(PyBobLearnEMGMMMachineObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, weights.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "weights");
+  if (!b) return -1;
+  self->cxx->setWeights(*b);
+  return 0;
+  BOB_CATCH_MEMBER("weights could not be set", -1)
+}
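With the setters above, a machine can be fully parametrized from numpy arrays; shapes must match ``(n_gaussians, dim)`` for means and variances and ``(n_gaussians,)`` for weights. A sketch:

```python
import numpy
import bob.learn.em

machine = bob.learn.em.GMMMachine(2, 3)
machine.means = numpy.array([[0., 0., 0.],
                             [4., 4., 4.]])
machine.variances = numpy.ones((2, 3))
machine.weights = numpy.array([0.4, 0.6])   # mixing coefficients, should sum to 1
```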
+
+
+/***** variance_supervector *****/
+static auto variance_supervector = bob::extension::VariableDoc(
+  "variance_supervector",
+  "array_like <float, 1D>",
+  "The variance supervector of the GMMMachine",
+  "Concatenation of the variance vectors of each Gaussian of the GMMMachine"
+);
+PyObject* PyBobLearnEMGMMMachine_getVarianceSupervector(PyBobLearnEMGMMMachineObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getVarianceSupervector());
+  BOB_CATCH_MEMBER("variance_supervector could not be read", 0)
+}
+int PyBobLearnEMGMMMachine_setVarianceSupervector(PyBobLearnEMGMMMachineObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, variance_supervector.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "variance_supervector");
+  if (!b) return -1;
+  self->cxx->setVarianceSupervector(*b);
+  return 0;
+  BOB_CATCH_MEMBER("variance_supervector could not be set", -1)
+}
+
+/***** mean_supervector *****/
+static auto mean_supervector = bob::extension::VariableDoc(
+  "mean_supervector",
+  "array_like <float, 1D>",
+  "The mean supervector of the GMMMachine",
+  "Concatenation of the mean vectors of each Gaussian of the GMMMachine"
+);
+PyObject* PyBobLearnEMGMMMachine_getMeanSupervector(PyBobLearnEMGMMMachineObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getMeanSupervector());
+  BOB_CATCH_MEMBER("mean_supervector could not be read", 0)
+}
+int PyBobLearnEMGMMMachine_setMeanSupervector(PyBobLearnEMGMMMachineObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, mean_supervector.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "mean_supervector");
+  if (!b) return -1;
+  self->cxx->setMeanSupervector(*b);
+  return 0;
+  BOB_CATCH_MEMBER("mean_supervector could not be set", -1)
+}
+
+
+
+/***** variance_thresholds *****/
+static auto variance_thresholds = bob::extension::VariableDoc(
+  "variance_thresholds",
+  "array_like <double, 2D>",
+  "Set the variance flooring thresholds in each dimension to the same vector for all Gaussian components if the argument is a 1D numpy arrray, and equal for all Gaussian components and dimensions if the parameter is a scalar. ",
+  ""
+);
+PyObject* PyBobLearnEMGMMMachine_getVarianceThresholds(PyBobLearnEMGMMMachineObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getVarianceThresholds());
+  BOB_CATCH_MEMBER("variance_thresholds could not be read", 0)
+}
+int PyBobLearnEMGMMMachine_setVarianceThresholds(PyBobLearnEMGMMMachineObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, variance_thresholds.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "variance_thresholds");
+  if (!b) return -1;
+  self->cxx->setVarianceThresholds(*b);
+  return 0;
+  BOB_CATCH_MEMBER("variance_thresholds could not be set", -1)  
+}
+
+
+
+
+static PyGetSetDef PyBobLearnEMGMMMachine_getseters[] = { 
+  {
+   shape.name(),
+   (getter)PyBobLearnEMGMMMachine_getShape,
+   0,
+   shape.doc(),
+   0
+  },
+  {
+   means.name(),
+   (getter)PyBobLearnEMGMMMachine_getMeans,
+   (setter)PyBobLearnEMGMMMachine_setMeans,
+   means.doc(),
+   0
+  },
+  {
+   variances.name(),
+   (getter)PyBobLearnEMGMMMachine_getVariances,
+   (setter)PyBobLearnEMGMMMachine_setVariances,
+   variances.doc(),
+   0
+  },
+  {
+   weights.name(),
+   (getter)PyBobLearnEMGMMMachine_getWeights,
+   (setter)PyBobLearnEMGMMMachine_setWeights,
+   weights.doc(),
+   0
+  },
+  {
+   variance_thresholds.name(),
+   (getter)PyBobLearnEMGMMMachine_getVarianceThresholds,
+   (setter)PyBobLearnEMGMMMachine_setVarianceThresholds,
+   variance_thresholds.doc(),
+   0
+  },
+  {
+   variance_supervector.name(),
+   (getter)PyBobLearnEMGMMMachine_getVarianceSupervector,
+   (setter)PyBobLearnEMGMMMachine_setVarianceSupervector,
+   variance_supervector.doc(),
+   0
+  },
+
+  {
+   mean_supervector.name(),
+   (getter)PyBobLearnEMGMMMachine_getMeanSupervector,
+   (setter)PyBobLearnEMGMMMachine_setMeanSupervector,
+   mean_supervector.doc(),
+   0
+  },
+  
+  {0}  // Sentinel
+};
+
+
+/******************************************************************/
+/************ Functions Section ***********************************/
+/******************************************************************/
+
+
+/*** save ***/
+static auto save = bob::extension::FunctionDoc(
+  "save",
+  "Save the configuration of the GMMMachine to a given HDF5 file"
+)
+.add_prototype("hdf5")
+.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing");
+static PyObject* PyBobLearnEMGMMMachine_Save(PyBobLearnEMGMMMachineObject* self,  PyObject* args, PyObject* kwargs) {
+
+  BOB_TRY
+  
+  // get list of arguments
+  char** kwlist = save.kwlist(0);  
+  PyBobIoHDF5FileObject* hdf5;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
+
+  auto hdf5_ = make_safe(hdf5);
+  self->cxx->save(*hdf5->f);
+
+  BOB_CATCH_MEMBER("cannot save the data", 0)
+  Py_RETURN_NONE;
+}
+
+/*** load ***/
+static auto load = bob::extension::FunctionDoc(
+  "load",
+  "Load the configuration of the GMMMachine to a given HDF5 file"
+)
+.add_prototype("hdf5")
+.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
+static PyObject* PyBobLearnEMGMMMachine_Load(PyBobLearnEMGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = load.kwlist(0);  
+  PyBobIoHDF5FileObject* hdf5;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
+  
+  auto hdf5_ = make_safe(hdf5);  
+  self->cxx->load(*hdf5->f);
+
+  BOB_CATCH_MEMBER("cannot load the data", 0)
+  Py_RETURN_NONE;
+}
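A save/load round trip through `bob.io.base.HDF5File`; the file name is hypothetical and the `'w'` open mode is an assumption about the HDF5File API:

```python
import bob.io.base
import bob.learn.em

machine = bob.learn.em.GMMMachine(2, 3)
machine.save(bob.io.base.HDF5File('gmm.hdf5', 'w'))

restored = bob.learn.em.GMMMachine(2, 3)
restored.load(bob.io.base.HDF5File('gmm.hdf5'))
assert machine.is_similar_to(restored)
```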
+
+
+/*** is_similar_to ***/
+static auto is_similar_to = bob::extension::FunctionDoc(
+  "is_similar_to",
+  
+  "Compares this GMMMachine with the ``other`` one to be approximately the same.",
+  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
+  "relative and absolute precision for the ``weights``, ``biases`` "
+  "and any other values internal to this machine."
+)
+.add_prototype("other, [r_epsilon], [a_epsilon]","output")
+.add_parameter("other", ":py:class:`bob.learn.em.GMMMachine`", "A GMMMachine object to be compared.")
+.add_parameter("r_epsilon", "float", "Relative precision.")
+.add_parameter("a_epsilon", "float", "Absolute precision.")
+.add_return("output","bool","True if it is similar, otherwise false.");
+static PyObject* PyBobLearnEMGMMMachine_IsSimilarTo(PyBobLearnEMGMMMachineObject* self, PyObject* args, PyObject* kwds) {
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = is_similar_to.kwlist(0);
+
+  //PyObject* other = 0;
+  PyBobLearnEMGMMMachineObject* other = 0;
+  double r_epsilon = 1.e-5;
+  double a_epsilon = 1.e-8;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
+        &PyBobLearnEMGMMMachine_Type, &other,
+        &r_epsilon, &a_epsilon)){
+
+        is_similar_to.print_usage(); 
+        return 0;        
+  }
+
+  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
+    Py_RETURN_TRUE;
+  else
+    Py_RETURN_FALSE;
+}
+
+
+/*** resize ***/
+static auto resize = bob::extension::FunctionDoc(
+  "resize",
+  "Allocates space for the statistics and resets to zero.",
+  0,
+  true
+)
+.add_prototype("n_gaussians,n_inputs")
+.add_parameter("n_gaussians", "int", "Number of gaussians")
+.add_parameter("n_inputs", "int", "Dimensionality of the feature vector");
+static PyObject* PyBobLearnEMGMMMachine_resize(PyBobLearnEMGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = resize.kwlist(0);
+
+  int n_gaussians = 0;
+  int n_inputs = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "ii", kwlist, &n_gaussians, &n_inputs)) Py_RETURN_NONE;
+
+  if (n_gaussians <= 0){
+    PyErr_Format(PyExc_TypeError, "n_gaussians must be greater than zero");
+    resize.print_usage();
+    return 0;
+  }
+  if (n_inputs <= 0){
+    PyErr_Format(PyExc_TypeError, "n_inputs must be greater than zero");
+    resize.print_usage();
+    return 0;
+  }
+
+  self->cxx->resize(n_gaussians, n_inputs);
+
+  BOB_CATCH_MEMBER("cannot perform the resize method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** log_likelihood ***/
+static auto log_likelihood = bob::extension::FunctionDoc(
+  "log_likelihood",
+  "Output the log likelihood of the sample, x, i.e. log(p(x|GMM)). Inputs are checked.",
+  ".. note:: The :py:meth:`__call__` function is an alias for this.", 
+  true
+)
+.add_prototype("input","output")
+.add_parameter("input", "array_like <float, 1D>", "Input vector")
+.add_return("output","float","The log likelihood");
+static PyObject* PyBobLearnEMGMMMachine_loglikelihood(PyBobLearnEMGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = log_likelihood.kwlist(0);
+
+  PyBlitzArrayObject* input = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBlitzArray_Converter, &input)) Py_RETURN_NONE;
+  //protects acquired resources through this scope
+  auto input_ = make_safe(input);
+
+  double value = self->cxx->logLikelihood(*PyBlitzArrayCxx_AsBlitz<double,1>(input));
+  return Py_BuildValue("d", value);
+
+  BOB_CATCH_MEMBER("cannot compute the likelihood", 0)
+}
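Since ``tp_call`` is wired to this function in the module section below, both spellings are equivalent. A sketch:

```python
import numpy
import bob.learn.em

machine = bob.learn.em.GMMMachine(2, 3)
x = numpy.zeros((3,))
print(machine.log_likelihood(x))   # checked variant
print(machine(x))                  # __call__ alias
```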
+
+
+/*** log_likelihood_ ***/
+static auto log_likelihood_ = bob::extension::FunctionDoc(
+  "log_likelihood_",
+  "Output the log likelihood of the sample, x, i.e. log(p(x|GMM)). Inputs are NOT checked.",
+  "", 
+  true
+)
+.add_prototype("input","output")
+.add_parameter("input", "array_like <float, 1D>", "Input vector")
+.add_return("output","float","The log likelihood");
+static PyObject* PyBobLearnEMGMMMachine_loglikelihood_(PyBobLearnEMGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = log_likelihood_.kwlist(0);
+
+  PyBlitzArrayObject* input = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBlitzArray_Converter, &input)) Py_RETURN_NONE;
+  //protects acquired resources through this scope
+  auto input_ = make_safe(input);
+
+  double value = self->cxx->logLikelihood_(*PyBlitzArrayCxx_AsBlitz<double,1>(input));
+  return Py_BuildValue("d", value);
+
+  BOB_CATCH_MEMBER("cannot compute the likelihood", 0)
+}
+
+
+/*** acc_statistics ***/
+static auto acc_statistics = bob::extension::FunctionDoc(
+  "acc_statistics",
+  "Accumulate the GMM statistics for this sample(s). Inputs are checked.",
+  "", 
+  true
+)
+.add_prototype("input,stats")
+.add_parameter("input", "array_like <float, 2D>", "Input vector")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "Statistics of the GMM");
+static PyObject* PyBobLearnEMGMMMachine_accStatistics(PyBobLearnEMGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  char** kwlist = acc_statistics.kwlist(0);
+
+  PyBlitzArrayObject* input           = 0;
+  PyBobLearnEMGMMStatsObject* stats = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O!", kwlist, &PyBlitzArray_Converter,&input, 
+                                                                 &PyBobLearnEMGMMStats_Type, &stats))
+    Py_RETURN_NONE;
+
+  //protects acquired resources through this scope
+  auto input_ = make_safe(input);
+  self->cxx->accStatistics(*PyBlitzArrayCxx_AsBlitz<double,2>(input), *stats->cxx);
+
+  BOB_CATCH_MEMBER("cannot accumulate the statistics", 0)
+  Py_RETURN_NONE;
+}
+
+
+/*** acc_statistics_ ***/
+static auto acc_statistics_ = bob::extension::FunctionDoc(
+  "acc_statistics_",
+  "Accumulate the GMM statistics for this sample(s). Inputs are NOT checked.",
+  "", 
+  true
+)
+.add_prototype("input,stats")
+.add_parameter("input", "array_like <float, 2D>", "Input vector")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "Statistics of the GMM");
+static PyObject* PyBobLearnEMGMMMachine_accStatistics_(PyBobLearnEMGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = acc_statistics_.kwlist(0);
+
+  PyBlitzArrayObject* input = 0;
+  PyBobLearnEMGMMStatsObject* stats = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O!", kwlist, &PyBlitzArray_Converter, &input,
+                                                                 &PyBobLearnEMGMMStats_Type, &stats))
+    return 0;
+
+  //protects acquired resources through this scope
+  auto input_ = make_safe(input);
+  self->cxx->accStatistics_(*PyBlitzArrayCxx_AsBlitz<double,2>(input), *stats->cxx);
+
+  BOB_CATCH_MEMBER("cannot accumulate the statistics", 0)
+  Py_RETURN_NONE;
+}
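Accumulation fills a GMMStats container that a trainer's M-step can consume later. A sketch using the checked variant:

```python
import numpy
import bob.learn.em

machine = bob.learn.em.GMMMachine(2, 3)
stats = bob.learn.em.GMMStats(2, 3)
data = numpy.random.randn(50, 3)

machine.acc_statistics(data, stats)   # updates stats.n, stats.sum_px, stats.sum_pxx
print(stats.t)                        # 50
```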
+
+
+
+/*** set_variance_thresholds ***/
+static auto set_variance_thresholds = bob::extension::FunctionDoc(
+  "set_variance_thresholds",
+  "Set the variance flooring thresholds in each dimension to the same vector for all Gaussian components if the argument is a 1D numpy arrray, and equal for all Gaussian components and dimensions if the parameter is a scalar.",
+  "",
+  true
+)
+.add_prototype("input")
+.add_parameter("input", "array_like <float, 1D>", "Input vector");
+static PyObject* PyBobLearnEMGMMMachine_setVarianceThresholds_method(PyBobLearnEMGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  char** kwlist = set_variance_thresholds.kwlist(0);
+
+  PyBlitzArrayObject* input_array = 0;
+  double input_number = 0;
+  if(PyArg_ParseTupleAndKeywords(args, kwargs, "d", kwlist, &input_number)){
+    self->cxx->setVarianceThresholds(input_number);
+  }
+  else if(PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBlitzArray_Converter,&input_array)) {
+    //protects acquired resources through this scope
+    auto input_ = make_safe(input_array);
+    self->cxx->setVarianceThresholds(*PyBlitzArrayCxx_AsBlitz<double,1>(input_array));
+  }
+  else
+    return 0;
+
+  BOB_CATCH_MEMBER("cannot set the variance thresholds", 0)
+  Py_RETURN_NONE;
+}
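The double parse above is what lets the same Python method accept either a scalar or a 1D vector:

```python
import numpy
import bob.learn.em

machine = bob.learn.em.GMMMachine(2, 3)
machine.set_variance_thresholds(5e-4)                              # one scalar for everything
machine.set_variance_thresholds(numpy.array([1e-5, 1e-4, 1e-3]))   # per dimension
```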
+
+
+
+
+/*** get_gaussian ***/
+static auto get_gaussian = bob::extension::FunctionDoc(
+  "get_gaussian",
+  "Get the specified Gaussian component.",
+  ".. note:: An exception is thrown if i is out of range.", 
+  true
+)
+.add_prototype("i","gaussian")
+.add_parameter("i", "int", "Index of the gaussian")
+.add_return("gaussian",":py:class:`bob.learn.em.Gaussian`","Gaussian object");
+static PyObject* PyBobLearnEMGMMMachine_get_gaussian(PyBobLearnEMGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = get_gaussian.kwlist(0);
+
+  int i = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) Py_RETURN_NONE;
+ 
+  boost::shared_ptr<bob::learn::em::Gaussian> gaussian = self->cxx->getGaussian(i);
+
+  //Allocating the correspondent python object
+  PyBobLearnEMGaussianObject* retval =
+    (PyBobLearnEMGaussianObject*)PyBobLearnEMGaussian_Type.tp_alloc(&PyBobLearnEMGaussian_Type, 0);
+
+  retval->cxx = gaussian;
+
+  return reinterpret_cast<PyObject*>(retval);
+
+  BOB_CATCH_MEMBER("cannot get the Gaussian", 0)
+}
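The returned object shares the underlying C++ Gaussian with the machine (the boost::shared_ptr is handed over directly). A sketch:

```python
import numpy
import bob.learn.em

machine = bob.learn.em.GMMMachine(2, 3)
g = machine.get_gaussian(0)               # a bob.learn.em.Gaussian component
print(g.log_likelihood(numpy.zeros(3)))   # evaluate that single component
```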
+
+
+
+static PyMethodDef PyBobLearnEMGMMMachine_methods[] = {
+  {
+    save.name(),
+    (PyCFunction)PyBobLearnEMGMMMachine_Save,
+    METH_VARARGS|METH_KEYWORDS,
+    save.doc()
+  },
+  {
+    load.name(),
+    (PyCFunction)PyBobLearnEMGMMMachine_Load,
+    METH_VARARGS|METH_KEYWORDS,
+    load.doc()
+  },
+  {
+    is_similar_to.name(),
+    (PyCFunction)PyBobLearnEMGMMMachine_IsSimilarTo,
+    METH_VARARGS|METH_KEYWORDS,
+    is_similar_to.doc()
+  },
+  {
+    resize.name(),
+    (PyCFunction)PyBobLearnEMGMMMachine_resize,
+    METH_VARARGS|METH_KEYWORDS,
+    resize.doc()
+  },
+  {
+    log_likelihood.name(),
+    (PyCFunction)PyBobLearnEMGMMMachine_loglikelihood,
+    METH_VARARGS|METH_KEYWORDS,
+    log_likelihood.doc()
+  },
+  {
+    log_likelihood_.name(),
+    (PyCFunction)PyBobLearnEMGMMMachine_loglikelihood_,
+    METH_VARARGS|METH_KEYWORDS,
+    log_likelihood_.doc()
+  },
+  {
+    acc_statistics.name(),
+    (PyCFunction)PyBobLearnEMGMMMachine_accStatistics,
+    METH_VARARGS|METH_KEYWORDS,
+    acc_statistics.doc()
+  },
+  {
+    acc_statistics_.name(),
+    (PyCFunction)PyBobLearnEMGMMMachine_accStatistics_,
+    METH_VARARGS|METH_KEYWORDS,
+    acc_statistics_.doc()
+  },
+ 
+  {
+    get_gaussian.name(),
+    (PyCFunction)PyBobLearnEMGMMMachine_get_gaussian,
+    METH_VARARGS|METH_KEYWORDS,
+    get_gaussian.doc()
+  },
+
+  {
+    set_variance_thresholds.name(),
+    (PyCFunction)PyBobLearnEMGMMMachine_setVarianceThresholds_method,
+    METH_VARARGS|METH_KEYWORDS,
+    set_variance_thresholds.doc()
+  },
+  
+  {0} /* Sentinel */
+};
+
+
+/******************************************************************/
+/************ Module Section **************************************/
+/******************************************************************/
+
+// Define the GMMMachine type struct; will be initialized later
+PyTypeObject PyBobLearnEMGMMMachine_Type = {
+  PyVarObject_HEAD_INIT(0,0)
+  0
+};
+
+bool init_BobLearnEMGMMMachine(PyObject* module)
+{
+  // initialize the type struct
+  PyBobLearnEMGMMMachine_Type.tp_name = GMMMachine_doc.name();
+  PyBobLearnEMGMMMachine_Type.tp_basicsize = sizeof(PyBobLearnEMGMMMachineObject);
+  PyBobLearnEMGMMMachine_Type.tp_flags = Py_TPFLAGS_DEFAULT;
+  PyBobLearnEMGMMMachine_Type.tp_doc = GMMMachine_doc.doc();
+
+  // set the functions
+  PyBobLearnEMGMMMachine_Type.tp_new = PyType_GenericNew;
+  PyBobLearnEMGMMMachine_Type.tp_init = reinterpret_cast<initproc>(PyBobLearnEMGMMMachine_init);
+  PyBobLearnEMGMMMachine_Type.tp_dealloc = reinterpret_cast<destructor>(PyBobLearnEMGMMMachine_delete);
+  PyBobLearnEMGMMMachine_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnEMGMMMachine_RichCompare);
+  PyBobLearnEMGMMMachine_Type.tp_methods = PyBobLearnEMGMMMachine_methods;
+  PyBobLearnEMGMMMachine_Type.tp_getset = PyBobLearnEMGMMMachine_getseters;
+  PyBobLearnEMGMMMachine_Type.tp_call = reinterpret_cast<ternaryfunc>(PyBobLearnEMGMMMachine_loglikelihood);
+
+
+  // check that everything is fine
+  if (PyType_Ready(&PyBobLearnEMGMMMachine_Type) < 0) return false;
+
+  // add the type to the module
+  Py_INCREF(&PyBobLearnEMGMMMachine_Type);
+  return PyModule_AddObject(module, "GMMMachine", (PyObject*)&PyBobLearnEMGMMMachine_Type) >= 0;
+}
+
diff --git a/bob/learn/em/gmm_stats.cpp b/bob/learn/em/gmm_stats.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a02a3876de987af09d3891e9a4df284bbc2efb33
--- /dev/null
+++ b/bob/learn/em/gmm_stats.cpp
@@ -0,0 +1,623 @@
+/**
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ * @date Wed 03 Dec 14:38:48 2014
+ *
+ * @brief Python API for bob::learn::em
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "main.h"
+
+/******************************************************************/
+/************ Constructor Section *********************************/
+/******************************************************************/
+
+static auto GMMStats_doc = bob::extension::ClassDoc(
+  BOB_EXT_MODULE_PREFIX ".GMMStats",
+  "A container for GMM statistics",
+  "With respect to [Reynolds2000]_ the class computes: \n\n"
+  "* Eq (8) is :py:class:`bob.learn.em.GMMStats.n`: :math:`n_i=\\sum\\limits_{t=1}^T Pr(i | x_t)`\n\n"
+  "* Eq (9) is :py:class:`bob.learn.em.GMMStats.sum_px`:  :math:`E_i(x)=\\frac{1}{n(i)}\\sum\\limits_{t=1}^T Pr(i | x_t)x_t`\n\n"
+  "* Eq (10) is :py:class:`bob.learn.em.GMMStats.sum_pxx`: :math:`E_i(x^2)=\\frac{1}{n(i)}\\sum\\limits_{t=1}^T Pr(i | x_t)x_t^2`\n\n"
+).add_constructor(
+  bob::extension::FunctionDoc(
+    "__init__",
+    "A container for GMM statistics.",
+    "",
+    true
+  )
+  .add_prototype("n_gaussians,n_inputs","")
+  .add_prototype("other","")
+  .add_prototype("hdf5","")
+  .add_prototype("","")
+
+  .add_parameter("n_gaussians", "int", "Number of gaussians")
+  .add_parameter("n_inputs", "int", "Dimension of the feature vector")
+  .add_parameter("other", ":py:class:`bob.learn.em.GMMStats`", "A GMMStats object to be copied.")
+  .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
+
+);
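A numpy sketch of what these accumulators hold; `p` (the responsibilities) and `X` (the samples) are made-up names. Note that, per the attribute docs below, the stored quantities are the unnormalized sums, without the 1/n_i factor shown in Eqs (9) and (10):

```python
import numpy

T, n_gaussians, dim = 100, 2, 3
p = numpy.random.dirichlet(numpy.ones(n_gaussians), size=T)  # Pr(i|x_t), shape (T, n_gaussians)
X = numpy.random.randn(T, dim)                               # samples, shape (T, dim)

n = p.sum(axis=0)           # Eq (8):  n_i = sum_t Pr(i|x_t)
sum_px = p.T.dot(X)         # Eq (9) numerator:  sum_t Pr(i|x_t) x_t
sum_pxx = p.T.dot(X * X)    # Eq (10) numerator: sum_t Pr(i|x_t) x_t^2
```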
+
+
+static int PyBobLearnEMGMMStats_init_number(PyBobLearnEMGMMStatsObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = GMMStats_doc.kwlist(0);
+  int n_inputs    = 1;
+  int n_gaussians = 1;
+  //Parsing the input arguments
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "ii", kwlist, &n_gaussians, &n_inputs))
+    return -1;
+
+  if(n_gaussians < 0){
+    PyErr_Format(PyExc_TypeError, "gaussians argument must be greater than or equal to zero");
+    GMMStats_doc.print_usage();
+    return -1;
+  }
+
+  if(n_inputs < 0){
+    PyErr_Format(PyExc_TypeError, "input argument must be greater than or equal to zero");
+    GMMStats_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::GMMStats(n_gaussians, n_inputs));
+  return 0;
+}
+
+
+static int PyBobLearnEMGMMStats_init_copy(PyBobLearnEMGMMStatsObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = GMMStats_doc.kwlist(1);
+  PyBobLearnEMGMMStatsObject* tt;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMStats_Type, &tt)){
+    GMMStats_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::GMMStats(*tt->cxx));
+  return 0;
+}
+
+
+static int PyBobLearnEMGMMStats_init_hdf5(PyBobLearnEMGMMStatsObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = GMMStats_doc.kwlist(2);
+
+  PyBobIoHDF5FileObject* config = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBobIoHDF5File_Converter, &config)){
+    GMMStats_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::GMMStats(*(config->f)));
+
+  return 0;
+}
+
+
+
+static int PyBobLearnEMGMMStats_init(PyBobLearnEMGMMStatsObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // get the number of command line arguments
+  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
+
+  switch (nargs) {
+
+    case 0: //default initializer ()
+      self->cxx.reset(new bob::learn::em::GMMStats());
+      return 0;
+
+    case 1:{
+      //Reading the input argument
+      PyObject* arg = 0;
+      if (PyTuple_Size(args))
+        arg = PyTuple_GET_ITEM(args, 0);
+      else {
+        PyObject* tmp = PyDict_Values(kwargs);
+        auto tmp_ = make_safe(tmp);
+        arg = PyList_GET_ITEM(tmp, 0);
+      }
+
+      // If the constructor input is a GMMStats object
+      if (PyBobLearnEMGMMStats_Check(arg))
+        return PyBobLearnEMGMMStats_init_copy(self, args, kwargs);
+      // If the constructor input is an HDF5 file
+      else if (PyBobIoHDF5File_Check(arg))
+        return PyBobLearnEMGMMStats_init_hdf5(self, args, kwargs);
+    }
+    case 2:
+      return PyBobLearnEMGMMStats_init_number(self, args, kwargs);
+    default:
+      PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires 0, 1 or 2 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
+      GMMStats_doc.print_usage();
+      return -1;
+  }
+  BOB_CATCH_MEMBER("cannot create GMMStats", 0)
+  return 0;
+}
+
+
+
+static void PyBobLearnEMGMMStats_delete(PyBobLearnEMGMMStatsObject* self) {
+  self->cxx.reset();
+  Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+static PyObject* PyBobLearnEMGMMStats_RichCompare(PyBobLearnEMGMMStatsObject* self, PyObject* other, int op) {
+  BOB_TRY
+
+  if (!PyBobLearnEMGMMStats_Check(other)) {
+    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
+    return 0;
+  }
+  auto other_ = reinterpret_cast<PyBobLearnEMGMMStatsObject*>(other);
+  switch (op) {
+    case Py_EQ:
+      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+    case Py_NE:
+      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
+    default:
+      Py_INCREF(Py_NotImplemented);
+      return Py_NotImplemented;
+  }
+  BOB_CATCH_MEMBER("cannot compare GMMStats objects", 0)
+}
+
+int PyBobLearnEMGMMStats_Check(PyObject* o) {
+  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnEMGMMStats_Type));
+}
+
+
+/******************************************************************/
+/************ Variables Section ***********************************/
+/******************************************************************/
+
+/***** n *****/
+static auto n = bob::extension::VariableDoc(
+  "n",
+  "array_like <float, 1D>",
+  "For each Gaussian, the accumulated sum of responsibilities, i.e. the sum of :math:`P(gaussian_i|x)`"
+);
+PyObject* PyBobLearnEMGMMStats_getN(PyBobLearnEMGMMStatsObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->n);
+  BOB_CATCH_MEMBER("n could not be read", 0)
+}
+int PyBobLearnEMGMMStats_setN(PyBobLearnEMGMMStatsObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, n.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "n");
+  if (!b) return -1;
+  self->cxx->n = *b;
+  return 0;
+  BOB_CATCH_MEMBER("n could not be set", -1)  
+}
+
+
+/***** sum_px *****/
+static auto sum_px = bob::extension::VariableDoc(
+  "sum_px",
+  "array_like <float, 2D>",
+  "For each Gaussian, the accumulated sum of responsibility times the sample"
+);
+PyObject* PyBobLearnEMGMMStats_getSum_px(PyBobLearnEMGMMStatsObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->sumPx);
+  BOB_CATCH_MEMBER("sum_px could not be read", 0)
+}
+int PyBobLearnEMGMMStats_setSum_px(PyBobLearnEMGMMStatsObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, sum_px.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "sum_px");
+  if (!b) return -1;
+  self->cxx->sumPx = *b;
+  return 0;
+  BOB_CATCH_MEMBER("sum_px could not be set", -1)  
+}
+
+
+/***** sum_pxx *****/
+static auto sum_pxx = bob::extension::VariableDoc(
+  "sum_pxx",
+  "array_like <float, 2D>",
+  "For each Gaussian, the accumulated sum of responsibility times the sample squared"
+);
+PyObject* PyBobLearnEMGMMStats_getSum_pxx(PyBobLearnEMGMMStatsObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->sumPxx);
+  BOB_CATCH_MEMBER("sum_pxx could not be read", 0)
+}
+int PyBobLearnEMGMMStats_setSum_pxx(PyBobLearnEMGMMStatsObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, sum_pxx.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "sum_pxx");
+  if (!b) return -1;
+  self->cxx->sumPxx = *b;
+  return 0;
+  BOB_CATCH_MEMBER("sum_pxx could not be set", -1)  
+}
+
+
+/***** t *****/
+static auto t = bob::extension::VariableDoc(
+  "t",
+  "int",
+  "The number of samples"
+);
+PyObject* PyBobLearnEMGMMStats_getT(PyBobLearnEMGMMStatsObject* self, void*){
+  BOB_TRY
+  return Py_BuildValue("i", self->cxx->T);
+  BOB_CATCH_MEMBER("t could not be read", 0)
+}
+int PyBobLearnEMGMMStats_setT(PyBobLearnEMGMMStatsObject* self, PyObject* value, void*){
+  BOB_TRY
+
+  if (!PyInt_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects an int", Py_TYPE(self)->tp_name, t.name());
+    return -1;
+  }
+
+  if (PyInt_AS_LONG(value) < 0){
+    PyErr_Format(PyExc_TypeError, "t must be greater than or equal to zero");
+    return -1;
+  }
+
+  self->cxx->T = PyInt_AS_LONG(value);
+  BOB_CATCH_MEMBER("t could not be set", -1)
+  return 0;
+}
+
+
+/***** log_likelihood *****/
+static auto log_likelihood = bob::extension::VariableDoc(
+  "log_likelihood",
+  "double",
+  "The accumulated log likelihood of all samples"
+);
+PyObject* PyBobLearnEMGMMStats_getLog_likelihood(PyBobLearnEMGMMStatsObject* self, void*){
+  BOB_TRY
+  return Py_BuildValue("d","log_likelihood", self->cxx->log_likelihood);
+  BOB_CATCH_MEMBER("log_likelihood could not be read", 0)
+}
+int PyBobLearnEMGMMStats_setLog_likelihood(PyBobLearnEMGMMStatsObject* self, PyObject* value, void*){
+  BOB_TRY
+
+  if (!PyNumber_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects an double", Py_TYPE(self)->tp_name, t.name());
+    return -1;
+  }
+
+  self->cxx->log_likelihood = PyFloat_AsDouble(value);
+  return 0;
+  BOB_CATCH_MEMBER("log_likelihood could not be set", -1)
+}
+
+
+/***** shape *****/
+static auto shape = bob::extension::VariableDoc(
+  "shape",
+  "(int,int)",
+  "A tuple that represents the number of gaussians and dimensionality of each Gaussian ``(n_gaussians, dim)``.",
+  ""
+);
+PyObject* PyBobLearnEMGMMStats_getShape(PyBobLearnEMGMMStatsObject* self, void*) {
+  BOB_TRY
+  return Py_BuildValue("(i,i)", self->cxx->sumPx.shape()[0], self->cxx->sumPx.shape()[1]);
+  BOB_CATCH_MEMBER("shape could not be read", 0)
+}
+
+
+
+static PyGetSetDef PyBobLearnEMGMMStats_getseters[] = {
+  {
+    n.name(),
+    (getter)PyBobLearnEMGMMStats_getN,
+    (setter)PyBobLearnEMGMMStats_setN,
+    n.doc(),
+    0
+  },
+  {
+    sum_px.name(),
+    (getter)PyBobLearnEMGMMStats_getSum_px,
+    (setter)PyBobLearnEMGMMStats_setSum_px,
+    sum_px.doc(),
+    0
+  },
+  {
+    sum_pxx.name(),
+    (getter)PyBobLearnEMGMMStats_getSum_pxx,
+    (setter)PyBobLearnEMGMMStats_setSum_pxx,
+    sum_pxx.doc(),
+    0
+  },
+  {
+    t.name(),
+    (getter)PyBobLearnEMGMMStats_getT,
+    (setter)PyBobLearnEMGMMStats_setT,
+    t.doc(),
+    0
+  },
+  {
+    log_likelihood.name(),
+    (getter)PyBobLearnEMGMMStats_getLog_likelihood,
+    (setter)PyBobLearnEMGMMStats_setLog_likelihood,
+    log_likelihood.doc(),
+    0
+  },  
+  {
+   shape.name(),
+   (getter)PyBobLearnEMGMMStats_getShape,
+   0,
+   shape.doc(),
+   0
+  },
+
+
+  {0}  // Sentinel
+};
+
+
+/******************************************************************/
+/************ Functions Section ***********************************/
+/******************************************************************/
+
+
+/*** save ***/
+static auto save = bob::extension::FunctionDoc(
+  "save",
+  "Save the configuration of the GMMStats to a given HDF5 file"
+)
+.add_prototype("hdf5")
+.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing");
+static PyObject* PyBobLearnEMGMMStats_Save(PyBobLearnEMGMMStatsObject* self,  PyObject* args, PyObject* kwargs) {
+
+  BOB_TRY
+  
+  // get list of arguments
+  char** kwlist = save.kwlist(0);  
+  PyBobIoHDF5FileObject* hdf5;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
+
+  auto hdf5_ = make_safe(hdf5);
+  self->cxx->save(*hdf5->f);
+
+  BOB_CATCH_MEMBER("cannot save the data", 0)
+  Py_RETURN_NONE;
+}
+
+/*** load ***/
+static auto load = bob::extension::FunctionDoc(
+  "load",
+  "Load the configuration of the GMMStats to a given HDF5 file"
+)
+.add_prototype("hdf5")
+.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
+static PyObject* PyBobLearnEMGMMStats_Load(PyBobLearnEMGMMStatsObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = load.kwlist(0);  
+  PyBobIoHDF5FileObject* hdf5;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
+  
+  auto hdf5_ = make_safe(hdf5);  
+  self->cxx->load(*hdf5->f);
+
+  BOB_CATCH_MEMBER("cannot load the data", 0)
+  Py_RETURN_NONE;
+}
+
+
+/*** is_similar_to ***/
+static auto is_similar_to = bob::extension::FunctionDoc(
+  "is_similar_to",
+  
+  "Compares this GMMStats with the ``other`` one to be approximately the same.",
+  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
+  "relative and absolute precision for the ``weights``, ``biases`` "
+  "and any other values internal to this machine."
+)
+.add_prototype("other, [r_epsilon], [a_epsilon]","output")
+.add_parameter("other", ":py:class:`bob.learn.em.GMMStats`", "A GMMStats object to be compared.")
+.add_parameter("r_epsilon", "float", "Relative precision.")
+.add_parameter("a_epsilon", "float", "Absolute precision.")
+.add_return("output","bool","True if it is similar, otherwise false.");
+static PyObject* PyBobLearnEMGMMStats_IsSimilarTo(PyBobLearnEMGMMStatsObject* self, PyObject* args, PyObject* kwds) {
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = is_similar_to.kwlist(0);
+
+  PyBobLearnEMGMMStatsObject* other = 0;
+  double r_epsilon = 1.e-5;
+  double a_epsilon = 1.e-8;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
+        &PyBobLearnEMGMMStats_Type, &other,
+        &r_epsilon, &a_epsilon)){
+
+        is_similar_to.print_usage(); 
+        return 0;        
+  }
+
+  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
+    Py_RETURN_TRUE;
+  else
+    Py_RETURN_FALSE;
+}
+
+
+/*** resize ***/
+static auto resize = bob::extension::FunctionDoc(
+  "resize",
+  "Allocates space for the statistics and resets to zero.",
+  0,
+  true
+)
+.add_prototype("n_gaussians,n_inputs")
+.add_parameter("n_gaussians", "int", "Number of gaussians")
+.add_parameter("n_inputs", "int", "Dimensionality of the feature vector");
+static PyObject* PyBobLearnEMGMMStats_resize(PyBobLearnEMGMMStatsObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = resize.kwlist(0);
+
+  int n_gaussians = 0;
+  int n_inputs = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "ii", kwlist, &n_gaussians, &n_inputs)) Py_RETURN_NONE;
+
+  if (n_gaussians <= 0){
+    PyErr_Format(PyExc_TypeError, "n_gaussians must be greater than zero");
+    resize.print_usage();
+    return 0;
+  }
+  if (n_inputs <= 0){
+    PyErr_Format(PyExc_TypeError, "n_inputs must be greater than zero");
+    resize.print_usage();
+    return 0;
+  }
+
+
+  self->cxx->resize(n_gaussians, n_inputs);
+
+  BOB_CATCH_MEMBER("cannot perform the resize method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** init ***/
+static auto init = bob::extension::FunctionDoc(
+  "init",
+  " Resets statistics to zero."
+)
+.add_prototype("");
+static PyObject* PyBobLearnEMGMMStats_init_method(PyBobLearnEMGMMStatsObject* self) {
+  BOB_TRY
+
+  self->cxx->init();
+
+  BOB_CATCH_MEMBER("cannot perform the init method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+
+static PyMethodDef PyBobLearnEMGMMStats_methods[] = {
+  {
+    save.name(),
+    (PyCFunction)PyBobLearnEMGMMStats_Save,
+    METH_VARARGS|METH_KEYWORDS,
+    save.doc()
+  },
+  {
+    load.name(),
+    (PyCFunction)PyBobLearnEMGMMStats_Load,
+    METH_VARARGS|METH_KEYWORDS,
+    load.doc()
+  },
+  {
+    is_similar_to.name(),
+    (PyCFunction)PyBobLearnEMGMMStats_IsSimilarTo,
+    METH_VARARGS|METH_KEYWORDS,
+    is_similar_to.doc()
+  },
+  {
+    resize.name(),
+    (PyCFunction)PyBobLearnEMGMMStats_resize,
+    METH_VARARGS|METH_KEYWORDS,
+    resize.doc()
+  },
+  {
+    init.name(),
+    (PyCFunction)PyBobLearnEMGMMStats_init_method,
+    METH_NOARGS,
+    init.doc()
+  },
+
+  {0} /* Sentinel */
+};
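+
+/* A hedged sketch of the save/load round trip the methods above forward to.
+ * The HDF5File constructor and the ``trunc`` mode constant are assumptions
+ * based on the bob.io.base C++ API, not verified against this exact version:
+ *
+ *   bob::io::base::HDF5File f("stats.hdf5", bob::io::base::HDF5File::trunc);
+ *   stats.save(f);   // writes the accumulators (T, log_likelihood, n, ...)
+ *   stats.load(f);   // reads the same configuration back
+ */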
+
+
+/******************************************************************/
+/************ Operators *******************************************/
+/******************************************************************/
+
+static PyBobLearnEMGMMStatsObject* PyBobLearnEMGMMStats_inplaceadd(PyBobLearnEMGMMStatsObject* self, PyObject* other) {
+  BOB_TRY
+
+  if (!PyBobLearnEMGMMStats_Check(other)){
+    PyErr_Format(PyExc_TypeError, "expected bob.learn.em.GMMStats object");
+    return 0;
+  }
+
+  auto other_ = reinterpret_cast<PyBobLearnEMGMMStatsObject*>(other);
+
+  self->cxx->operator+=(*other_->cxx);
+
+  BOB_CATCH_MEMBER("it was not possible to process the operator +=", 0)
+
+  Py_INCREF(self);
+  return self;
+}
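+
+/* The binding above exposes the C++ operator+= so partial statistics can be
+ * merged, e.g. when the E-step is split across workers. A minimal sketch of
+ * the underlying C++ semantics (sizes are hypothetical):
+ *
+ *   bob::learn::em::GMMStats a(8, 40), b(8, 40);
+ *   // ... each worker accumulates into its own GMMStats ...
+ *   a += b;  // sums T, log_likelihood, n, sumPx and sumPxx element-wise
+ */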
+
+static PyNumberMethods PyBobLearnEMGMMStats_operators = {0};
+
+/******************************************************************/
+/************ Module Section **************************************/
+/******************************************************************/
+
+// Define the GMMStats type struct; will be initialized later
+PyTypeObject PyBobLearnEMGMMStats_Type = {
+  PyVarObject_HEAD_INIT(0,0)
+  0
+};
+
+bool init_BobLearnEMGMMStats(PyObject* module)
+{
+  // initialize the type struct
+  PyBobLearnEMGMMStats_Type.tp_name = GMMStats_doc.name();
+  PyBobLearnEMGMMStats_Type.tp_basicsize = sizeof(PyBobLearnEMGMMStatsObject);
+  PyBobLearnEMGMMStats_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_INPLACEOPS;
+  PyBobLearnEMGMMStats_Type.tp_doc = GMMStats_doc.doc();
+
+  // set the functions
+  PyBobLearnEMGMMStats_Type.tp_new = PyType_GenericNew;
+  PyBobLearnEMGMMStats_Type.tp_init = reinterpret_cast<initproc>(PyBobLearnEMGMMStats_init);
+  PyBobLearnEMGMMStats_Type.tp_dealloc = reinterpret_cast<destructor>(PyBobLearnEMGMMStats_delete);
+  PyBobLearnEMGMMStats_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnEMGMMStats_RichCompare);
+  PyBobLearnEMGMMStats_Type.tp_methods = PyBobLearnEMGMMStats_methods;
+  PyBobLearnEMGMMStats_Type.tp_getset = PyBobLearnEMGMMStats_getseters;
+  PyBobLearnEMGMMStats_Type.tp_call = 0;
+  PyBobLearnEMGMMStats_Type.tp_as_number = &PyBobLearnEMGMMStats_operators;
+
+  //set operators
+  PyBobLearnEMGMMStats_operators.nb_inplace_add = reinterpret_cast<binaryfunc>(PyBobLearnEMGMMStats_inplaceadd);
+
+  // check that everything is fine
+  if (PyType_Ready(&PyBobLearnEMGMMStats_Type) < 0) return false;
+
+  // add the type to the module
+  Py_INCREF(&PyBobLearnEMGMMStats_Type);
+  return PyModule_AddObject(module, "GMMStats", (PyObject*)&PyBobLearnEMGMMStats_Type) >= 0;
+}
+
diff --git a/bob/learn/em/include/bob.learn.em/EMPCATrainer.h b/bob/learn/em/include/bob.learn.em/EMPCATrainer.h
new file mode 100644
index 0000000000000000000000000000000000000000..90153c2adaeece5b179598316ecb7e766bb4850b
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/EMPCATrainer.h
@@ -0,0 +1,200 @@
+/**
+ * @date Tue Oct 11 12:18:23 2011 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * @brief Expectation Maximization Algorithm for Principal Component
+ * Analysis
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#ifndef BOB_LEARN_EM_EMPCA_TRAINER_H
+#define BOB_LEARN_EM_EMPCA_TRAINER_H
+
+#include <bob.learn.linear/machine.h>
+#include <blitz/array.h>
+
+namespace bob { namespace learn { namespace em {
+
+/**
+ * @brief Trains a linear machine using an Expectation-Maximization algorithm
+ * on the given dataset.\n
+ * References:\n
+ *  1. "Probabilistic Principal Component Analysis",
+ *     Michael Tipping and Christopher Bishop,
+ *     Journal of the Royal Statistical Society,
+ *      Series B, 61, Part 3, pp. 611–622\n
+ *  2. "EM Algorithms for PCA and SPCA",
+ *     Sam Roweis, Neural Information Processing Systems 10 (NIPS'97),
+ *     pp.626-632 (Sensible Principal Component Analysis part)\n
+ *
+ * Notations used are the ones from reference 1.\n
+ * The probabilistic model is given by: \f$t = W x + \mu + \epsilon\f$\n
+ *  - \f$t\f$ is the observed data (dimension \f$f\f$)\n
+ *  - \f$W\f$ is a projection matrix (dimension \f$f \times d\f$)\n
+ *  - \f$x\f$ is the projected data (dimension \f$d < f\f$)\n
+ *  - \f$\mu\f$ is the mean of the data (dimension \f$f\f$)\n
+ *  - \f$\epsilon\f$ is the noise of the data (dimension \f$f\f$)
+ *      Gaussian with zero-mean and covariance matrix \f$\sigma^2 Id\f$
+ */
+class EMPCATrainer
+{
+  public: //api
+    /**
+     * @brief Initializes a new EM PCA trainer. The training stage will place the
+     * resulting components in the linear machine and set it up to
+     * extract the variable means automatically.
+     */
+    EMPCATrainer(bool compute_likelihood=true);
+
+    /**
+     * @brief Copy constructor
+     */
+    EMPCATrainer(const EMPCATrainer& other);
+
+    /**
+     * @brief (virtual) Destructor
+     */
+    virtual ~EMPCATrainer();
+
+    /**
+     * @brief Assignment operator
+     */
+    EMPCATrainer& operator=(const EMPCATrainer& other);
+
+    /**
+     * @brief Equal to
+     */
+    bool operator==(const EMPCATrainer& b) const;
+
+    /**
+     * @brief Not equal to
+     */
+    bool operator!=(const EMPCATrainer& b) const;
+
+    /**
+     * @brief Similar to
+     */
+    bool is_similar_to(const EMPCATrainer& b, const double r_epsilon=1e-5,
+      const double a_epsilon=1e-8) const;
+
+    /**
+     * @brief This method performs some initialization before the EM loop.
+     */
+    virtual void initialize(bob::learn::linear::Machine& machine,
+      const blitz::Array<double,2>& ar);
+
+    /**
+     * @brief Calculates and saves statistics across the dataset, and saves
+     * these as m_z_{first,second}_order.
+     *
+     * The statistics will be used in the mStep() that follows.
+     */
+    virtual void eStep(bob::learn::linear::Machine& machine,
+      const blitz::Array<double,2>& ar);
+
+    /**
+     * @brief Performs a maximization step to update the parameters of the
+     * factor analysis model.
+     */
+    virtual void mStep(bob::learn::linear::Machine& machine,
+       const blitz::Array<double,2>& ar);
+
+    /**
+     * @brief Computes the average log likelihood using the current estimates
+     * of the latent variables.
+     */
+    virtual double computeLikelihood(bob::learn::linear::Machine& machine);
+
+    /**
+     * @brief Sets \f$\sigma^2\f$ (mostly for testing purposes)
+     */
+    void setSigma2(double sigma2) { m_sigma2 = sigma2; }
+
+    /**
+     * @brief Gets \f$\sigma^2\f$ (mostly for testing purposes)
+     */
+    double getSigma2() const { return m_sigma2; }
+
+    /**
+     * @brief Sets the Random Number Generator
+     */
+    void setRng(const boost::shared_ptr<boost::mt19937> rng)
+    { m_rng = rng; }
+
+    /**
+     * @brief Gets the Random Number Generator
+     */
+    const boost::shared_ptr<boost::mt19937> getRng() const
+    { return m_rng; }
+
+
+  private: //representation
+
+    bool m_compute_likelihood;
+    boost::shared_ptr<boost::mt19937> m_rng;
+
+    blitz::Array<double,2> m_S; /// Covariance of the training data (required only if we need to compute the log likelihood)
+    blitz::Array<double,2> m_z_first_order; /// Current mean of the \f$z_{n}\f$ latent variable
+    blitz::Array<double,3> m_z_second_order; /// Current covariance of the \f$z_{n}\f$ latent variable
+    blitz::Array<double,2> m_inW; /// The matrix product \f$W^T W\f$
+    blitz::Array<double,2> m_invM; /// The matrix \f$inv(M)\f$, where \f$M = W^T W + \sigma^2 Id\f$
+    double m_sigma2; /// The variance \f$\sigma^2\f$ of the noise epsilon of the probabilistic model
+    double m_f_log2pi; /// The constant \f$n_{features} \log(2\pi)\f$ used during the likelihood computation
+
+    // Working arrays
+    mutable blitz::Array<double,2> m_tmp_dxf; /// size dimensionality x n_features
+    mutable blitz::Array<double,1> m_tmp_d; /// size dimensionality
+    mutable blitz::Array<double,1> m_tmp_f; /// size n_features
+    mutable blitz::Array<double,2> m_tmp_dxd_1; /// size dimensionality x dimensionality
+    mutable blitz::Array<double,2> m_tmp_dxd_2; /// size dimensionality x dimensionality
+    mutable blitz::Array<double,2> m_tmp_fxd_1; /// size n_features x dimensionality
+    mutable blitz::Array<double,2> m_tmp_fxd_2; /// size n_features x dimensionality
+    mutable blitz::Array<double,2> m_tmp_fxf_1; /// size n_features x n_features
+    mutable blitz::Array<double,2> m_tmp_fxf_2; /// size n_features x n_features
+
+
+    /**
+     * @brief Initializes/resizes the (array) members
+     */
+    void initMembers(const bob::learn::linear::Machine& machine,
+      const blitz::Array<double,2>& ar);
+    /**
+     * @brief Computes the mean and the variance (if required) of the training
+     * data
+     */
+    void computeMeanVariance(bob::learn::linear::Machine& machine,
+      const blitz::Array<double,2>& ar);
+    /**
+     * @brief Random initialization of \f$W\f$ and \f$\sigma^2\f$.
+     * W is the projection matrix (from the LinearMachine)
+     */
+    void initRandomWSigma2(bob::learn::linear::Machine& machine);
+    /**
+     * @brief Computes the product \f$W^T W\f$.
+     * \f$W\f$ is the projection matrix (from the LinearMachine)
+     */
+    void computeWtW(bob::learn::linear::Machine& machine);
+    /**
+     * @brief Computes the inverse of \f$M\f$ matrix, where
+     *   \f$M = W^T W + \sigma^2 Id\f$.
+     *   \f$W\f$ is the projection matrix (from the LinearMachine)
+     */
+    void computeInvM();
+    /**
+     * @brief M-Step (part 1): Computes the new estimate of \f$W\f$ using the
+     * new estimated statistics.
+     */
+    void updateW(bob::learn::linear::Machine& machine,
+       const blitz::Array<double,2>& ar);
+    /**
+     * @brief M-Step (part 2): Computes the new estimate of \f$\sigma^2\f$ using
+     * the new estimated statistics.
+     */
+    void updateSigma2(bob::learn::linear::Machine& machine,
+       const blitz::Array<double,2>& ar);
+};
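+
+/*
+ * A minimal EM-loop sketch for this trainer. This is a hedged example, not
+ * part of the API: the bob::learn::linear::Machine constructor signature and
+ * the n_samples x n_features layout of ``data`` are assumptions:
+ *
+ *   bob::learn::linear::Machine machine(n_features, n_components);
+ *   bob::learn::em::EMPCATrainer trainer;
+ *   trainer.initialize(machine, data);
+ *   for (size_t i = 0; i < max_iterations; ++i) {
+ *     trainer.eStep(machine, data);   // statistics of the latent variables
+ *     trainer.mStep(machine, data);   // updates W and sigma^2
+ *   }
+ *   double avg_ll = trainer.computeLikelihood(machine);
+ */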
+
+} } } // namespaces
+
+#endif /* BOB_LEARN_EM_EMPCA_TRAINER_H */
diff --git a/bob/learn/em/include/bob.learn.em/FABase.h b/bob/learn/em/include/bob.learn.em/FABase.h
new file mode 100644
index 0000000000000000000000000000000000000000..3b521989e96e50c27518a7929249abd6f15d9414
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/FABase.h
@@ -0,0 +1,293 @@
+/**
+ * @date Tue Jan 27 15:51:15 2015 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ *
+ * @brief A base class for Factor Analysis
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#ifndef BOB_LEARN_EM_FABASE_H
+#define BOB_LEARN_EM_FABASE_H
+
+#include <stdexcept>
+
+#include <bob.learn.em/GMMMachine.h>
+#include <boost/shared_ptr.hpp>
+
+namespace bob { namespace learn { namespace em {
+
+/**
+ * @brief A FA Base class which contains U, V and D matrices
+ * TODO: add a reference to the journal articles
+ */
+class FABase
+{
+  public:
+    /**
+     * @brief Default constructor. Builds an otherwise invalid 0 x 0 FABase
+     * The Universal Background Model and the matrices U, V and diag(d) are
+     * not initialized.
+     */
+    FABase();
+
+    /**
+     * @brief Constructor. Builds a new FABase.
+     * The Universal Background Model and the matrices U, V and diag(d) are
+     * not initialized.
+     *
+     * @param ubm The Universal Background Model
+     * @param ru size of U (CD x ru)
+     * @param rv size of V (CD x rv)
+     * @warning ru and rv SHOULD BE >= 1. Just set U/V/D to zero if you want
+     *   to ignore one subspace. This is the case for ISV.
+     */
+    FABase(const boost::shared_ptr<bob::learn::em::GMMMachine> ubm, const size_t ru=1, const size_t rv=1);
+
+    /**
+     * @brief Copy constructor
+     */
+    FABase(const FABase& other);
+
+    /**
+     * @brief Just to virtualise the destructor
+     */
+    virtual ~FABase();
+
+    /**
+     * @brief Assigns from a different JFA machine
+     */
+    FABase& operator=(const FABase &other);
+
+    /**
+     * @brief Equal to
+     */
+    bool operator==(const FABase& b) const;
+
+    /**
+     * @brief Not equal to
+     */
+    bool operator!=(const FABase& b) const;
+
+    /**
+     * @brief Similar to
+     */
+    bool is_similar_to(const FABase& b, const double r_epsilon=1e-5,
+      const double a_epsilon=1e-8) const;
+
+    /**
+     * @brief Returns the UBM
+     */
+    const boost::shared_ptr<bob::learn::em::GMMMachine> getUbm() const
+    { return m_ubm; }
+
+    /**
+     * @brief Returns the U matrix
+     */
+    const blitz::Array<double,2>& getU() const
+    { return m_U; }
+
+    /**
+     * @brief Returns the V matrix
+     */
+    const blitz::Array<double,2>& getV() const
+    { return m_V; }
+
+    /**
+     * @brief Returns the diagonal matrix diag(d) (as a 1D vector)
+     */
+    const blitz::Array<double,1>& getD() const
+    { return m_d; }
+
+    /**
+     * @brief Returns the UBM mean supervector (as a 1D vector)
+     */
+    const blitz::Array<double,1>& getUbmMean() const
+    { return m_cache_mean; }
+
+    /**
+     * @brief Returns the UBM variance supervector (as a 1D vector)
+     */
+    const blitz::Array<double,1>& getUbmVariance() const
+    { return m_cache_sigma; }
+
+    /**
+     * @brief Returns the number of Gaussian components C
+     * @warning An exception is thrown if no Universal Background Model has
+     *   been set yet.
+     */
+    const size_t getNGaussians() const
+    { if(!m_ubm) throw std::runtime_error("No UBM was set in the JFA machine.");
+      return m_ubm->getNGaussians(); }
+
+    /**
+     * @brief Returns the feature dimensionality D
+     * @warning An exception is thrown if no Universal Background Model has
+     *   been set yet.
+     */
+    const size_t getNInputs() const
+    { if(!m_ubm) throw std::runtime_error("No UBM was set in the JFA machine.");
+      return m_ubm->getNInputs(); }
+
+    /**
+     * @brief Returns the supervector length CD
+     * (CxD: Number of Gaussian components by the feature dimensionality)
+     * @warning An exception is thrown if no Universal Background Model has
+     *   been set yet.
+     */
+    const size_t getSupervectorLength() const
+    { if(!m_ubm) throw std::runtime_error("No UBM was set in the JFA machine.");
+      return m_ubm->getNInputs()*m_ubm->getNGaussians(); }
+
+    /**
+     * @brief Returns the size/rank ru of the U matrix
+     */
+    const size_t getDimRu() const
+    { return m_ru; }
+
+    /**
+     * @brief Returns the size/rank rv of the V matrix
+     */
+    const size_t getDimRv() const
+    { return m_rv; }
+
+    /**
+     * @brief Resets the dimensionality of the subspace U and V
+     * U and V are hence uninitialized.
+     */
+    void resize(const size_t ru, const size_t rv);
+
+    /**
+     * @brief Resets the dimensionality of the subspace U and V,
+     * assuming that no UBM has yet been set
+     * U and V are hence uninitialized.
+     */
+    void resize(const size_t ru, const size_t rv, const size_t cd);
+
+    /**
+     * @brief Returns the U matrix in order to update it
+     * @warning Should only be used by the trainer for efficiency reasons,
+     *   or for testing purposes.
+     */
+    blitz::Array<double,2>& updateU()
+    { return m_U; }
+
+    /**
+     * @brief Returns the V matrix in order to update it
+     * @warning Should only be used by the trainer for efficiency reasons,
+     *   or for testing purposes.
+     */
+    blitz::Array<double,2>& updateV()
+    { return m_V; }
+
+    /**
+     * @brief Returns the diagonal matrix diag(d) (as a 1D vector) in order
+     * to update it
+     * @warning Should only be used by the trainer for efficiency reasons,
+     *   or for testing purposes.
+     */
+    blitz::Array<double,1>& updateD()
+    { return m_d; }
+
+
+    /**
+     * @brief Sets (the mean supervector of) the Universal Background Model
+     * U, V and d are left uninitialized if the dimensions (C or D) change
+     */
+    void setUbm(const boost::shared_ptr<bob::learn::em::GMMMachine> ubm);
+
+    /**
+     * @brief Sets the U matrix
+     */
+    void setU(const blitz::Array<double,2>& U);
+
+    /**
+     * @brief Sets the V matrix
+     */
+    void setV(const blitz::Array<double,2>& V);
+
+    /**
+     * @brief Sets the diagonal matrix diag(d)
+     * (a 1D vector is expected as an argument)
+     */
+    void setD(const blitz::Array<double,1>& d);
+
+
+    /**
+     * @brief Estimates x from the GMM statistics considering the LPT
+     * assumption, that is, the latent session variable x is approximated
+     * using the UBM
+     */
+    void estimateX(const bob::learn::em::GMMStats& gmm_stats, blitz::Array<double,1>& x) const;
+
+    /**
+     * @brief Compute and put U^{T}.Sigma^{-1} matrix in cache
+     * @warning Should only be used by the trainer for efficiency reasons,
+     *   or for testing purposes.
+     */
+    void updateCacheUbmUVD();
+
+
+  private:
+    /**
+     * @brief Update cache arrays/variables
+     */
+    void updateCache();
+    /**
+     * @brief Put GMM mean/variance supervector in cache
+     */
+    void updateCacheUbm();
+    /**
+     * @brief Resize working arrays
+     */
+    void resizeTmp();
+    /**
+     * @brief Computes (Id + U^T.Sigma^-1.N_{i,h}.U)^-1 =
+     *   (Id + sum_{c=1..C} N_{i,h}.U_{c}^T.Sigma_{c}^-1.U_{c})^-1
+     */
+    void computeIdPlusUSProdInv(const bob::learn::em::GMMStats& gmm_stats,
+      blitz::Array<double,2>& out) const;
+    /**
+     * @brief Computes Fn_x = sum_{sessions h}(N*(o - m))
+     * (Normalised first order statistics)
+     */
+    void computeFn_x(const bob::learn::em::GMMStats& gmm_stats,
+      blitz::Array<double,1>& out) const;
+    /**
+     * @brief Estimates the value of x from the passed arguments
+     * (IdPlusUSProdInv and Fn_x), considering the LPT assumption
+     */
+    void estimateX(const blitz::Array<double,2>& IdPlusUSProdInv,
+      const blitz::Array<double,1>& Fn_x, blitz::Array<double,1>& x) const;
+
+
+    // UBM
+    boost::shared_ptr<bob::learn::em::GMMMachine> m_ubm;
+
+    // dimensionality
+    size_t m_ru; // size of U (CD x ru)
+    size_t m_rv; // size of V (CD x rv)
+
+    // U, V, D matrices
+    // D is assumed to be diagonal, and only the diagonal is stored
+    blitz::Array<double,2> m_U;
+    blitz::Array<double,2> m_V;
+    blitz::Array<double,1> m_d;
+
+    // Vectors/Matrices precomputed in cache
+    blitz::Array<double,1> m_cache_mean;
+    blitz::Array<double,1> m_cache_sigma;
+    blitz::Array<double,2> m_cache_UtSigmaInv;
+
+    mutable blitz::Array<double,2> m_tmp_IdPlusUSProdInv;
+    mutable blitz::Array<double,1> m_tmp_Fn_x;
+    mutable blitz::Array<double,1> m_tmp_ru;
+    mutable blitz::Array<double,2> m_tmp_ruD;
+    mutable blitz::Array<double,2> m_tmp_ruru;
+};
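+
+/*
+ * A hedged usage sketch for session-factor estimation under the LPT
+ * assumption; ``ubm`` (a shared_ptr to a GMMMachine) and ``gmm_stats`` are
+ * assumed to exist with matching dimensions:
+ *
+ *   bob::learn::em::FABase fa(ubm, 10, 5);   // ru=10, rv=5
+ *   blitz::Array<double,1> x(fa.getDimRu());
+ *   fa.estimateX(gmm_stats, x);              // x approximated via the UBM
+ */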
+
+
+} } } // namespaces
+
+#endif // BOB_LEARN_EM_FABASE_H
diff --git a/bob/learn/em/include/bob.learn.em/FABaseTrainer.h b/bob/learn/em/include/bob.learn.em/FABaseTrainer.h
new file mode 100644
index 0000000000000000000000000000000000000000..037b4f2a35ab7d2237b0291a7fa75631a5008aa5
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/FABaseTrainer.h
@@ -0,0 +1,350 @@
+/**
+ * @date Sat Jan 31 17:16:17 2015 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ *
+ * @brief FABaseTrainer functions
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#ifndef BOB_LEARN_EM_FABASETRAINER_H
+#define BOB_LEARN_EM_FABASETRAINER_H
+
+#include <blitz/array.h>
+#include <bob.learn.em/GMMStats.h>
+#include <bob.learn.em/JFAMachine.h>
+#include <vector>
+
+#include <map>
+#include <string>
+#include <bob.core/array_copy.h>
+#include <boost/shared_ptr.hpp>
+#include <boost/random.hpp>
+#include <bob.core/logging.h>
+
+namespace bob { namespace learn { namespace em {
+
+class FABaseTrainer
+{
+  public:
+    /**
+     * @brief Constructor
+     */
+    FABaseTrainer();
+
+    /**
+     * @brief Copy constructor
+     */
+    FABaseTrainer(const FABaseTrainer& other);
+
+    /**
+     * @brief Destructor
+     */
+    ~FABaseTrainer();
+
+    /**
+     * @brief Checks that the dimensionalities of the statistics match.
+     */
+    void checkStatistics(const bob::learn::em::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& stats);
+
+    /**
+     * @brief Initialize the dimensionality, the UBM, the sums of the
+     * statistics and the number of identities.
+     */
+    void initUbmNidSumStatistics(const bob::learn::em::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& stats);
+
+    /**
+     * @brief Precomputes the sums of the zeroth order statistics over the
+     * sessions for each client
+     */
+    void precomputeSumStatisticsN(const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& stats);
+    /**
+     * @brief Precomputes the sums of the first order statistics over the
+     * sessions for each client
+     */
+    void precomputeSumStatisticsF(const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& stats);
+
+    /**
+     * @brief Initializes (allocates and sets to zero) the x, y, z speaker
+     * factors
+     */
+    void initializeXYZ(const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& stats);
+
+    /**
+     * @brief Resets the x, y, z speaker factors to zero values
+     */
+    void resetXYZ();
+
+
+    /**** Y and V functions ****/
+    /**
+     * @brief Computes Vt * diag(sigma)^-1
+     */
+    void computeVtSigmaInv(const bob::learn::em::FABase& m);
+    /**
+     * @brief Computes Vt_{c} * diag(sigma)^-1 * V_{c} for each Gaussian c
+     */
+    void computeVProd(const bob::learn::em::FABase& m);
+    /**
+     * @brief Computes (I+Vt*diag(sigma)^-1*Ni*V)^-1 which occurs in the y
+     * estimation for the given person
+     */
+    void computeIdPlusVProd_i(const size_t id);
+    /**
+     * @brief Computes sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i} - U*x_{i,h}))
+     * which occurs in the y estimation of the given person
+     */
+    void computeFn_y_i(const bob::learn::em::FABase& m,
+      const std::vector<boost::shared_ptr<bob::learn::em::GMMStats> >& stats,
+      const size_t id);
+    /**
+     * @brief Updates y_i (of the current person) and the accumulators to
+     * compute V with the cache values m_cache_IdPlusVProd_i,
+     * m_cache_VtSigmaInv and m_cache_Fn_y_i
+     */
+    void updateY_i(const size_t id);
+    /**
+     * @brief Updates y and the accumulators to compute V
+     */
+    void updateY(const bob::learn::em::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& stats);
+    /**
+     * @brief Computes the accumulators m_acc_V_A1 and m_acc_V_A2 for V
+     * V = A2 * A1^-1
+     */
+    void computeAccumulatorsV(const bob::learn::em::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& stats);
+    /**
+     * @brief Updates V from the accumulators m_acc_V_A1 and m_acc_V_A2
+     */
+    void updateV(blitz::Array<double,2>& V);
+
+
+    /**** X and U functions ****/
+    /**
+     * @brief Computes Ut * diag(sigma)^-1
+     */
+    void computeUtSigmaInv(const bob::learn::em::FABase& m);
+    /**
+     * @brief Computes Ut_{c} * diag(sigma)^-1 * U_{c} for each Gaussian c
+     */
+    void computeUProd(const bob::learn::em::FABase& m);
+    /**
+     * @brief Computes (I+Ut*diag(sigma)^-1*Ni*U)^-1 which occurs in the x
+     * estimation
+     */
+    void computeIdPlusUProd_ih(const boost::shared_ptr<bob::learn::em::GMMStats>& stats);
+    /**
+     * @brief Computes N_{i,h}*(o_{i,h} - m - D*z_{i} - V*y_{i})
+     * which occurs in the x estimation of the given person/session
+     */
+    void computeFn_x_ih(const bob::learn::em::FABase& m,
+      const boost::shared_ptr<bob::learn::em::GMMStats>& stats, const size_t id);
+    /**
+     * @brief Updates x_ih (of the current person/session) and the
+     * accumulators to compute U with the cache values m_cache_IdPlusUProd_ih,
+     * m_cache_UtSigmaInv and m_cache_Fn_x_ih
+     */
+    void updateX_ih(const size_t id, const size_t h);
+    /**
+     * @brief Updates x
+     */
+    void updateX(const bob::learn::em::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& stats);
+    /**
+     * @brief Computes the accumulators m_acc_U_A1 and m_acc_U_A2 for U
+     * U = A2 * A1^-1
+     */
+    void computeAccumulatorsU(const bob::learn::em::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& stats);
+    /**
+     * @brief Updates U from the accumulators m_acc_U_A1 and m_acc_U_A2
+     */
+    void updateU(blitz::Array<double,2>& U);
+
+
+    /**** z and D functions ****/
+    /**
+     * @brief Computes diag(D) * diag(sigma)^-1
+     */
+    void computeDtSigmaInv(const bob::learn::em::FABase& m);
+    /**
+     * @brief Computes Dt_{c} * diag(sigma)^-1 * D_{c} for each Gaussian c
+     */
+    void computeDProd(const bob::learn::em::FABase& m);
+    /**
+     * @brief Computes (I+diag(d)t*diag(sigma)^-1*Ni*diag(d))^-1 which occurs
+     * in the z estimation for the given person
+     */
+    void computeIdPlusDProd_i(const size_t id);
+    /**
+     * @brief Computes sum_{sessions h}(N_{i,h}*(o_{i,h} - m - V*y_{i} - U*x_{i,h}))
+     * which occurs in the z estimation of the given person
+     */
+    void computeFn_z_i(const bob::learn::em::FABase& m,
+      const std::vector<boost::shared_ptr<bob::learn::em::GMMStats> >& stats, const size_t id);
+    /**
+     * @brief Updates z_i (of the current person) and the accumulators to
+     * compute D with the cache values m_cache_IdPlusDProd_i,
+     * m_cache_DtSigmaInv and m_cache_Fn_z_i
+     */
+    void updateZ_i(const size_t id);
+    /**
+     * @brief Updates z and the accumulators to compute D
+     */
+    void updateZ(const bob::learn::em::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& stats);
+    /**
+     * @brief Computes the accumulators m_acc_D_A1 and m_acc_D_A2 for d
+     * d = A2 * A1^-1
+     */
+    void computeAccumulatorsD(const bob::learn::em::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& stats);
+    /**
+     * @brief Updates d from the accumulators m_acc_D_A1 and m_acc_D_A2
+     */
+    void updateD(blitz::Array<double,1>& d);
+
+
+    /**
+     * @brief Get the zeroth order statistics
+     */
+    const std::vector<blitz::Array<double,1> >& getNacc() const
+    { return m_Nacc; }
+    /**
+     * @brief Get the first order statistics
+     */
+    const std::vector<blitz::Array<double,1> >& getFacc() const
+    { return m_Facc; }
+    /**
+     * @brief Get the x speaker factors
+     */
+    const std::vector<blitz::Array<double,2> >& getX() const
+    { return m_x; }
+    /**
+     * @brief Get the y speaker factors
+     */
+    const std::vector<blitz::Array<double,1> >& getY() const
+    { return m_y; }
+    /**
+     * @brief Get the z speaker factors
+     */
+    const std::vector<blitz::Array<double,1> >& getZ() const
+    { return m_z; }
+    /**
+     * @brief Set the x speaker factors
+     */
+    void setX(const std::vector<blitz::Array<double,2> >& X)
+    { m_x = X; }
+    /**
+     * @brief Set the y speaker factors
+     */
+    void setY(const std::vector<blitz::Array<double,1> >& y)
+    { m_y = y; }
+    /**
+     * @brief Set the z speaker factors
+     */
+    void setZ(const std::vector<blitz::Array<double,1> >& z)
+    { m_z = z; }
+
+    /**
+     * @brief Initializes the cache to process the given statistics
+     */
+    void initCache();
+
+    /**
+     * @brief Getters for the accumulators
+     */
+    const blitz::Array<double,3>& getAccVA1() const
+    { return m_acc_V_A1; }
+    const blitz::Array<double,2>& getAccVA2() const
+    { return m_acc_V_A2; }
+    const blitz::Array<double,3>& getAccUA1() const
+    { return m_acc_U_A1; }
+    const blitz::Array<double,2>& getAccUA2() const
+    { return m_acc_U_A2; }
+    const blitz::Array<double,1>& getAccDA1() const
+    { return m_acc_D_A1; }
+    const blitz::Array<double,1>& getAccDA2() const
+    { return m_acc_D_A2; }
+
+    /**
+     * @brief Setters for the accumulators. Useful if the E-step needs
+     * to be parallelized.
+     */
+    void setAccVA1(const blitz::Array<double,3>& acc)
+    { bob::core::array::assertSameShape(acc, m_acc_V_A1);
+      m_acc_V_A1 = acc; }
+    void setAccVA2(const blitz::Array<double,2>& acc)
+    { bob::core::array::assertSameShape(acc, m_acc_V_A2);
+      m_acc_V_A2 = acc; }
+    void setAccUA1(const blitz::Array<double,3>& acc)
+    { bob::core::array::assertSameShape(acc, m_acc_U_A1);
+      m_acc_U_A1 = acc; }
+    void setAccUA2(const blitz::Array<double,2>& acc)
+    { bob::core::array::assertSameShape(acc, m_acc_U_A2);
+      m_acc_U_A2 = acc; }
+    void setAccDA1(const blitz::Array<double,1>& acc)
+    { bob::core::array::assertSameShape(acc, m_acc_D_A1);
+      m_acc_D_A1 = acc; }
+    void setAccDA2(const blitz::Array<double,1>& acc)
+    { bob::core::array::assertSameShape(acc, m_acc_D_A2);
+      m_acc_D_A2 = acc; }
+
+
+  private:
+    size_t m_Nid; // Number of identities
+    size_t m_dim_C; // Number of Gaussian components of the UBM GMM
+    size_t m_dim_D; // Dimensionality of the feature space
+    size_t m_dim_ru; // Rank of the U subspace
+    size_t m_dim_rv; // Rank of the V subspace
+
+    std::vector<blitz::Array<double,2> > m_x; // matrix x of speaker factors for eigenchannels U, for each client
+    std::vector<blitz::Array<double,1> > m_y; // vector y of spealer factors for eigenvoices V, for each client
+    std::vector<blitz::Array<double,1> > m_z; // vector z of spealer factors for eigenvoices Z, for each client
+
+    std::vector<blitz::Array<double,1> > m_Nacc; // Sum of the zeroth order statistics over the sessions for each client, dimension C
+    std::vector<blitz::Array<double,1> > m_Facc; // Sum of the first order statistics over the sessions for each client, dimension CD
+
+    // Accumulators for the M-step
+    blitz::Array<double,3> m_acc_V_A1;
+    blitz::Array<double,2> m_acc_V_A2;
+    blitz::Array<double,3> m_acc_U_A1;
+    blitz::Array<double,2> m_acc_U_A2;
+    blitz::Array<double,1> m_acc_D_A1;
+    blitz::Array<double,1> m_acc_D_A2;
+
+    // Cache/Precomputation
+    blitz::Array<double,2> m_cache_VtSigmaInv; // Vt * diag(sigma)^-1
+    blitz::Array<double,3> m_cache_VProd; // first dimension is the Gaussian id
+    blitz::Array<double,2> m_cache_IdPlusVProd_i;
+    blitz::Array<double,1> m_cache_Fn_y_i;
+
+    blitz::Array<double,2> m_cache_UtSigmaInv; // Ut * diag(sigma)^-1
+    blitz::Array<double,3> m_cache_UProd; // first dimension is the Gaussian id
+    blitz::Array<double,2> m_cache_IdPlusUProd_ih;
+    blitz::Array<double,1> m_cache_Fn_x_ih;
+
+    blitz::Array<double,1> m_cache_DtSigmaInv; // Dt * diag(sigma)^-1
+    blitz::Array<double,1> m_cache_DProd; // supervector length dimension
+    blitz::Array<double,1> m_cache_IdPlusDProd_i;
+    blitz::Array<double,1> m_cache_Fn_z_i;
+
+    // Working arrays
+    mutable blitz::Array<double,2> m_tmp_ruru;
+    mutable blitz::Array<double,2> m_tmp_ruD;
+    mutable blitz::Array<double,2> m_tmp_rvrv;
+    mutable blitz::Array<double,2> m_tmp_rvD;
+    mutable blitz::Array<double,1> m_tmp_rv;
+    mutable blitz::Array<double,1> m_tmp_ru;
+    mutable blitz::Array<double,1> m_tmp_CD;
+    mutable blitz::Array<double,1> m_tmp_CD_b;
+};
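+
+/*
+ * A hedged sketch of one eigenvoice (V) update round using this helper;
+ * ``base`` is an initialized FABase and ``stats`` a vector of per-client
+ * vectors of GMMStats, both assumed to exist:
+ *
+ *   bob::learn::em::FABaseTrainer t;
+ *   t.initUbmNidSumStatistics(base, stats);
+ *   t.initializeXYZ(stats);
+ *   t.updateY(base, stats);                // E-step for the y factors
+ *   t.computeAccumulatorsV(base, stats);   // fills m_acc_V_A1 / m_acc_V_A2
+ *   t.updateV(base.updateV());             // M-step: V = A2 * A1^-1
+ */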
+
+
+} } } // namespaces
+
+#endif /* BOB_LEARN_EM_FABASETRAINER_H */
diff --git a/bob/learn/em/include/bob.learn.em/GMMBaseTrainer.h b/bob/learn/em/include/bob.learn.em/GMMBaseTrainer.h
new file mode 100644
index 0000000000000000000000000000000000000000..121cdc22484b9ea5bb07439dafd7478183821c13
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/GMMBaseTrainer.h
@@ -0,0 +1,161 @@
+/**
+ * @date Tue May 10 11:35:58 2011 +0200
+ * @author Francois Moulin <Francois.Moulin@idiap.ch>
+ *
+ * @brief This class implements the E-step of the expectation-maximisation algorithm for a GMM Machine.
+ * @details See Section 9.2.2 of Bishop, "Pattern recognition and machine learning", 2006
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#ifndef BOB_LEARN_EM_GMMBASETRAINER_H
+#define BOB_LEARN_EM_GMMBASETRAINER_H
+
+#include <bob.learn.em/GMMMachine.h>
+#include <bob.learn.em/GMMStats.h>
+#include <limits>
+
+namespace bob { namespace learn { namespace em {
+
+/**
+ * @brief This class implements the E-step of the expectation-maximisation
+ * algorithm for a GMM Machine.
+ * @details See Section 9.2.2 of Bishop,
+ *   "Pattern recognition and machine learning", 2006
+ */
+class GMMBaseTrainer
+{
+  public:
+    /**
+     * @brief Default constructor
+     */
+    GMMBaseTrainer(const bool update_means=true,
+                   const bool update_variances=false, 
+                   const bool update_weights=false,
+                   const double mean_var_update_responsibilities_threshold = std::numeric_limits<double>::epsilon());
+
+    /**
+     * @brief Copy constructor
+     */
+    GMMBaseTrainer(const GMMBaseTrainer& other);
+
+    /**
+     * @brief Destructor
+     */
+    virtual ~GMMBaseTrainer();
+
+    /**
+     * @brief Initialization before the EM steps
+     */
+    void initialize(bob::learn::em::GMMMachine& gmm);
+
+    /**
+     * @brief Calculates and saves statistics across the dataset,
+     * and saves these as m_ss. Also accumulates the average
+     * log likelihood of the observations given the GMM, which can be
+     * retrieved afterwards with computeLikelihood().
+     *
+     * The statistics, m_ss, will be used in the mStep() that follows.
+     */
+     void eStep(bob::learn::em::GMMMachine& gmm,
+      const blitz::Array<double,2>& data);
+
+    /**
+     * @brief Computes the average log likelihood of the data from the
+     * statistics accumulated during the last eStep()
+     */
+    double computeLikelihood(bob::learn::em::GMMMachine& gmm);
+
+
+    /**
+     * @brief Assigns from a different GMMBaseTrainer
+     */
+    GMMBaseTrainer& operator=(const GMMBaseTrainer &other);
+
+    /**
+     * @brief Equal to
+     */
+    bool operator==(const GMMBaseTrainer& b) const;
+
+    /**
+     * @brief Not equal to
+     */
+    bool operator!=(const GMMBaseTrainer& b) const;
+
+    /**
+     * @brief Similar to
+     */
+    bool is_similar_to(const GMMBaseTrainer& b, const double r_epsilon=1e-5,
+      const double a_epsilon=1e-8) const;
+
+    /**
+     * @brief Returns the internal GMM statistics. Useful to parallelize the
+     * E-step
+     */
+    const bob::learn::em::GMMStats getGMMStats() const
+    { return m_ss; }
+
+    /**
+     * @brief Sets the internal GMM statistics. Useful to parallelize the
+     * E-step
+     */
+    void setGMMStats(const bob::learn::em::GMMStats& stats);
+    
+    /**
+     * Whether the means are updated on each iteration
+     */    
+    bool getUpdateMeans()
+    {return m_update_means;}
+    
+    /**
+     * Whether the variances are updated on each iteration
+     */
+    bool getUpdateVariances()
+    {return m_update_variances;}
+
+
+    /**
+     * Whether the weights are updated on each iteration
+     */
+    bool getUpdateWeights()
+    {return m_update_weights;}
+    
+    
+    /**
+     * Threshold over the responsibilities of the Gaussians used in the
+     * mean/variance updates (see m_mean_var_update_responsibilities_threshold)
+     */
+    double getMeanVarUpdateResponsibilitiesThreshold()
+    {return m_mean_var_update_responsibilities_threshold;}
+    
+
+  private:
+  
+    /**
+     * These are the sufficient statistics, calculated during the
+     * E-step and used during the M-step
+     */
+    bob::learn::em::GMMStats m_ss;
+
+
+    /**
+     * update means on each iteration
+     */
+    bool m_update_means;
+
+    /**
+     * update variances on each iteration
+     */
+    bool m_update_variances;
+
+    /**
+     * update weights on each iteration
+     */
+    bool m_update_weights;
+
+    /**
+     * threshold over the responsibilities of the Gaussians
+     * Equations 9.24, 9.25 of Bishop, "Pattern recognition and machine learning", 2006
+     * require a division by the responsibilities, which might be equal to zero
+     * because of numerical issues. This threshold is used to avoid such divisions.
+     */
+    double m_mean_var_update_responsibilities_threshold;
+};
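+
+/*
+ * A hedged sketch of how this E-step helper is typically driven; the
+ * concrete M-step lives in the ML/MAP trainers built on top of it. Sizes
+ * are hypothetical and ``data`` is assumed to be n_samples x n_features:
+ *
+ *   bob::learn::em::GMMMachine gmm(16, 40);
+ *   bob::learn::em::GMMBaseTrainer trainer(true, true, true);
+ *   trainer.initialize(gmm);
+ *   trainer.eStep(gmm, data);                        // accumulates m_ss
+ *   double avg_ll = trainer.computeLikelihood(gmm);
+ */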
+
+} } } // namespaces
+
+#endif // BOB_LEARN_EM_GMMBASETRAINER_H
diff --git a/bob/learn/em/include/bob.learn.em/GMMMachine.h b/bob/learn/em/include/bob.learn.em/GMMMachine.h
new file mode 100644
index 0000000000000000000000000000000000000000..c3b124621e67ac93ee3c3e85eb2615738b3c7413
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/GMMMachine.h
@@ -0,0 +1,371 @@
+/**
+ * @date Tue May 10 11:35:58 2011 +0200
+ * @author Francois Moulin <Francois.Moulin@idiap.ch>
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * @brief This class implements a Gaussian mixture model (GMM) with diagonal-covariance components.
+ * @details See Section 2.3.9 of Bishop, "Pattern recognition and machine learning", 2006
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#ifndef BOB_LEARN_EM_GMMMACHINE_H
+#define BOB_LEARN_EM_GMMMACHINE_H
+
+#include <bob.learn.em/Gaussian.h>
+#include <bob.learn.em/GMMStats.h>
+#include <bob.io.base/HDF5File.h>
+#include <iostream>
+#include <boost/shared_ptr.hpp>
+#include <vector>
+
+namespace bob { namespace learn { namespace em {
+
+/**
+ * @brief This class implements a Gaussian mixture model (GMM) with diagonal-covariance components.
+ * @details See Section 2.3.9 of Bishop, "Pattern recognition and machine learning", 2006
+ */
+class GMMMachine
+{
+  public:
+    /**
+     * Default constructor
+     */
+    GMMMachine();
+
+    /**
+     * Constructor
+     * @param[in] n_gaussians  The number of Gaussian components
+     * @param[in] n_inputs     The feature dimensionality
+     */
+    GMMMachine(const size_t n_gaussians, const size_t n_inputs);
+
+    /**
+     * Copy constructor
+     * (Needed because the GMM points to its constituent Gaussian members)
+     */
+    GMMMachine(const GMMMachine& other);
+
+    /**
+     * Constructor from a Configuration
+     */
+    GMMMachine(bob::io::base::HDF5File& config);
+
+    /**
+     * Assignment
+     */
+    GMMMachine& operator=(const GMMMachine &other);
+
+    /**
+     * Equal to
+     */
+    bool operator==(const GMMMachine& b) const;
+
+    /**
+     * Not equal to
+     */
+    bool operator!=(const GMMMachine& b) const;
+
+    /**
+     * @brief Similar to
+     */
+    bool is_similar_to(const GMMMachine& b, const double r_epsilon=1e-5,
+      const double a_epsilon=1e-8) const;
+
+    /**
+     * Destructor
+     */
+    virtual ~GMMMachine();
+
+
+    /**
+     * Reset the input dimensionality and the number of Gaussian components.
+     * Initialises the weights to a uniform distribution.
+     * @param n_gaussians The number of Gaussian components
+     * @param n_inputs    The feature dimensionality
+     */
+    void resize(const size_t n_gaussians, const size_t n_inputs);
+
+
+    /////////////////////////
+    // Getters
+    ////////////////////////
+
+    /**
+     * Get number of inputs
+     */
+    size_t getNInputs() const
+    { return m_n_inputs; }
+
+    /**
+     * Get the weights ("mixing coefficients") of the Gaussian components
+     */
+    const blitz::Array<double,1>& getWeights() const
+    { return m_weights; }
+
+    /**
+     * Get the logarithm of the weights of the Gaussian components
+     */
+    inline const blitz::Array<double,1>& getLogWeights() const
+    { return m_cache_log_weights; }
+
+
+    /**
+     * Get the means
+     */    
+    const blitz::Array<double,2> getMeans() const;
+    
+    /**
+     * Get the mean supervector
+     */
+    void getMeanSupervector(blitz::Array<double,1> &mean_supervector) const;
+    
+    /**
+     * Returns a const reference to the mean supervector (put in cache)
+     */
+    const blitz::Array<double,1>& getMeanSupervector() const;
+        
+    /**
+     * Get the variances
+     */
+    const blitz::Array<double,2> getVariances() const;
+    
+    /**
+     * Returns a const reference to the variance supervector (put in cache)
+     */
+    const blitz::Array<double,1>& getVarianceSupervector() const;
+    
+
+    /**
+     * Get the variance flooring thresholds for each Gaussian in each dimension
+     */
+    const blitz::Array<double,2> getVarianceThresholds() const;
+
+
+
+    ///////////////////////
+    // Setters
+    ///////////////////////
+
+    /**
+     * Set the weights
+     */
+    void setWeights(const blitz::Array<double,1> &weights);
+
+    /**
+     * Set the means
+     */
+    void setMeans(const blitz::Array<double,2> &means);
+    /**
+     * Set the means from a supervector
+     */
+    void setMeanSupervector(const blitz::Array<double,1> &mean_supervector);
+
+    /**
+     * Set the variances
+     */
+    void setVariances(const blitz::Array<double,2> &variances);
+    /**
+     * Set the variances from a supervector
+     */
+    void setVarianceSupervector(const blitz::Array<double,1> &variance_supervector);
+
+    /**
+     * Set the variance flooring thresholds in each dimension
+     */
+    void setVarianceThresholds(const double value);
+    /**
+     * Set the variance flooring thresholds in each dimension
+     * (equal for all Gaussian components)
+     */
+    void setVarianceThresholds(blitz::Array<double,1> variance_thresholds);
+    /**
+     * Set the variance flooring thresholds for each Gaussian in each dimension
+     */
+    void setVarianceThresholds(const blitz::Array<double,2> &variance_thresholds);
+
+
+    ////////////////
+    // Methods
+    /////////////////
+
+    /**
+     * Get the weights in order to be updated
+     * ("mixing coefficients") of the Gaussian components
+     * @warning Only trainers should use this function for efficiency reasons
+     */
+    inline blitz::Array<double,1>& updateWeights()
+    { return m_weights; }
+
+
+    /**
+     * Update the log of the weights in cache
+     * @warning Should be used by trainer only when using updateWeights()
+     */
+    void recomputeLogWeights() const;
+
+
+
+    /**
+     * Output the log likelihood of the sample, x, i.e. log(p(x|GMMMachine))
+     * @param[in]  x                                 The sample
+     * @param[out] log_weighted_gaussian_likelihoods For each Gaussian, i: log(weight_i*p(x|Gaussian_i))
+     * @return     The GMMMachine log likelihood, i.e. log(p(x|GMMMachine))
+     * Dimensions of the parameters are checked
+     */
+    double logLikelihood(const blitz::Array<double, 1> &x, blitz::Array<double,1> &log_weighted_gaussian_likelihoods) const;
+
+    /**
+     * Output the log likelihood of the sample, x, i.e. log(p(x|GMMMachine))
+     * @param[in]  x                                 The sample
+     * @param[out] log_weighted_gaussian_likelihoods For each Gaussian, i: log(weight_i*p(x|Gaussian_i))
+     * @return     The GMMMachine log likelihood, i.e. log(p(x|GMMMachine))
+     * @warning Dimensions of the parameters are not checked
+     */
+    double logLikelihood_(const blitz::Array<double, 1> &x, blitz::Array<double,1> &log_weighted_gaussian_likelihoods) const;
+
+    /**
+     * Output the log likelihood of the sample, x, i.e. log(p(x|GMM))
+     * @param[in]  x The sample
+     * Dimension of the input is checked
+     */
+    double logLikelihood(const blitz::Array<double, 1> &x) const;
+
+    /**
+     * Output the log likelihood of the sample, x, i.e. log(p(x|GMM))
+     * @param[in]  x The sample
+     * @warning Dimension of the input is not checked
+     */
+    double logLikelihood_(const blitz::Array<double, 1> &x) const;
+
+    /**
+     * Accumulates the GMM statistics over a set of samples.
+     * @see bool accStatistics(const blitz::Array<double,1> &x, GMMStats stats)
+     * Dimensions of the parameters are checked
+     */
+    void accStatistics(const blitz::Array<double,2>& input, GMMStats &stats) const;
+
+    /**
+     * Accumulates the GMM statistics over a set of samples.
+     * @see bool accStatistics(const blitz::Array<double,1> &x, GMMStats stats)
+     * @warning Dimensions of the parameters are not checked
+     */
+    void accStatistics_(const blitz::Array<double,2>& input, GMMStats &stats) const;
+
+    /**
+     * Accumulate the GMM statistics for this sample.
+     *
+     * @param[in]  x     The current sample
+     * @param[out] stats The accumulated statistics
+     * Dimensions of the parameters are checked
+     */
+    void accStatistics(const blitz::Array<double,1> &x, GMMStats &stats) const;
+
+    /**
+     * Accumulate the GMM statistics for this sample.
+     *
+     * @param[in]  x     The current sample
+     * @param[out] stats The accumulated statistics
+     * @warning Dimensions of the parameters are not checked
+     */
+    void accStatistics_(const blitz::Array<double,1> &x, GMMStats &stats) const;
+
+
+    /**
+     * Get a pointer to a particular Gaussian component
+     * @param[in] i The index of the Gaussian component
+     * @return A smart pointer to the i'th Gaussian component
+     *         if it exists, otherwise throws an exception
+     */
+    boost::shared_ptr<bob::learn::em::Gaussian> getGaussian(const size_t i);
+
+
+    /**
+     * Return the number of Gaussian components
+     */
+    inline size_t getNGaussians() const
+    { return m_n_gaussians; }
+
+    /**
+     * Save to a Configuration
+     */
+    void save(bob::io::base::HDF5File& config) const;
+
+    /**
+     * Load from a Configuration
+     */
+    void load(bob::io::base::HDF5File& config);
+
+    /**
+     * Load/Reload mean/variance supervector in cache
+     */
+    void reloadCacheSupervectors() const;
+
+    friend std::ostream& operator<<(std::ostream& os, const GMMMachine& machine);
+
+
+  private:
+    /**
+     * Copy another GMMMachine
+     */
+    void copy(const GMMMachine&);
+
+    /**
+     * The number of Gaussian components
+     */
+    size_t m_n_gaussians;
+
+    /**
+     * The feature dimensionality
+     */
+    size_t m_n_inputs;
+
+    /**
+     * The Gaussian components
+     */
+    std::vector<boost::shared_ptr<Gaussian> > m_gaussians;
+
+    /**
+     * The weights (also known as "mixing coefficients")
+     */
+    blitz::Array<double,1> m_weights;
+
+    /**
+     * Update the mean and variance supervectors
+     * in cache (into a 1D blitz array)
+     */
+    void updateCacheSupervectors() const;
+
+    /**
+     * Initialise the cache members (allocate arrays)
+     */
+    void initCache() const;
+
+    /**
+     * Accumulate the GMM statistics for this sample.
+     * Called by accStatistics() and accStatistics_()
+     *
+     * @param[in]  x     The current sample
+     * @param[out] stats The accumulated statistics
+     * @param[in]  log_likelihood  The current log_likelihood
+     * @warning Dimensions of the parameters are not checked
+     */
+    void accStatisticsInternal(const blitz::Array<double,1> &x,
+      GMMStats &stats, const double log_likelihood) const;
+
+
+    /// Some cache arrays to avoid re-allocation when computing log-likelihoods
+    mutable blitz::Array<double,1> m_cache_log_weights;
+    mutable blitz::Array<double,1> m_cache_log_weighted_gaussian_likelihoods;
+    mutable blitz::Array<double,1> m_cache_P;
+    mutable blitz::Array<double,2> m_cache_Px;
+
+    mutable blitz::Array<double,1> m_cache_mean_supervector;
+    mutable blitz::Array<double,1> m_cache_variance_supervector;
+    mutable bool m_cache_supervector;
+
+};
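+
+/*
+ * A minimal usage sketch (sizes are hypothetical):
+ *
+ *   bob::learn::em::GMMMachine gmm(2, 3);   // 2 Gaussians, 3 dimensions
+ *   blitz::Array<double,1> x(3);
+ *   x = 0.5, 1.0, -0.2;                     // blitz comma initialization
+ *   double ll = gmm.logLikelihood(x);       // log(p(x|GMMMachine))
+ *   bob::learn::em::GMMStats stats(2, 3);
+ *   gmm.accStatistics(x, stats);            // accumulate EM statistics
+ */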
+
+} } } // namespaces
+
+#endif // BOB_LEARN_EM_GMMMACHINE_H
diff --git a/bob/learn/em/include/bob.learn.em/GMMStats.h b/bob/learn/em/include/bob.learn.em/GMMStats.h
new file mode 100644
index 0000000000000000000000000000000000000000..af56f05ce1d6a4285d4bd3d43f0ac7b9561214a9
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/GMMStats.h
@@ -0,0 +1,141 @@
+/**
+ * @date Tue May 10 11:35:58 2011 +0200
+ * @author Francois Moulin <Francois.Moulin@idiap.ch>
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#ifndef BOB_LEARN_EM_GMMSTATS_H
+#define BOB_LEARN_EM_GMMSTATS_H
+
+#include <blitz/array.h>
+#include <bob.io.base/HDF5File.h>
+
+namespace bob { namespace learn { namespace em {
+
+/**
+ * @brief A container for GMM statistics.
+ * @see GMMMachine
+ *
+ * With respect to Reynolds, "Speaker Verification Using Adapted
+ * Gaussian Mixture Models", DSP, 2000:
+ * Eq (8) is n(i)
+ * Eq (9) is sumPx(i) / n(i)
+ * Eq (10) is sumPxx(i) / n(i)
+ */
+class GMMStats {
+  public:
+
+    /**
+     * Default constructor.
+     */
+    GMMStats();
+
+    /**
+     * Constructor.
+     * @param n_gaussians Number of Gaussians in the mixture model.
+     * @param n_inputs    Feature dimensionality.
+     */
+    GMMStats(const size_t n_gaussians, const size_t n_inputs);
+
+    /**
+     * Copy constructor
+     */
+    GMMStats(const GMMStats& other);
+
+    /**
+     * Constructor (from a Configuration)
+     */
+    GMMStats(bob::io::base::HDF5File& config);
+
+    /**
+     * Assignment
+     */
+    GMMStats& operator=(const GMMStats& other);
+
+    /**
+     * Equal to
+     */
+    bool operator==(const GMMStats& b) const;
+
+    /**
+     * Not Equal to
+     */
+    bool operator!=(const GMMStats& b) const;
+
+    /**
+     * @brief Similar to
+     */
+    bool is_similar_to(const GMMStats& b, const double r_epsilon=1e-5,
+      const double a_epsilon=1e-8) const;
+
+    /**
+     * Updates a GMMStats with another GMMStats
+     */
+    void operator+=(const GMMStats& b);
+
+    /**
+     * Destructor
+     */
+    ~GMMStats();
+
+    /**
+     * Allocates space for the statistics and resets to zero.
+     * @param n_gaussians Number of Gaussians in the mixture model.
+     * @param n_inputs    Feature dimensionality.
+     */
+    void resize(const size_t n_gaussians, const size_t n_inputs);
+
+    /**
+     * Resets statistics to zero.
+     */
+    void init();
+
+    /**
+     * The accumulated log likelihood of all samples
+     */
+    double log_likelihood;
+
+    /**
+     * The accumulated number of samples
+     */
+    size_t T;
+
+    /**
+     * For each Gaussian, the accumulated sum of responsibilities, i.e. the sum of P(gaussian_i|x)
+     */
+    blitz::Array<double,1> n;
+
+    /**
+     * For each Gaussian, the accumulated sum of responsibility times the sample
+     */
+    blitz::Array<double,2> sumPx;
+
+    /**
+     * For each Gaussian, the accumulated sum of responsibility times the sample squared
+     */
+    blitz::Array<double,2> sumPxx;
+
+    /**
+     * Save to a Configuration
+     */
+    void save(bob::io::base::HDF5File& config) const;
+
+    /**
+     * Load from a Configuration
+     */
+    void load(bob::io::base::HDF5File& config);
+
+    friend std::ostream& operator<<(std::ostream& os, const GMMStats& g);
+
+  private:
+    /**
+     * Copy another GMMStats
+     */
+    void copy(const GMMStats&);
+};
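+
+/*
+ * How the accumulators relate to the Reynolds equations cited above, as a
+ * hedged sketch; ``gmm`` and the 2D array ``data`` are assumed to exist:
+ *
+ *   bob::learn::em::GMMStats stats(8, 40);
+ *   gmm.accStatistics(data, stats);
+ *   // stats.n(i)                     -> Eq (8)
+ *   // stats.sumPx(i,j)  / stats.n(i) -> Eq (9),  j-th component
+ *   // stats.sumPxx(i,j) / stats.n(i) -> Eq (10), j-th component
+ */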
+
+} } } // namespaces
+
+#endif // BOB_LEARN_EM_GMMSTATS_H
diff --git a/bob/learn/em/include/bob.learn.em/Gaussian.h b/bob/learn/em/include/bob.learn.em/Gaussian.h
new file mode 100644
index 0000000000000000000000000000000000000000..023f0080b1d2fe7a56c18e114b748d0ac629fcd6
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/Gaussian.h
@@ -0,0 +1,247 @@
+/**
+ * @date Tue May 10 11:35:58 2011 +0200
+ * @author Francois Moulin <Francois.Moulin@idiap.ch>
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#ifndef BOB_LEARN_EM_GAUSSIAN_H
+#define BOB_LEARN_EM_GAUSSIAN_H
+
+#include <bob.io.base/HDF5File.h>
+#include <blitz/array.h>
+#include <limits>
+
+namespace bob { namespace learn { namespace em {
+
+/**
+ * @brief This class implements a multivariate diagonal Gaussian distribution.
+ */
+class Gaussian
+{
+  public:
+    /**
+     * Default constructor
+     */
+    Gaussian();
+
+    /**
+     * Constructor
+     * @param[in] n_inputs The feature dimensionality
+     */
+    Gaussian(const size_t n_inputs);
+
+    /**
+     * Destructor
+     */
+    virtual ~Gaussian();
+
+    /**
+     * Copy constructor
+     */
+    Gaussian(const Gaussian& other);
+
+    /**
+     * Constructs from a configuration file
+     */
+    Gaussian(bob::io::base::HDF5File& config);
+
+    /**
+     * Assignment
+     */
+    Gaussian& operator=(const Gaussian &other);
+
+    /**
+     * Equal to
+     */
+    bool operator==(const Gaussian& b) const;
+    /**
+     * Not equal to
+     */
+    bool operator!=(const Gaussian& b) const;
+    /**
+     * @brief Similar to
+     */
+    bool is_similar_to(const Gaussian& b, const double r_epsilon=1e-5,
+      const double a_epsilon=1e-8) const;
+
+    /**
+     * Set the input dimensionality.
+     * @see resize()
+     * @param n_inputs The feature dimensionality
+     * @warning Unlike resize(), the mean and variance are resized
+     *   but not (re)initialized
+     */
+    void setNInputs(const size_t n_inputs);
+
+    /**
+     * Get the input dimensionality
+     */
+    size_t getNInputs() const
+    { return m_n_inputs; }
+
+    /**
+     * Set the input dimensionality, reset the mean to zero
+     * and the variance to one.
+     * @see setNInputs()
+     * @param n_inputs The feature dimensionality
+     */
+    void resize(const size_t n_inputs);
+
+    /**
+     * Get the mean
+     */
+    inline const blitz::Array<double,1>& getMean() const
+    { return m_mean; }
+
+    /**
+     * Get the mean in order to be updated
+     * @warning Only trainers should use this function for efficiency reasons
+     */
+    inline blitz::Array<double,1>& updateMean()
+    { return m_mean; }
+
+    /**
+     * Set the mean
+     */
+    void setMean(const blitz::Array<double,1> &mean);
+
+    /**
+     * Get the variance (the diagonal of the covariance matrix)
+     */
+    inline const blitz::Array<double,1>& getVariance() const
+    { return m_variance; }
+
+    /**
+     * Get the variance in order to be updated
+     * @warning Only trainers should use this function for efficiency reasons
+     */
+    inline blitz::Array<double,1>& updateVariance()
+    { return m_variance; }
+
+    /**
+     * Set the variance
+     */
+    void setVariance(const blitz::Array<double,1> &variance);
+
+    /**
+     * Get the variance flooring thresholds
+     */
+    const blitz::Array<double,1>& getVarianceThresholds() const
+    { return m_variance_thresholds; }
+
+    /**
+     * Get the variance thresholds in order to be updated
+     * @warning Only trainers should use this function for efficiency reasons
+     */
+    inline blitz::Array<double,1>& updateVarianceThreshods()
+    { return m_variance_thresholds; }
+
+    /**
+     * Set the variance flooring thresholds
+     */
+    void setVarianceThresholds(const blitz::Array<double,1> &variance_thresholds);
+
+    /**
+     * Set the variance flooring thresholds
+     */
+    void setVarianceThresholds(const double value);
+
+    /**
+     * Apply the variance flooring thresholds
+     * This method is called when using setVarianceThresholds()
+     * @warning It is only useful when using updateVarianceThreshods(),
+     * and should mostly be used by trainers
+     */
+    void applyVarianceThresholds();
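+
+    /*
+     * Flooring sketch (assumed semantics, not the verbatim implementation):
+     * each variance entry is clamped from below by its threshold, and the
+     * cached constants are refreshed since g_norm depends on the variance:
+     *
+     *   m_variance = blitz::where(m_variance < m_variance_thresholds,
+     *                             m_variance_thresholds, m_variance);
+     *   preComputeConstants();
+     */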
+
+    /**
+     * Output the log likelihood of the sample, x
+     * @param x The data sample (feature vector)
+     */
+    double logLikelihood(const blitz::Array<double,1>& x) const;
+
+    /**
+     * Output the log likelihood of the sample, x
+     * @param x The data sample (feature vector)
+     * @warning The input is NOT checked
+     */
+    double logLikelihood_(const blitz::Array<double,1>& x) const;
+
+    /**
+     * Saves to a Configuration
+     */
+    void save(bob::io::base::HDF5File& config) const;
+
+    /**
+     * Loads from a Configuration
+     */
+    void load(bob::io::base::HDF5File& config);
+
+    /**
+     * Prints a Gaussian in the output stream
+     */
+    friend std::ostream& operator<<(std::ostream& os, const bob::learn::em::Gaussian& g);
+
+
+  private:
+    /**
+     * Copies another Gaussian
+     */
+    void copy(const Gaussian& other);
+
+    /**
+     * Computes n_inputs * log(2*pi)
+     */
+    void preComputeNLog2Pi();
+
+    /**
+     * Computes and stores the value of g_norm,
+     * to later speed up evaluation of logLikelihood()
+     * Note: g_norm is defined such that
+     * log(Gaussian pdf) = log(1/((2pi)^(k/2)(det)^(1/2)) * exp(...))
+     *                   = -1/2 * (g_norm + (x-mean)^T Sigma^{-1} (x-mean)),
+     * i.e. g_norm = k*log(2*pi) + log(det); see the sketch below
+     */
+    void preComputeConstants();
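+
+    /*
+     * Illustrative sketch (derived from the definition above, not necessarily
+     * the verbatim implementation): with a diagonal covariance,
+     *
+     *   m_n_log2pi = m_n_inputs * log(2.0 * M_PI);
+     *   m_g_norm   = m_n_log2pi + blitz::sum(blitz::log(m_variance));
+     *   // logLikelihood(x) then reduces to
+     *   //   -0.5 * (m_g_norm + blitz::sum(blitz::pow2(x - m_mean) / m_variance));
+     */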
+
+    /**
+     * The mean vector of the Gaussian
+     */
+    blitz::Array<double,1> m_mean;
+
+    /**
+     * The diagonal of the covariance matrix (assumed to be diagonal)
+     */
+    blitz::Array<double,1> m_variance;
+
+    /**
+     * The variance flooring thresholds, i.e. the minimum allowed
+     * value of variance in each dimension.
+     * The variance will be set to this value if an attempt is made
+     * to set it to a smaller value.
+     */
+    blitz::Array<double,1> m_variance_thresholds;
+
+    /**
+     * A constant that depends only on the feature dimensionality
+     * m_n_log2pi = n_inputs * log(2*pi) (used to compute m_gnorm)
+     */
+    double m_n_log2pi;
+
+    /**
+     * A constant that depends only on the feature dimensionality
+     * (m_n_inputs) and the variance
+     * @see bool preComputeConstants()
+     */
+    double m_g_norm;
+
+    /**
+     * The number of inputs (feature dimensionality)
+     */
+    size_t m_n_inputs;
+};
+
+} } } // namespaces
+
+#endif // BOB_LEARN_EM_GAUSSIAN_H
diff --git a/bob/learn/em/include/bob.learn.em/ISVBase.h b/bob/learn/em/include/bob.learn.em/ISVBase.h
new file mode 100644
index 0000000000000000000000000000000000000000..477d1972e44a59e6f8d38705256e71162ab46353
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/ISVBase.h
@@ -0,0 +1,228 @@
+/**
+ * @date Tue Jan 27 16:02:00 2015 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ *
+ * @brief A base class for Joint Factor Analysis-like machines
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#ifndef BOB_LEARN_EM_ISVBASE_H
+#define BOB_LEARN_EM_ISVBASE_H
+
+#include <stdexcept>
+
+#include <bob.learn.em/GMMMachine.h>
+#include <bob.learn.em/FABase.h>
+
+#include <bob.io.base/HDF5File.h>
+#include <boost/shared_ptr.hpp>
+
+namespace bob { namespace learn { namespace em {
+
+
+/**
+ * @brief An ISV Base class which contains U and D matrices, modelling the
+ *   mean supervector of a client as m_ubm + U.x + D.z
+ * TODO: add a reference to the journal articles
+ */
+class ISVBase
+{
+  public:
+    /**
+     * @brief Default constructor. Builds an otherwise invalid 0 x 0 ISVBase
+     * The Universal Background Model and the matrices U, V and diag(d) are
+     * not initialized.
+     */
+    ISVBase();
+
+    /**
+     * @brief Constructor. Builds a new ISVBase.
+     * The Universal Background Model and the matrices U, V and diag(d) are
+     * not initialized.
+     *
+     * @param ubm The Universal Background Model
+     * @param ru size of U (CD x ru)
+     * @warning ru SHOULD BE >= 1.
+     */
+    ISVBase(const boost::shared_ptr<bob::learn::em::GMMMachine> ubm, const size_t ru=1);
+
+    /**
+     * @brief Copy constructor
+     */
+    ISVBase(const ISVBase& other);
+
+    /**
+     * @brief Starts a new ISVBase from an existing Configuration object.
+     */
+    ISVBase(bob::io::base::HDF5File& config);
+
+    /**
+     * @brief Just to virtualise the destructor
+     */
+    virtual ~ISVBase();
+
+    /**
+     * @brief Assigns from a different JFA machine
+     */
+    ISVBase& operator=(const ISVBase &other);
+
+    /**
+     * @brief Equal to
+     */
+    bool operator==(const ISVBase& b) const
+    { return m_base.operator==(b.m_base); }
+
+    /**
+     * @brief Not equal to
+     */
+    bool operator!=(const ISVBase& b) const
+    { return m_base.operator!=(b.m_base); }
+
+    /**
+     * @brief Similar to
+     */
+    bool is_similar_to(const ISVBase& b, const double r_epsilon=1e-5,
+      const double a_epsilon=1e-8) const
+    { return m_base.is_similar_to(b.m_base, r_epsilon, a_epsilon); }
+
+    /**
+     * @brief Saves machine to an HDF5 file
+     */
+    void save(bob::io::base::HDF5File& config) const;
+
+    /**
+     * @brief Loads data from an existing configuration object. Resets
+     * the current state.
+     */
+    void load(bob::io::base::HDF5File& config);
+
+    /**
+     * @brief Returns the UBM
+     */
+    const boost::shared_ptr<bob::learn::em::GMMMachine> getUbm() const
+    { return m_base.getUbm(); }
+
+    /**
+     * @brief Returns the U matrix
+     */
+    const blitz::Array<double,2>& getU() const
+    { return m_base.getU(); }
+
+    /**
+     * @brief Returns the diagonal matrix diag(d) (as a 1D vector)
+     */
+    const blitz::Array<double,1>& getD() const
+    { return m_base.getD(); }
+
+    /**
+     * @brief Returns the number of Gaussian components C
+     * @warning An exception is thrown if no Universal Background Model has
+     *   been set yet.
+     */
+    const size_t getNGaussians() const
+    { return m_base.getNGaussians(); }
+
+    /**
+     * @brief Returns the feature dimensionality D
+     * @warning An exception is thrown if no Universal Background Model has
+     *   been set yet.
+     */
+    const size_t getNInputs() const
+    { return m_base.getNInputs(); }
+
+    /**
+     * @brief Returns the supervector length CD
+     * (CxD: Number of Gaussian components by the feature dimensionality)
+     * @warning An exception is thrown if no Universal Background Model has
+     *   been set yet.
+     */
+    const size_t getSupervectorLength() const
+    { return m_base.getSupervectorLength(); }
+
+    /**
+     * @brief Returns the size/rank ru of the U matrix
+     */
+    const size_t getDimRu() const
+    { return m_base.getDimRu(); }
+
+    /**
+     * @brief Resets the dimensionality of the subspace U
+     * U is hence uninitialized.
+     */
+    void resize(const size_t ru)
+    { m_base.resize(ru, 1);
+      // ISV has no V subspace: rv stays fixed at 1 and V is zeroed out
+      blitz::Array<double,2>& V = m_base.updateV();
+      V = 0;
+    }
+
+    /**
+     * @brief Returns the U matrix in order to update it
+     * @warning Should only be used by the trainer for efficiency reasons,
+     *   or for testing purposes.
+     */
+    blitz::Array<double,2>& updateU()
+    { return m_base.updateU(); }
+
+    /**
+     * @brief Returns the diagonal matrix diag(d) (as a 1D vector) in order
+     * to update it
+     * @warning Should only be used by the trainer for efficiency reasons,
+     *   or for testing purposes.
+     */
+    blitz::Array<double,1>& updateD()
+    { return m_base.updateD(); }
+
+
+    /**
+     * @brief Sets (the mean supervector of) the Universal Background Model
+     * U, V and d are uninitialized in case of dimensions update (C or D)
+     */
+    void setUbm(const boost::shared_ptr<bob::learn::em::GMMMachine> ubm)
+    { m_base.setUbm(ubm); }
+
+    /**
+     * @brief Sets the U matrix
+     */
+    void setU(const blitz::Array<double,2>& U)
+    { m_base.setU(U); }
+
+    /**
+     * @brief Sets the diagonal matrix diag(d)
+     * (a 1D vector is expected as an argument)
+     */
+    void setD(const blitz::Array<double,1>& d)
+    { m_base.setD(d); }
+
+    /**
+     * @brief Estimates x from the GMM statistics considering the LPT
+     * assumption, that is, the latent session variable x is approximated
+     * using the UBM
+     */
+    void estimateX(const bob::learn::em::GMMStats& gmm_stats, blitz::Array<double,1>& x) const
+    { m_base.estimateX(gmm_stats, x); }
+
+    /**
+     * @brief Precompute (put U^{T}.Sigma^{-1} matrix in cache)
+     * @warning Should only be used by the trainer for efficiency reasons,
+     *   or for testing purposes.
+     */
+    void precompute()
+    { m_base.updateCacheUbmUVD(); }
+
+    /**
+     * @brief Returns the FABase member
+     */
+    const bob::learn::em::FABase& getBase() const
+    { return m_base; }
+
+
+  private:
+    // FABase
+    bob::learn::em::FABase m_base;
+};
+
+
+} } } // namespaces
+
+#endif // BOB_LEARN_EM_JFABASE_H
diff --git a/bob/learn/em/include/bob.learn.em/ISVMachine.h b/bob/learn/em/include/bob.learn.em/ISVMachine.h
new file mode 100644
index 0000000000000000000000000000000000000000..8b941bce4c17bc05acb1be4ed9ce39e2da6de989
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/ISVMachine.h
@@ -0,0 +1,230 @@
+/**
+ * @date Tue Jan 27 16:06:00 2015 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ *
+ * @brief A base class for Joint Factor Analysis-like machines
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#ifndef BOB_LEARN_EM_ISVMACHINE_H
+#define BOB_LEARN_EM_ISVMACHINE_H
+
+#include <stdexcept>
+
+#include <bob.learn.em/ISVBase.h>
+#include <bob.learn.em/GMMMachine.h>
+#include <bob.learn.em/LinearScoring.h>
+
+#include <bob.io.base/HDF5File.h>
+#include <boost/shared_ptr.hpp>
+
+namespace bob { namespace learn { namespace em {
+
+
+/**
+ * @brief An ISVMachine which is associated with an ISVBase that contains
+ *   the U and D matrices.
+ * TODO: add a reference to the journal articles
+ */
+class ISVMachine
+{
+  public:
+    /**
+     * @brief Default constructor. Builds an otherwise invalid 0 x 0 ISVMachine
+     * The Universal Background Model and the matrices U, V and diag(d) are
+     * not initialized.
+     */
+    ISVMachine();
+
+    /**
+     * @brief Constructor. Builds a new ISVMachine.
+     *
+     * @param isv_base The ISVBase associated with this machine
+     */
+    ISVMachine(const boost::shared_ptr<bob::learn::em::ISVBase> isv_base);
+
+    /**
+     * @brief Copy constructor
+     */
+    ISVMachine(const ISVMachine& other);
+
+    /**
+     * @brief Starts a new ISVMachine from an existing Configuration object.
+     */
+    ISVMachine(bob::io::base::HDF5File& config);
+
+    /**
+     * @brief Just to virtualise the destructor
+     */
+    virtual ~ISVMachine();
+
+    /**
+     * @brief Assigns from a different ISV machine
+     */
+    ISVMachine& operator=(const ISVMachine &other);
+
+    /**
+     * @brief Equal to
+     */
+    bool operator==(const ISVMachine& b) const;
+
+    /**
+     * @brief Not equal to
+     */
+    bool operator!=(const ISVMachine& b) const;
+
+    /**
+     * @brief Similar to
+     */
+    bool is_similar_to(const ISVMachine& b, const double r_epsilon=1e-5,
+      const double a_epsilon=1e-8) const;
+
+    /**
+     * @brief Saves machine to an HDF5 file
+     */
+    void save(bob::io::base::HDF5File& config) const;
+
+    /**
+     * @brief Loads data from an existing configuration object. Resets
+     * the current state.
+     */
+    void load(bob::io::base::HDF5File& config);
+
+
+    /**
+     * @brief Returns the number of Gaussian components C
+     * @warning An exception is thrown if no Universal Background Model has
+     *   been set yet.
+     */
+    const size_t getNGaussians() const
+    { return m_isv_base->getNGaussians(); }
+
+    /**
+     * @brief Returns the feature dimensionality D
+     * @warning An exception is thrown if no Universal Background Model has
+     *   been set yet.
+     */
+    const size_t getNInputs() const
+    { return m_isv_base->getNInputs(); }
+
+    /**
+     * @brief Returns the supervector length CD
+     * (CxD: Number of Gaussian components by the feature dimensionality)
+     * @warning An exception is thrown if no Universal Background Model has
+     *   been set yet.
+     */
+    const size_t getSupervectorLength() const
+    { return m_isv_base->getSupervectorLength(); }
+
+    /**
+     * @brief Returns the size/rank ru of the U matrix
+     */
+    const size_t getDimRu() const
+    { return m_isv_base->getDimRu(); }
+
+    /**
+     * @brief Returns the x session factor
+     */
+    const blitz::Array<double,1>& getX() const
+    { return m_cache_x; }
+
+    /**
+     * @brief Returns the z speaker factor
+     */
+    const blitz::Array<double,1>& getZ() const
+    { return m_z; }
+
+    /**
+     * @brief Returns the z speaker factors in order to update it
+     */
+    blitz::Array<double,1>& updateZ()
+    { return m_z; }
+
+    /**
+     * @brief Sets the z speaker factor
+     */
+    void setZ(const blitz::Array<double,1>& z);
+
+    /**
+     * @brief Returns the ISVBase
+     */
+    const boost::shared_ptr<bob::learn::em::ISVBase> getISVBase() const
+    { return m_isv_base; }
+
+    /**
+     * @brief Sets the ISVBase
+     */
+    void setISVBase(const boost::shared_ptr<bob::learn::em::ISVBase> isv_base);
+
+
+    /**
+     * @brief Estimates x from the GMM statistics considering the LPT
+     * assumption, that is, the latent session variable x is approximated
+     * using the UBM
+     */
+    void estimateX(const bob::learn::em::GMMStats& gmm_stats, blitz::Array<double,1>& x) const
+    { m_isv_base->estimateX(gmm_stats, x); }
+    /**
+     * @brief Estimates Ux from the GMM statistics considering the LPT
+     * assumption, that is, the latent session variable x is approximated
+     * using the UBM
+     */
+    void estimateUx(const bob::learn::em::GMMStats& gmm_stats, blitz::Array<double,1>& Ux);
+
+   /**
+    * @brief Executes the machine
+    *
+    * @param input input data used by the machine
+    * @warning Inputs are checked
+    * @return score value computed by the machine    
+    */
+    double forward(const bob::learn::em::GMMStats& input);
+    /**
+     * @brief Computes a score for the given UBM statistics and given the
+     * Ux vector
+     */
+    double forward(const bob::learn::em::GMMStats& gmm_stats,
+      const blitz::Array<double,1>& Ux);
+
+    /**
+     * @brief Executes the machine
+     *
+     * @param input input data used by the machine
+     * @warning Inputs are NOT checked
+     * @return score value computed by the machine     
+     */
+    double forward_(const bob::learn::em::GMMStats& input);
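+
+    /*
+     * Usage sketch (all names below are illustrative, not defined in this
+     * file): build the machine from a trained ISVBase, enrol a client, then
+     * score probe statistics:
+     *
+     *   boost::shared_ptr<bob::learn::em::GMMMachine> ubm = ...; // trained UBM
+     *   boost::shared_ptr<bob::learn::em::ISVBase> base(
+     *     new bob::learn::em::ISVBase(ubm, 10));                 // ru = 10
+     *   // ... train base (U matrix) with an ISVTrainer ...
+     *   bob::learn::em::ISVMachine machine(base);
+     *   // ... enrol machine (z factor) from client GMMStats ...
+     *   double score = machine.forward(probe_stats);
+     */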
+
+  private:
+    /**
+     * @brief Resize latent variable according to the ISVBase
+     */
+    void resize();
+    /**
+     * @brief Updates the cache
+     */
+    void updateCache();
+    /**
+     * @brief Resize working arrays
+     */
+    void resizeTmp();
+
+    // UBM
+    boost::shared_ptr<bob::learn::em::ISVBase> m_isv_base;
+
+    // z vector/factor learned during the enrolment procedure
+    blitz::Array<double,1> m_z;
+
+    // cache
+    blitz::Array<double,1> m_cache_mDz;
+    // x vector/factor in cache when computing scores
+    mutable blitz::Array<double,1> m_cache_x;
+
+    // Ux working array used when computing scores
+    mutable blitz::Array<double,1> m_tmp_Ux;
+};
+
+} } } // namespaces
+
+#endif // BOB_LEARN_EM_ISVMACHINE_H
diff --git a/bob/learn/em/include/bob.learn.em/ISVTrainer.h b/bob/learn/em/include/bob.learn.em/ISVTrainer.h
new file mode 100644
index 0000000000000000000000000000000000000000..e314ad6052253057f83958d3a4982e1a097dedcf
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/ISVTrainer.h
@@ -0,0 +1,154 @@
+/**
+ * @date Tue Jul 19 12:16:17 2011 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * @brief JFA functions
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#ifndef BOB_LEARN_EM_ISVTRAINER_H
+#define BOB_LEARN_EM_ISVTRAINER_H
+
+#include <blitz/array.h>
+#include <bob.learn.em/GMMStats.h>
+#include <bob.learn.em/FABaseTrainer.h>
+#include <bob.learn.em/ISVMachine.h>
+#include <vector>
+
+#include <map>
+#include <string>
+#include <bob.core/array_copy.h>
+#include <boost/shared_ptr.hpp>
+#include <boost/random.hpp>
+#include <bob.core/logging.h>
+
+namespace bob { namespace learn { namespace em {
+
+class ISVTrainer
+{
+  public:
+    /**
+     * @brief Constructor
+     */
+    ISVTrainer(const double relevance_factor=4.);
+
+    /**
+     * @brief Copy constructor
+     */
+    ISVTrainer(const ISVTrainer& other);
+
+    /**
+     * @brief Destructor
+     */
+    virtual ~ISVTrainer();
+
+    /**
+     * @brief Assignment operator
+     */
+    ISVTrainer& operator=(const ISVTrainer& other);
+
+    /**
+     * @brief Equal to
+     */
+    bool operator==(const ISVTrainer& b) const;
+
+    /**
+     * @brief Not equal to
+     */
+    bool operator!=(const ISVTrainer& b) const;
+
+    /**
+     * @brief Similar to
+     */
+    bool is_similar_to(const ISVTrainer& b, const double r_epsilon=1e-5,
+      const double a_epsilon=1e-8) const;
+
+    /**
+     * @brief This method performs some initialization before the EM loop.
+     */
+    virtual void initialize(bob::learn::em::ISVBase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar);
+
+    /**
+     * @brief Calculates and saves statistics across the dataset
+     * The statistics will be used in the mStep() that follows.
+     */
+    virtual void eStep(bob::learn::em::ISVBase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar);
+
+    /**
+     * @brief Performs a maximization step to update the parameters of the
+     * factor analysis model.
+     */
+    virtual void mStep(bob::learn::em::ISVBase& machine);
+
+    /**
+     * @brief Computes the average log likelihood using the current estimates
+     * of the latent variables.
+     */
+    virtual double computeLikelihood(bob::learn::em::ISVBase& machine);
+
+    /**
+     * @brief Enrol a client
+     */
+    void enrol(bob::learn::em::ISVMachine& machine,
+      const std::vector<boost::shared_ptr<bob::learn::em::GMMStats> >& features,
+      const size_t n_iter);
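+
+    /*
+     * Training/enrolment sketch (assumed typical usage; `base`, `data`,
+     * `machine` and `client_stats` are illustrative names). `data` holds one
+     * vector of GMMStats per client:
+     *
+     *   bob::learn::em::ISVTrainer trainer(4.);   // relevance factor
+     *   trainer.initialize(base, data);
+     *   for (size_t it = 0; it < n_iterations; ++it) {
+     *     trainer.eStep(base, data);
+     *     trainer.mStep(base);
+     *   }
+     *   trainer.enrol(machine, client_stats, 1);  // estimates z for a client
+     */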
+
+    /**
+     * @brief Get the x speaker factors
+     */
+    const std::vector<blitz::Array<double,2> >& getX() const
+    { return m_base_trainer.getX(); }
+    /**
+     * @brief Get the z speaker factors
+     */
+    const std::vector<blitz::Array<double,1> >& getZ() const
+    { return m_base_trainer.getZ(); }
+    /**
+     * @brief Set the x speaker factors
+     */
+    void setX(const std::vector<blitz::Array<double,2> >& X)
+    { m_base_trainer.setX(X); }
+    /**
+     * @brief Set the z speaker factors
+     */
+    void setZ(const std::vector<blitz::Array<double,1> >& z)
+    { m_base_trainer.setZ(z); }
+
+    /**
+     * @brief Getters for the accumulators
+     */
+    const blitz::Array<double,3>& getAccUA1() const
+    { return m_base_trainer.getAccUA1(); }
+    const blitz::Array<double,2>& getAccUA2() const
+    { return m_base_trainer.getAccUA2(); }
+
+    /**
+     * @brief Setters for the accumulators. Useful if the e-Step needs
+     * to be parallelized.
+     */
+    void setAccUA1(const blitz::Array<double,3>& acc)
+    { m_base_trainer.setAccUA1(acc); }
+    void setAccUA2(const blitz::Array<double,2>& acc)
+    { m_base_trainer.setAccUA2(acc); }
+
+
+  private:
+    /**
+     * @brief Initialize D to sqrt(ubm_var/relevance_factor)
+     */
+    void initializeD(bob::learn::em::ISVBase& machine) const;
+
+    // Attributes
+    bob::learn::em::FABaseTrainer m_base_trainer;
+
+    double m_relevance_factor;
+
+    boost::shared_ptr<boost::mt19937> m_rng; ///< The random number generator for the initialization
+};
+
+} } } // namespaces
+
+#endif /* BOB_LEARN_EM_ISVTRAINER_H */
diff --git a/bob/learn/em/include/bob.learn.em/IVectorMachine.h b/bob/learn/em/include/bob.learn.em/IVectorMachine.h
new file mode 100644
index 0000000000000000000000000000000000000000..59c67493a44bfb048feb27dba39b3d18ed0b90bc
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/IVectorMachine.h
@@ -0,0 +1,274 @@
+/**
+ * @date Sat Mar 30 20:55:00 2013 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#ifndef BOB_LEARN_EM_IVECTOR_MACHINE_H
+#define BOB_LEARN_EM_IVECTOR_MACHINE_H
+
+#include <blitz/array.h>
+#include <bob.learn.em/GMMMachine.h>
+#include <bob.learn.em/GMMStats.h>
+#include <bob.io.base/HDF5File.h>
+
+namespace bob { namespace learn { namespace em {
+
+/**
+ * @brief An IVectorMachine consists of a Total Variability subspace \f$T\f$
+ *   and allows the extraction of i-vectors\n
+ * Reference:\n
+ * "Front-End Factor Analysis For Speaker Verification",
+ *    N. Dehak, P. Kenny, R. Dehak, P. Dumouchel, P. Ouellet,
+ *   IEEE Trans. on Audio, Speech and Language Processing
+ */
+class IVectorMachine
+{
+  public:
+    /**
+     * @brief Default constructor. Builds an IVectorMachine.
+     * The Universal Background Model and the matrices \f$T\f$ and
+     * \f$diag(\Sigma)\f$ are not initialized.
+     */
+    IVectorMachine();
+
+    /**
+     * @brief Constructor. Builds a new IVectorMachine.
+     * The Universal Background Model and the matrices \f$T\f$ and
+     * \f$diag(\Sigma)\f$ are not initialized.
+     *
+     * @param ubm The Universal Background Model
+     * @param rt size of \f$T\f$ (CD x rt)
+     * @param variance_threshold variance flooring threshold for the
+     *   \f$\Sigma\f$ (diagonal) matrix
+     * @warning rt SHOULD BE >= 1.
+     */
+    IVectorMachine(const boost::shared_ptr<bob::learn::em::GMMMachine> ubm,
+      const size_t rt=1, const double variance_threshold=1e-10);
+
+    /**
+     * @brief Copy constructor
+     */
+    IVectorMachine(const IVectorMachine& other);
+
+    /**
+     * @brief Starts a new IVectorMachine from an existing Configuration object.
+     */
+    IVectorMachine(bob::io::base::HDF5File& config);
+
+    /**
+     * @brief Destructor
+     */
+    virtual ~IVectorMachine();
+
+    /**
+     * @brief Assigns from a different IVectorMachine
+     */
+    IVectorMachine& operator=(const IVectorMachine &other);
+
+    /**
+     * @brief Equal to
+     */
+    bool operator==(const IVectorMachine& b) const;
+
+    /**
+     * @brief Not equal to
+     */
+    bool operator!=(const IVectorMachine& b) const;
+
+    /**
+     * @brief Similar to
+     */
+    bool is_similar_to(const IVectorMachine& b, const double r_epsilon=1e-5,
+      const double a_epsilon=1e-8) const;
+
+    /**
+     * @brief Saves model to an HDF5 file
+     */
+    void save(bob::io::base::HDF5File& config) const;
+
+    /**
+     * @brief Loads data from an existing configuration object. Resets
+     * the current state.
+     */
+    void load(bob::io::base::HDF5File& config);
+
+    /**
+     * @brief Returns the UBM
+     */
+    const boost::shared_ptr<bob::learn::em::GMMMachine> getUbm() const
+    { return m_ubm; }
+
+    /**
+     * @brief Returns the \f$T\f$ matrix
+     */
+    const blitz::Array<double,2>& getT() const
+    { return m_T; }
+
+    /**
+     * @brief Returns the \f$\Sigma\f$ (diagonal) matrix as a 1D array
+     */
+    const blitz::Array<double,1>& getSigma() const
+    { return m_sigma; }
+
+    /**
+     * @brief Gets the variance flooring threshold
+     */
+    const double getVarianceThreshold() const
+    { return m_variance_threshold; }
+
+    /**
+     * @brief Returns the number of Gaussian components C.
+     * @warning An exception is thrown if no Universal Background Model has
+     *   been set yet.
+     */
+    const size_t getNGaussians() const
+    { return m_ubm->getNGaussians(); }
+
+    /**
+     * @brief Returns the feature dimensionality D.
+     * @warning An exception is thrown if no Universal Background Model has
+     *   been set yet.
+     */
+    const size_t getNInputs() const
+    { return m_ubm->getNInputs(); }
+
+    /**
+     * @brief Returns the supervector length CD.
+     * (CxD: Number of Gaussian components by the feature dimensionality)
+     * @warning An exception is thrown if no Universal Background Model has
+     *   been set yet.
+     */
+    const size_t getSupervectorLength() const
+    { return m_ubm->getNGaussians()*m_ubm->getNInputs(); }
+
+    /**
+     * @brief Returns the size/rank rt of the \f$T\f$ matrix
+     */
+    const size_t getDimRt() const
+    { return m_rt; }
+
+    /**
+     * @brief Resets the dimensionality of the subspace \f$T\f$.
+     * \f$T\f$ is hence uninitialized.
+     */
+    void resize(const size_t rt);
+
+    /**
+     * @brief Returns the \f$T\f$ matrix in order to update it.
+     * @warning Should only be used by the trainer for efficiency reasons,
+     *   or for testing purposes.
+     */
+    blitz::Array<double,2>& updateT()
+    { return m_T; }
+
+    /**
+     * @brief Returns the \f$\Sigma\f$ (diagonal) matrix in order to update it.
+     * @warning Should only be used by the trainer for efficiency reasons,
+     *   or for testing purposes.
+     */
+    blitz::Array<double,1>& updateSigma()
+    { return m_sigma; }
+
+    /**
+     * @brief Sets (the mean supervector of) the Universal Background Model.
+     * \f$T\f$ and \f$\Sigma\f$ are uninitialized in case of dimensions update (C or D)
+     */
+    void setUbm(const boost::shared_ptr<bob::learn::em::GMMMachine> ubm);
+
+    /**
+     * @brief Sets the \f$T\f$ matrix
+     */
+    void setT(const blitz::Array<double,2>& T);
+
+    /**
+     * @brief Sets the \f$\Sigma\f$ (diagonal) matrix
+     */
+    void setSigma(const blitz::Array<double,1>& sigma);
+
+    /**
+     * @brief Set the variance flooring threshold
+     */
+    void setVarianceThreshold(const double value);
+
+    /**
+     * @brief Update arrays in cache
+     * @warning It is only useful when using updateT() or updateSigma()
+     * and should mostly be done by trainers
+     */
+    void precompute();
+
+    /**
+     * @brief Computes \f$(Id + \sum_{c=1}^{C} N_{i,j,c} T_{c}^{T} \Sigma_{c}^{-1} T_{c})\f$
+     * @warning No check is performed
+     */
+    void computeIdTtSigmaInvT(const bob::learn::em::GMMStats& input, blitz::Array<double,2>& output) const;
+
+    /**
+     * @brief Computes \f$T^{T} \Sigma^{-1} \sum_{c=1}^{C} (F_c - N_c ubmmean_{c})\f$
+     * @warning No check is performed
+     */
+    void computeTtSigmaInvFnorm(const bob::learn::em::GMMStats& input, blitz::Array<double,1>& output) const;
+
+    /**
+     * @brief Extracts an ivector from the input GMM statistics
+     *
+     * @param input GMM statistics to be used by the machine
+     * @param output I-vector computed by the machine
+     */
+    void forward(const bob::learn::em::GMMStats& input, blitz::Array<double,1>& output) const;
+
+    /**
+     * @brief Extracts an ivector from the input GMM statistics
+     *
+     * @param input GMM statistics to be used by the machine
+     * @param output I-vector computed by the machine
+     * @warning Inputs are NOT checked
+     */
+    void forward_(const bob::learn::em::GMMStats& input, blitz::Array<double,1>& output) const;
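+
+    /*
+     * Extraction sketch (a plausible reading implied by the two helpers
+     * above, not necessarily the verbatim implementation): the i-vector w
+     * solves the rt x rt linear system A.w = b with
+     *
+     *   blitz::Array<double,2> A(rt, rt);  // computeIdTtSigmaInvT(input, A)
+     *   blitz::Array<double,1> b(rt);      // computeTtSigmaInvFnorm(input, b)
+     *   // w = A^{-1} b, e.g. via a Cholesky or LU solve
+     */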
+
+  private:
+    /**
+     * @brief Apply the variance flooring thresholds.
+     * This method is called when using setVarianceThresholds()
+     */
+    void applyVarianceThreshold();
+
+    /**
+     * @brief Resize cache
+     */
+    void resizeCache();
+    /**
+     * @brief Resize working arrays
+     */
+    void resizeTmp();
+    /**
+     * @brief Resize cache and working arrays before updating cache
+     */
+    void resizePrecompute();
+
+    // UBM
+    boost::shared_ptr<bob::learn::em::GMMMachine> m_ubm;
+
+    // dimensionality
+    size_t m_rt; ///< size of \f$T\f$ (CD x rt)
+
+    ///< \f$T\f$ and \f$\Sigma\f$ matrices.
+    ///< \f$\Sigma\f$ is assumed to be diagonal, and only the diagonal is stored
+    blitz::Array<double,2> m_T; ///< The total variability matrix \f$T\f$
+    blitz::Array<double,1> m_sigma; ///< The diagonal covariance matrix \f$\Sigma\f$
+    double m_variance_threshold; ///< The variance flooring threshold
+
+    blitz::Array<double,3> m_cache_Tct_sigmacInv;
+    blitz::Array<double,3> m_cache_Tct_sigmacInv_Tc;
+
+    mutable blitz::Array<double,1> m_tmp_d;
+    mutable blitz::Array<double,1> m_tmp_t1;
+    mutable blitz::Array<double,1> m_tmp_t2;
+    mutable blitz::Array<double,2> m_tmp_tt;
+};
+
+} } } // namespaces
+
+#endif // BOB_LEARN_EM_IVECTOR_MACHINE_H
diff --git a/bob/learn/em/include/bob.learn.em/IVectorTrainer.h b/bob/learn/em/include/bob.learn.em/IVectorTrainer.h
new file mode 100644
index 0000000000000000000000000000000000000000..4d90627f6c97d9a771498845bbfc229d937db9ca
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/IVectorTrainer.h
@@ -0,0 +1,152 @@
+/**
+ * @date Sat Mar 30 20:55:00 2013 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#ifndef BOB_LEARN_EM_IVECTOR_TRAINER_H
+#define BOB_LEARN_EM_IVECTOR_TRAINER_H
+
+#include <blitz/array.h>
+#include <bob.learn.em/IVectorMachine.h>
+#include <bob.learn.em/GMMStats.h>
+#include <boost/shared_ptr.hpp>
+#include <vector>
+#include <bob.core/array_copy.h>
+#include <boost/random.hpp>
+
+#include <boost/random/mersenne_twister.hpp>
+
+namespace bob { namespace learn { namespace em {
+
+/**
+ * @brief An IVectorTrainer to learn a Total Variability subspace \f$T\f$
+ *  (and optionally a covariance matrix \f$\Sigma\f$).\n
+ * Reference:\n
+ * "Front-End Factor Analysis For Speaker Verification",
+ *    N. Dehak, P. Kenny, R. Dehak, P. Dumouchel, P. Ouellet,
+ *   IEEE Trans. on Audio, Speech and Language Processing
+ */
+class IVectorTrainer
+{
+  public:
+    /**
+     * @brief Default constructor. Builds an IVectorTrainer
+     */
+    IVectorTrainer(const bool update_sigma=false);
+
+    /**
+     * @brief Copy constructor
+     */
+    IVectorTrainer(const IVectorTrainer& other);
+
+    /**
+     * @brief Destructor
+     */
+    virtual ~IVectorTrainer();
+
+    /**
+     * @brief Initialization before the EM loop
+     */
+    virtual void initialize(bob::learn::em::IVectorMachine& ivector);
+
+    /**
+     * @brief Calculates statistics across the dataset,
+     * and saves these as:
+     * - m_acc_Nij_wij2
+     * - m_acc_Fnormij_wij
+     * - m_acc_Nij (only if update_sigma is enabled)
+     * - m_acc_Snormij (only if update_sigma is enabled)
+     *
+     * These statistics will be used in the mStep() that follows.
+     */
+    virtual void eStep(bob::learn::em::IVectorMachine& ivector,
+      const std::vector<bob::learn::em::GMMStats>& data);
+
+    /**
+     * @brief Maximisation step: Update the Total Variability matrix \f$T\f$
+     * and \f$\Sigma\f$ if update_sigma is enabled.
+     */
+    virtual void mStep(bob::learn::em::IVectorMachine& ivector);
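+
+    /*
+     * Update sketch (the standard total-variability M-step is assumed here):
+     * for each Gaussian component c, the D x rt block T_c of T is
+     * re-estimated from the accumulators as
+     *
+     *   T_c = m_acc_Fnormij_wij(c, all, all) * inverse(m_acc_Nij_wij2(c, all, all))
+     *
+     * i.e. a D x rt right-hand side times the inverse of an rt x rt weighted
+     * second-moment matrix; \f$\Sigma\f$ is refreshed from m_acc_Snormij and
+     * m_acc_Nij when update_sigma is enabled.
+     */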
+
+
+    /**
+     * @brief Assigns from a different IVectorTrainer
+     */
+    IVectorTrainer& operator=(const IVectorTrainer &other);
+
+    /**
+     * @brief Equal to
+     */
+    bool operator==(const IVectorTrainer& b) const;
+
+    /**
+     * @brief Not equal to
+     */
+    bool operator!=(const IVectorTrainer& b) const;
+
+    /**
+     * @brief Similar to
+     */
+    bool is_similar_to(const IVectorTrainer& b, const double r_epsilon=1e-5,
+      const double a_epsilon=1e-8) const;
+
+    /**
+     * @brief Getters for the accumulators
+     */
+    const blitz::Array<double,3>& getAccNijWij2() const
+    { return m_acc_Nij_wij2; }
+    const blitz::Array<double,3>& getAccFnormijWij() const
+    { return m_acc_Fnormij_wij; }
+    const blitz::Array<double,1>& getAccNij() const
+    { return m_acc_Nij; }
+    const blitz::Array<double,2>& getAccSnormij() const
+    { return m_acc_Snormij; }
+
+    /**
+     * @brief Setters for the accumulators. Useful if the e-Step needs
+     * to be parallelized.
+     */
+    void setAccNijWij2(const blitz::Array<double,3>& acc)
+    { bob::core::array::assertSameShape(acc, m_acc_Nij_wij2);
+      m_acc_Nij_wij2 = acc; }
+    void setAccFnormijWij(const blitz::Array<double,3>& acc)
+    { bob::core::array::assertSameShape(acc, m_acc_Fnormij_wij);
+      m_acc_Fnormij_wij = acc; }
+    void setAccNij(const blitz::Array<double,1>& acc)
+    { bob::core::array::assertSameShape(acc, m_acc_Nij);
+      m_acc_Nij = acc; }
+    void setAccSnormij(const blitz::Array<double,2>& acc)
+    { bob::core::array::assertSameShape(acc, m_acc_Snormij);
+      m_acc_Snormij = acc; }
+
+  protected:
+    // Attributes
+    bool m_update_sigma;
+
+    // Accumulators
+    blitz::Array<double,3> m_acc_Nij_wij2;
+    blitz::Array<double,3> m_acc_Fnormij_wij;
+    blitz::Array<double,1> m_acc_Nij;
+    blitz::Array<double,2> m_acc_Snormij;
+
+    // Working arrays
+    mutable blitz::Array<double,1> m_tmp_wij;
+    mutable blitz::Array<double,2> m_tmp_wij2;
+    mutable blitz::Array<double,1> m_tmp_d1;
+    mutable blitz::Array<double,1> m_tmp_t1;
+    mutable blitz::Array<double,2> m_tmp_dd1;
+    mutable blitz::Array<double,2> m_tmp_dt1;
+    mutable blitz::Array<double,2> m_tmp_tt1;
+    mutable blitz::Array<double,2> m_tmp_tt2;
+    
+    /**
+     * @brief The random number generator for the initialization
+     */
+    boost::shared_ptr<boost::mt19937> m_rng;    
+};
+
+} } } // namespaces
+
+#endif // BOB_LEARN_EM_IVECTOR_TRAINER_H
diff --git a/bob/learn/em/include/bob.learn.em/JFABase.h b/bob/learn/em/include/bob.learn.em/JFABase.h
new file mode 100644
index 0000000000000000000000000000000000000000..c75cec864187c46a1648e6a7288188c1a2754929
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/JFABase.h
@@ -0,0 +1,253 @@
+/**
+ * @date Tue Jan 27 15:54:00 2015 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ *
+ * @brief A base class for Joint Factor Analysis-like machines
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#ifndef BOB_LEARN_EM_JFABASE_H
+#define BOB_LEARN_EM_JFABASE_H
+
+#include <stdexcept>
+
+#include <bob.learn.em/GMMMachine.h>
+#include <bob.learn.em/FABase.h>
+//#include <bob.learn.em/LinearScoring.h>
+
+#include <bob.io.base/HDF5File.h>
+#include <boost/shared_ptr.hpp>
+
+namespace bob { namespace learn { namespace em {
+
+
+/**
+ * @brief A JFA Base class which contains U, V and D matrices, modelling the
+ *   mean supervector of a client as m_ubm + U.x + V.y + D.z
+ * TODO: add a reference to the journal articles
+ */
+class JFABase
+{
+  public:
+    /**
+     * @brief Default constructor. Builds a 1 x 1 JFABase
+     * The Universal Background Model and the matrices U, V and diag(d) are
+     * not initialized.
+     */
+    JFABase();
+
+    /**
+     * @brief Constructor. Builds a new JFABase.
+     * The Universal Background Model and the matrices U, V and diag(d) are
+     * not initialized.
+     *
+     * @param ubm The Universal Background Model
+     * @param ru size of U (CD x ru)
+ * @param rv size of V (CD x rv)
+ * @warning ru and rv SHOULD BE >= 1.
+     */
+    JFABase(const boost::shared_ptr<bob::learn::em::GMMMachine> ubm, const size_t ru=1, const size_t rv=1);
+
+    /**
+     * @brief Copy constructor
+     */
+    JFABase(const JFABase& other);
+
+    /**
+     * @brief Starts a new JFABase from an existing Configuration object.
+     */
+    JFABase(bob::io::base::HDF5File& config);
+
+    /**
+     * @brief Just to virtualise the destructor
+     */
+    virtual ~JFABase();
+
+    /**
+     * @brief Assigns from a different JFABase
+     */
+    JFABase& operator=(const JFABase &other);
+
+    /**
+     * @brief Equal to
+     */
+    bool operator==(const JFABase& b) const
+    { return m_base.operator==(b.m_base); }
+
+    /**
+     * @brief Not equal to
+     */
+    bool operator!=(const JFABase& b) const
+    { return m_base.operator!=(b.m_base); }
+
+    /**
+     * @brief Similar to
+     */
+    bool is_similar_to(const JFABase& b, const double r_epsilon=1e-5,
+      const double a_epsilon=1e-8) const
+    { return m_base.is_similar_to(b.m_base, r_epsilon, a_epsilon); }
+
+    /**
+     * @brief Saves model to an HDF5 file
+     */
+    void save(bob::io::base::HDF5File& config) const;
+
+    /**
+     * @brief Loads data from an existing configuration object. Resets
+     * the current state.
+     */
+    void load(bob::io::base::HDF5File& config);
+
+    /**
+     * @brief Returns the UBM
+     */
+    const boost::shared_ptr<bob::learn::em::GMMMachine> getUbm() const
+    { return m_base.getUbm(); }
+
+    /**
+     * @brief Returns the U matrix
+     */
+    const blitz::Array<double,2>& getU() const
+    { return m_base.getU(); }
+
+    /**
+     * @brief Returns the V matrix
+     */
+    const blitz::Array<double,2>& getV() const
+    { return m_base.getV(); }
+
+    /**
+     * @brief Returns the diagonal matrix diag(d) (as a 1D vector)
+     */
+    const blitz::Array<double,1>& getD() const
+    { return m_base.getD(); }
+
+    /**
+     * @brief Returns the number of Gaussian components
+     * @warning An exception is thrown if no Universal Background Model has
+     *   been set yet.
+     */
+    const size_t getNGaussians() const
+    { return m_base.getNGaussians();}
+
+    /**
+     * @brief Returns the feature dimensionality D
+     * @warning An exception is thrown if no Universal Background Model has
+     *   been set yet.
+     */
+    const size_t getNInputs() const
+    { return m_base.getNInputs(); }
+
+    /**
+     * @brief Returns the supervector length CD
+     * (CxD: Number of Gaussian components by the feature dimensionality)
+     * @warning An exception is thrown if no Universal Background Model has
+     *   been set yet.
+     */
+    const size_t getSupervectorLength() const
+    { return m_base.getSupervectorLength(); }
+
+    /**
+     * @brief Returns the size/rank ru of the U matrix
+     */
+    const size_t getDimRu() const
+    { return m_base.getDimRu(); }
+
+    /**
+     * @brief Returns the size/rank rv of the V matrix
+     */
+    const size_t getDimRv() const
+    { return m_base.getDimRv(); }
+
+    /**
+     * @brief Resets the dimensionality of the subspace U and V
+     * U and V are hence uninitialized.
+     */
+    void resize(const size_t ru, const size_t rv)
+    { m_base.resize(ru, rv); }
+
+    /**
+     * @brief Returns the U matrix in order to update it
+     * @warning Should only be used by the trainer for efficiency reasons,
+     *   or for testing purposes.
+     */
+    blitz::Array<double,2>& updateU()
+    { return m_base.updateU(); }
+
+    /**
+     * @brief Returns the V matrix in order to update it
+     * @warning Should only be used by the trainer for efficiency reasons,
+     *   or for testing purposes.
+     */
+    blitz::Array<double,2>& updateV()
+    { return m_base.updateV(); }
+
+    /**
+     * @brief Returns the diagonal matrix diag(d) (as a 1D vector) in order
+     * to update it
+     * @warning Should only be used by the trainer for efficiency reasons,
+     *   or for testing purposes.
+     */
+    blitz::Array<double,1>& updateD()
+    { return m_base.updateD(); }
+
+
+    /**
+     * @brief Sets (the mean supervector of) the Universal Background Model
+     * U, V and d are uninitialized in case of dimensions update (C or D)
+     */
+    void setUbm(const boost::shared_ptr<bob::learn::em::GMMMachine> ubm)
+    { m_base.setUbm(ubm); }
+
+    /**
+     * @brief Sets the U matrix
+     */
+    void setU(const blitz::Array<double,2>& U)
+    { m_base.setU(U); }
+
+    /**
+     * @brief Sets the V matrix
+     */
+    void setV(const blitz::Array<double,2>& V)
+    { m_base.setV(V); }
+
+    /**
+     * @brief Sets the diagonal matrix diag(d)
+     * (a 1D vector is expected as an argument)
+     */
+    void setD(const blitz::Array<double,1>& d)
+    { m_base.setD(d); }
+
+    /**
+     * @brief Estimates x from the GMM statistics considering the LPT
+     * assumption, that is, the latent session variable x is approximated
+     * using the UBM
+     */
+    void estimateX(const bob::learn::em::GMMStats& gmm_stats, blitz::Array<double,1>& x) const
+    { m_base.estimateX(gmm_stats, x); }
+
+    /**
+     * @brief Precompute (put U^{T}.Sigma^{-1} matrix in cache)
+     * @warning Should only be used by the trainer for efficiency reasons,
+     *   or for testing purposes.
+     */
+    void precompute()
+    { m_base.updateCacheUbmUVD(); }
+
+    /**
+     * @brief Returns the FABase member
+     */
+    const bob::learn::em::FABase& getBase() const
+    { return m_base; }
+
+
+  private:
+    // FABase
+    bob::learn::em::FABase m_base;
+};
+
+
+} } } // namespaces
+
+#endif // BOB_LEARN_EM_JFABASE_H
diff --git a/bob/learn/em/include/bob.learn.em/JFAMachine.h b/bob/learn/em/include/bob.learn.em/JFAMachine.h
new file mode 100644
index 0000000000000000000000000000000000000000..6569b855d1b9852a22931e8e0f429e9c30514f1c
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/JFAMachine.h
@@ -0,0 +1,254 @@
+/**
+ * @date Tue Jan 27 16:47:00 2015 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ *
+ * @brief A base class for Joint Factor Analysis-like machines
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#ifndef BOB_LEARN_EM_JFAMACHINE_H
+#define BOB_LEARN_EM_JFAMACHINE_H
+
+#include <stdexcept>
+
+#include <bob.learn.em/JFABase.h>
+#include <bob.learn.em/GMMMachine.h>
+#include <bob.learn.em/LinearScoring.h>
+
+#include <bob.io.base/HDF5File.h>
+#include <boost/shared_ptr.hpp>
+
+namespace bob { namespace learn { namespace em {
+
+
+/**
+ * @brief A JFAMachine which is associated to a JFABase that contains
+ *   U, V and D matrices. The JFAMachine describes the identity part
+ *   (latent variables y and z)
+ * TODO: add a reference to the journal articles
+ */
+class JFAMachine
+{
+  public:
+    /**
+     * @brief Default constructor. Builds an otherwise invalid 0 x 0 JFAMachine
+     * The Universal Background Model and the matrices U, V and diag(d) are
+     * not initialized.
+     */
+    JFAMachine();
+
+    /**
+     * @brief Constructor. Builds a new JFAMachine.
+     *
+     * @param jfa_base The JFABase associated with this machine
+     */
+    JFAMachine(const boost::shared_ptr<bob::learn::em::JFABase> jfa_base);
+
+    /**
+     * @brief Copy constructor
+     */
+    JFAMachine(const JFAMachine& other);
+
+    /**
+     * @brief Starts a new JFAMachine from an existing Configuration object.
+     */
+    JFAMachine(bob::io::base::HDF5File& config);
+
+    /**
+     * @brief Just to virtualise the destructor
+     */
+    virtual ~JFAMachine();
+
+    /**
+     * @brief Assigns from a different JFA machine
+     */
+    JFAMachine& operator=(const JFAMachine &other);
+
+    /**
+     * @brief Equal to
+     */
+    bool operator==(const JFAMachine& b) const;
+
+    /**
+     * @brief Not equal to
+     */
+    bool operator!=(const JFAMachine& b) const;
+
+    /**
+     * @brief Similar to
+     */
+    bool is_similar_to(const JFAMachine& b, const double r_epsilon=1e-5,
+      const double a_epsilon=1e-8) const;
+
+    /**
+     * @brief Saves machine to an HDF5 file
+     */
+    void save(bob::io::base::HDF5File& config) const;
+
+    /**
+     * @brief Loads data from an existing configuration object. Resets
+     * the current state.
+     */
+    void load(bob::io::base::HDF5File& config);
+
+    /**
+     * @brief Returns the number of Gaussian components C
+     * @warning An exception is thrown if no Universal Background Model has
+     *   been set yet.
+     */
+    const size_t getNGaussians() const
+    { return m_jfa_base->getNGaussians(); }
+
+    /**
+     * @brief Returns the feature dimensionality D
+     * @warning An exception is thrown if no Universal Background Model has
+     *   been set yet.
+     */
+    const size_t getNInputs() const
+    { return m_jfa_base->getNInputs(); }
+
+    /**
+     * @brief Returns the supervector length CD
+     * (CxD: Number of Gaussian components by the feature dimensionality)
+     * @warning An exception is thrown if no Universal Background Model has
+     *   been set yet.
+     */
+    const size_t getSupervectorLength() const
+    { return m_jfa_base->getSupervectorLength(); }
+
+    /**
+     * @brief Returns the size/rank ru of the U matrix
+     */
+    const size_t getDimRu() const
+    { return m_jfa_base->getDimRu(); }
+
+    /**
+     * @brief Returns the size/rank rv of the V matrix
+     */
+    const size_t getDimRv() const
+    { return m_jfa_base->getDimRv(); }
+
+    /**
+     * @brief Returns the x session factor
+     */
+    const blitz::Array<double,1>& getX() const
+    { return m_cache_x; }
+
+    /**
+     * @brief Returns the y speaker factor
+     */
+    const blitz::Array<double,1>& getY() const
+    { return m_y; }
+
+    /**
+     * @brief Returns the z speaker factor
+     */
+    const blitz::Array<double,1>& getZ() const
+    { return m_z; }
+
+    /**
+     * @brief Returns the y speaker factors in order to update it
+     */
+    blitz::Array<double,1>& updateY()
+    { return m_y; }
+
+    /**
+     * @brief Returns the z speaker factors in order to update it
+     */
+    blitz::Array<double,1>& updateZ()
+    { return m_z; }
+
+    /**
+     * @brief Sets the y speaker factor
+     */
+    void setY(const blitz::Array<double,1>& y);
+
+    /**
+     * @brief Sets the z speaker factor
+     */
+    void setZ(const blitz::Array<double,1>& z);
+
+    /**
+     * @brief Returns the JFABase
+     */
+    const boost::shared_ptr<bob::learn::em::JFABase> getJFABase() const
+    { return m_jfa_base; }
+
+    /**
+     * @brief Sets the JFABase
+     */
+    void setJFABase(const boost::shared_ptr<bob::learn::em::JFABase> jfa_base);
+
+
+    /**
+     * @brief Estimates x from the GMM statistics considering the LPT
+     * assumption, that is, the latent session variable x is approximated
+     * using the UBM
+     */
+    void estimateX(const bob::learn::em::GMMStats& gmm_stats, blitz::Array<double,1>& x) const
+    { m_jfa_base->estimateX(gmm_stats, x); }
+    /**
+     * @brief Estimates Ux from the GMM statistics considering the LPT
+     * assumption, that is, the latent session variable x is approximated
+     * using the UBM
+     */
+    void estimateUx(const bob::learn::em::GMMStats& gmm_stats, blitz::Array<double,1>& Ux);
+
+   /**
+    * @brief Executes the machine
+    *
+    * @param input input data used by the machine
+    * @warning Inputs are checked
+    * @return score value computed by the machine
+    */
+    double forward(const bob::learn::em::GMMStats& input);
+    /**
+     * @brief Computes a score for the given UBM statistics and given the
+     * Ux vector
+     */
+    double forward(const bob::learn::em::GMMStats& gmm_stats,
+      const blitz::Array<double,1>& Ux);
+
+    /**
+     * @brief Executes the machine
+     *
+     * @param input input data used by the machine
+     * @warning Inputs are NOT checked
+     * @return score value computed by the machine
+     */
+    double forward_(const bob::learn::em::GMMStats& input);
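+
+    /*
+     * Scoring sketch (illustrative): once y and z have been enrolled, the
+     * client supervector offset m + V.y + D.z is cached (m_cache_mVyDz below)
+     * and a probe is scored with
+     *
+     *   double score = machine.forward(probe_stats);  // estimates x internally
+     */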
+
+  private:
+    /**
+     * @brief Resize latent variable according to the JFABase
+     */
+    void resize();
+    /**
+     * @brief Resize working arrays
+     */
+    void resizeTmp();
+    /**
+     * @brief Update the cache
+     */
+    void updateCache();
+
+    // UBM
+    boost::shared_ptr<bob::learn::em::JFABase> m_jfa_base;
+
+    // y and z vectors/factors learned during the enrolment procedure
+    blitz::Array<double,1> m_y;
+    blitz::Array<double,1> m_z;
+
+    // cache
+    blitz::Array<double,1> m_cache_mVyDz;
+    // x vector/factor in cache when computing scores
+    mutable blitz::Array<double,1> m_cache_x;
+
+    // Ux working array used when computing scores
+    mutable blitz::Array<double,1> m_tmp_Ux;
+};
+
+} } } // namespaces
+
+#endif // BOB_LEARN_EM_JFAMACHINE_H
diff --git a/bob/learn/em/include/bob.learn.em/JFATrainer.h b/bob/learn/em/include/bob.learn.em/JFATrainer.h
new file mode 100644
index 0000000000000000000000000000000000000000..07c0646b59ba95e17c2150ac91b3e89cc95e0dbb
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/JFATrainer.h
@@ -0,0 +1,238 @@
+/**
+ * @date Tue Jul 19 12:16:17 2011 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * @brief JFA functions
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#ifndef BOB_LEARN_EM_JFATRAINER_H
+#define BOB_LEARN_EM_JFATRAINER_H
+
+#include <blitz/array.h>
+#include <bob.learn.em/GMMStats.h>
+#include <bob.learn.em/FABaseTrainer.h>
+#include <bob.learn.em/JFAMachine.h>
+#include <vector>
+
+#include <map>
+#include <string>
+#include <bob.core/array_copy.h>
+#include <boost/shared_ptr.hpp>
+#include <boost/random.hpp>
+#include <bob.core/logging.h>
+
+namespace bob { namespace learn { namespace em {
+
+class JFATrainer
+{
+  public:
+    /**
+     * @brief Constructor
+     */
+    JFATrainer();
+
+    /**
+     * @brief Copy constructor
+     */
+    JFATrainer(const JFATrainer& other);
+
+    /**
+     * @brief Destructor
+     */
+    virtual ~JFATrainer();
+
+    /**
+     * @brief Assignment operator
+     */
+    JFATrainer& operator=(const JFATrainer& other);
+
+    /**
+     * @brief Equal to
+     */
+    bool operator==(const JFATrainer& b) const;
+
+    /**
+     * @brief Not equal to
+     */
+    bool operator!=(const JFATrainer& b) const;
+
+    /**
+     * @brief Similar to
+     */
+    bool is_similar_to(const JFATrainer& b, const double r_epsilon=1e-5,
+      const double a_epsilon=1e-8) const;
+
+    /**
+     * @brief Sets the maximum number of EM-like iterations (for each subspace)
+     */
+    //void setMaxIterations(const size_t max_iterations)
+    //{ m_max_iterations = max_iterations; }
+
+    /**
+     * @brief Gets the maximum number of EM-like iterations (for each subspace)
+     */
+    //size_t getMaxIterations() const
+    //{ return m_max_iterations; }
+
+    /**
+     * @brief This method performs some initialization before the EM loop.
+     */
+    virtual void initialize(bob::learn::em::JFABase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar);
+
+    /**
+     * @brief This method performs the e-Step to train the first subspace V
+     */
+    virtual void eStep1(bob::learn::em::JFABase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar);
+    /**
+     * @brief This method performs the m-Step to train the first subspace V
+     */
+    virtual void mStep1(bob::learn::em::JFABase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar);
+    /**
+     * @brief This method performs the finalization after training the first
+     * subspace V
+     */
+    virtual void finalize1(bob::learn::em::JFABase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar);
+    /**
+     * @brief This method performs the e-Step to train the second subspace U
+     */
+    virtual void eStep2(bob::learn::em::JFABase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar);
+    /**
+     * @brief This method performs the m-Step to train the second subspace U
+     */
+    virtual void mStep2(bob::learn::em::JFABase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar);
+    /**
+     * @brief This method performs the finalization after training the second
+     * subspace U
+     */
+    virtual void finalize2(bob::learn::em::JFABase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar);
+    /**
+     * @brief This method performs the e-Step to train the third subspace d
+     */
+    virtual void eStep3(bob::learn::em::JFABase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar);
+    /**
+     * @brief This method performs the m-Step to train the third subspace d
+     */
+    virtual void mStep3(bob::learn::em::JFABase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar);
+    /**
+     * @brief This method performs the finalization after training the third
+     * subspace d
+     */
+    virtual void finalize3(bob::learn::em::JFABase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar);
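+
+    /*
+     * Training-order sketch (assumed from the commented-out train_loop()
+     * below): the subspaces are trained in sequence, V first, then U, then d:
+     *
+     *   trainer.initialize(base, data);
+     *   for (...) { trainer.eStep1(base, data); trainer.mStep1(base, data); }
+     *   trainer.finalize1(base, data);
+     *   for (...) { trainer.eStep2(base, data); trainer.mStep2(base, data); }
+     *   trainer.finalize2(base, data);
+     *   for (...) { trainer.eStep3(base, data); trainer.mStep3(base, data); }
+     *   trainer.finalize3(base, data);
+     */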
+
+    /**
+     * @brief This method performs the main loops to train the subspaces U, V and d
+     */
+    //virtual void train_loop(bob::learn::em::JFABase& machine,
+      //const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar);
+    /**
+     * @brief This method trains the subspaces U, V and d
+     */
+    //virtual void train(bob::learn::em::JFABase& machine,
+      //const std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& ar);
+
+    /**
+     * @brief Enrol a client
+     */
+    void enrol(bob::learn::em::JFAMachine& machine,
+      const std::vector<boost::shared_ptr<bob::learn::em::GMMStats> >& features,
+      const size_t n_iter);
+
+    /**
+     * @brief Sets the Random Number Generator
+     */
+    void setRng(const boost::shared_ptr<boost::mt19937> rng)
+    { m_rng = rng; }
+
+    /**
+     * @brief Gets the Random Number Generator
+     */
+    const boost::shared_ptr<boost::mt19937> getRng() const
+    { return m_rng; }
+
+    /**
+     * @brief Get the x speaker factors
+     */
+    const std::vector<blitz::Array<double,2> >& getX() const
+    { return m_base_trainer.getX(); }
+    /**
+     * @brief Get the y speaker factors
+     */
+    const std::vector<blitz::Array<double,1> >& getY() const
+    { return m_base_trainer.getY(); }
+    /**
+     * @brief Get the z speaker factors
+     */
+    const std::vector<blitz::Array<double,1> >& getZ() const
+    { return m_base_trainer.getZ(); }
+    /**
+     * @brief Set the x speaker factors
+     */
+    void setX(const std::vector<blitz::Array<double,2> >& X)
+    { m_base_trainer.setX(X); }
+    /**
+     * @brief Set the y speaker factors
+     */
+    void setY(const std::vector<blitz::Array<double,1> >& y)
+    { m_base_trainer.setY(y); }
+    /**
+     * @brief Set the z speaker factors
+     */
+    void setZ(const std::vector<blitz::Array<double,1> >& z)
+    { m_base_trainer.setZ(z); }
+
+    /**
+     * @brief Getters for the accumulators
+     */
+    const blitz::Array<double,3>& getAccVA1() const
+    { return m_base_trainer.getAccVA1(); }
+    const blitz::Array<double,2>& getAccVA2() const
+    { return m_base_trainer.getAccVA2(); }
+    const blitz::Array<double,3>& getAccUA1() const
+    { return m_base_trainer.getAccUA1(); }
+    const blitz::Array<double,2>& getAccUA2() const
+    { return m_base_trainer.getAccUA2(); }
+    const blitz::Array<double,1>& getAccDA1() const
+    { return m_base_trainer.getAccDA1(); }
+    const blitz::Array<double,1>& getAccDA2() const
+    { return m_base_trainer.getAccDA2(); }
+
+    /**
+     * @brief Setters for the accumulators. Very useful if the e-Step needs
+     * to be parallelized.
+     */
+    void setAccVA1(const blitz::Array<double,3>& acc)
+    { m_base_trainer.setAccVA1(acc); }
+    void setAccVA2(const blitz::Array<double,2>& acc)
+    { m_base_trainer.setAccVA2(acc); }
+    void setAccUA1(const blitz::Array<double,3>& acc)
+    { m_base_trainer.setAccUA1(acc); }
+    void setAccUA2(const blitz::Array<double,2>& acc)
+    { m_base_trainer.setAccUA2(acc); }
+    void setAccDA1(const blitz::Array<double,1>& acc)
+    { m_base_trainer.setAccDA1(acc); }
+    void setAccDA2(const blitz::Array<double,1>& acc)
+    { m_base_trainer.setAccDA2(acc); }
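+
+    /*
+     * A minimal sketch (assumed, comments only): merging the e-step
+     * accumulators of two parallel trainers `t1` and `t2` into a trainer `t`
+     * before running the m-step on a single node. The names `machine` and
+     * `stats` are hypothetical.
+     *
+     *   blitz::Array<double,3> va1(t1.getAccVA1() + t2.getAccVA1());
+     *   blitz::Array<double,2> va2(t1.getAccVA2() + t2.getAccVA2());
+     *   t.setAccVA1(va1);  // merged first-order V accumulator
+     *   t.setAccVA2(va2);  // merged second-order V accumulator
+     *   t.mStep1(machine, stats);
+     */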
+
+
+  private:
+    // Attributes
+    //size_t m_max_iterations;
+    boost::shared_ptr<boost::mt19937> m_rng; ///< The random number generator for the initialization
+    bob::learn::em::FABaseTrainer m_base_trainer;
+};
+
+} } } // namespaces
+
+#endif /* BOB_LEARN_EM_JFATRAINER_H */
diff --git a/bob/learn/em/include/bob.learn.em/KMeansMachine.h b/bob/learn/em/include/bob.learn.em/KMeansMachine.h
new file mode 100644
index 0000000000000000000000000000000000000000..2c8113a6a06644b7af2f6839e76106da8de930bf
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/KMeansMachine.h
@@ -0,0 +1,244 @@
+/**
+ * @date Tue May 10 11:35:58 2011 +0200
+ * @author Francois Moulin <Francois.Moulin@idiap.ch>
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+#ifndef BOB_LEARN_EM_KMEANSMACHINE_H
+#define BOB_LEARN_EM_KMEANSMACHINE_H
+
+#include <blitz/array.h>
+#include <cfloat>
+
+#include <bob.io.base/HDF5File.h>
+
+namespace bob { namespace learn { namespace em {
+
+/**
+ * @brief This class implements a k-means classifier.
+ * @details See Section 9.1 of Bishop, "Pattern recognition and machine learning", 2006
+ */
+class KMeansMachine {
+  public:
+    /**
+     * Default constructor. Builds an otherwise invalid 0 x 0 k-means
+     * machine. This is equivalent to constructing a machine with both
+     * size_t parameters set to 0, as in KMeansMachine(0, 0).
+     */
+    KMeansMachine();
+
+    /**
+     * Constructor
+     * @param[in] n_means  The number of means
+     * @param[in] n_inputs The feature dimensionality
+     */
+    KMeansMachine(const size_t n_means, const size_t n_inputs);
+
+    /**
+     * Builds a new machine with the given means. Each row of the means
+     * matrix should represent a mean.
+     */
+    KMeansMachine(const blitz::Array<double,2>& means);
+
+    /**
+     * Copies another machine (copy constructor)
+     */
+    KMeansMachine(const KMeansMachine& other);
+
+    /**
+     * Starts a new KMeansMachine from an existing Configuration object.
+     */
+    KMeansMachine(bob::io::base::HDF5File& config);
+
+    /**
+     * Destructor
+     */
+    virtual ~KMeansMachine();
+
+    /**
+     * Assigns from a different machine
+     */
+    KMeansMachine& operator=(const KMeansMachine& other);
+
+    /**
+     * Equal to
+     */
+    bool operator==(const KMeansMachine& b) const;
+
+    /**
+     * Not equal to
+     */
+    bool operator!=(const KMeansMachine& b) const;
+
+    /**
+     * @brief Similar to
+     */
+    bool is_similar_to(const KMeansMachine& b, const double r_epsilon=1e-5,
+      const double a_epsilon=1e-8) const;
+
+    /**
+     * Loads data from an existing configuration object. Resets the current
+     * state.
+     */
+    void load(bob::io::base::HDF5File& config);
+
+    /**
+     * Saves an existing machine to a Configuration object.
+     */
+    void save(bob::io::base::HDF5File& config) const;
+
+    /**
+     * Output the minimum (Square Euclidean) distance between the input and
+     * one of the means (overrides Machine::forward)
+     */
+    void forward(const blitz::Array<double,1>& input, double& output) const;
+
+    /**
+     * Output the minimum (Square Euclidean) distance between the input and
+     * one of the means (overrides Machine::forward_)
+     * @warning Inputs are NOT checked
+     */
+    void forward_(const blitz::Array<double,1>& input, double& output) const;
+
+
+    /**
+     * Set the means
+     */
+    void setMeans(const blitz::Array<double,2>& means);
+
+    /**
+     * Set the i'th mean
+     */
+    void setMean(const size_t i, const blitz::Array<double,1>& mean);
+
+    /**
+     * Get a mean
+     * @param[in]   i    The index of the mean
+     * @param[out] mean The mean, a 1D array, with a length equal to the number of feature dimensions.
+     */
+    const blitz::Array<double,1> getMean(const size_t i) const;
+
+    /**
+     * Get the means (i.e. a 2D array, with as many rows as means, and as
+     * many columns as feature dimensions.)
+     */
+    const blitz::Array<double,2>& getMeans() const
+    { return m_means; }
+
+    /**
+     * Get the means in order to be updated (i.e. a 2D array, with as many
+     * rows as means, and as many columns as feature dimensions.)
+     * @warning Only trainers should use this function for efficiency reasons
+     */
+    blitz::Array<double,2>& updateMeans()
+    { return m_means; }
+
+    /**
+     * Return the (Square Euclidean) distance between the sample, x,
+     * and the i'th mean
+     * @param x The data sample (feature vector)
+     * @param i The index of the mean
+     */
+    double getDistanceFromMean(const blitz::Array<double,1>& x,
+      const size_t i) const;
+
+    /**
+     * Calculate the index of the mean that is closest
+     * (in terms of Square Euclidean distance) to the data sample, x
+     * @param x The data sample (feature vector)
+     * @param closest_mean (output) The index of the mean closest to the sample
+     * @param min_distance (output) The distance of the sample from the closest mean
+     */
+    void getClosestMean(const blitz::Array<double,1>& x,
+      size_t &closest_mean, double &min_distance) const;
+
+    /**
+     * Output the minimum (Square Euclidean) distance between the input and
+     * one of the means
+     */
+    double getMinDistance(const blitz::Array<double,1>& input) const;
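+
+    /*
+     * A minimal usage sketch (assumed) of the distance queries above, for a
+     * hypothetical machine `machine` and sample `x`:
+     *
+     *   size_t best; double dist;
+     *   machine.getClosestMean(x, best, dist);    // nearest mean and its distance
+     *   double dmin = machine.getMinDistance(x);  // same distance, without the index
+     */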
+
+    /**
+     * For each mean, find the subset of the samples
+     * that is closest to that mean, and calculate
+     * 1) the variance of that subset (the cluster variance)
+     * 2) the proportion of the samples represented by that subset (the cluster weight)
+     * @param[in]  data      The data
+     * @param[out] variances The cluster variances (one row per cluster),
+     *                       with as many columns as feature dimensions.
+     * @param[out] weights   A vector of weights, one per cluster
+     */
+    void getVariancesAndWeightsForEachCluster(const blitz::Array<double,2> &data, blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const;
+    /**
+     * Methods consecutively called by getVariancesAndWeightsForEachCluster().
+     * They support parallelization across several nodes: split the data and call
+     * getVariancesAndWeightsForEachClusterAcc() on each split, then sum the
+     * m_cache_means, variances, and weights variables across splits before
+     * performing the merge on a single node using
+     * getVariancesAndWeightsForEachClusterFin() (see the sketch below).
+     */
+    void getVariancesAndWeightsForEachClusterInit(blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const;
+    void getVariancesAndWeightsForEachClusterAcc(const blitz::Array<double,2> &data, blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const;
+    void getVariancesAndWeightsForEachClusterFin(blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const;
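+
+    /*
+     * A minimal sketch (assumed) of the split/merge protocol described above,
+     * for two hypothetical data splits `data1` and `data2`:
+     *
+     *   blitz::Array<double,2> variances(machine.getNMeans(), machine.getNInputs());
+     *   blitz::Array<double,1> weights(machine.getNMeans());
+     *   machine.getVariancesAndWeightsForEachClusterInit(variances, weights);
+     *   machine.getVariancesAndWeightsForEachClusterAcc(data1, variances, weights);
+     *   machine.getVariancesAndWeightsForEachClusterAcc(data2, variances, weights);
+     *   // when the splits ran on different nodes, sum the partial variances,
+     *   // weights and cache means (get/setCacheMeans) first
+     *   machine.getVariancesAndWeightsForEachClusterFin(variances, weights);
+     */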
+
+    /**
+     * Get the m_cache_means array.
+     * @warning This variable should only be used in the case you want to parallelize the
+     * getVariancesAndWeightsForEachCluster() method!
+     */
+    const blitz::Array<double,2>& getCacheMeans() const
+    { return m_cache_means; }
+
+    /**
+     * Set the m_cache_means array.
+     * @warning This variable should only be used in the case you want to parallelize the
+     * getVariancesAndWeightsForEachCluster() method!
+     */
+    void setCacheMeans(const blitz::Array<double,2>& cache_means);
+
+    /**
+     * Resize the means
+     */
+    void resize(const size_t n_means, const size_t n_inputs);
+
+    /**
+     * Return the number of means
+     */
+    size_t getNMeans() const { return m_n_means; }
+
+    /**
+     * Return the number of inputs
+     */
+    size_t getNInputs() const { return m_n_inputs; }
+
+    /**
+     * Prints a KMeansMachine in the output stream
+     */
+    friend std::ostream& operator<<(std::ostream& os, const KMeansMachine& km);
+
+
+  private:
+    /**
+     * The number of means
+     */
+    size_t m_n_means;
+
+    /**
+     * The number of inputs
+     */
+    size_t m_n_inputs;
+
+    /**
+     * The means (each row is a mean)
+     */
+    blitz::Array<double,2> m_means;
+
+    /**
+     * cache to avoid re-allocation
+     */
+    mutable blitz::Array<double,2> m_cache_means;
+};
+
+} } } // namespaces
+
+#endif // BOB_LEARN_EM_KMEANSMACHINE_H
diff --git a/bob/learn/em/include/bob.learn.em/KMeansTrainer.h b/bob/learn/em/include/bob.learn.em/KMeansTrainer.h
new file mode 100644
index 0000000000000000000000000000000000000000..e3d53a5cddf38ce06d9841d836a2bfb0030c394d
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/KMeansTrainer.h
@@ -0,0 +1,187 @@
+/**
+ * @date Tue May 10 11:35:58 2011 +0200
+ * @author Francois Moulin <Francois.Moulin@idiap.ch>
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+#ifndef BOB_LEARN_EM_KMEANSTRAINER_H
+#define BOB_LEARN_EM_KMEANSTRAINER_H
+
+#include <bob.learn.em/KMeansMachine.h>
+#include <boost/version.hpp>
+#include <boost/random/mersenne_twister.hpp>
+
+namespace bob { namespace learn { namespace em {
+
+/**
+ * @brief This class implements the expectation-maximisation algorithm for a k-means machine.
+ * @details See Section 9.1 of Bishop, "Pattern recognition and machine learning", 2006.
+ *          It uses a random initialisation of the means followed by the expectation-maximisation algorithm.
+ */
+class KMeansTrainer
+{
+  public:
+    /**
+     * @brief This enumeration defines different initialization methods for
+     * K-means
+     */
+    typedef enum {
+      RANDOM=0,
+      RANDOM_NO_DUPLICATE
+#if BOOST_VERSION >= 104700
+      ,
+      KMEANS_PLUS_PLUS
+#endif
+    }
+    InitializationMethod;
+
+    /**
+     * @brief Constructor
+     */
+    KMeansTrainer(InitializationMethod=RANDOM);
+
+    /*
+    KMeansTrainer(double convergence_threshold=0.001,
+      size_t max_iterations=10, bool compute_likelihood=true,
+      InitializationMethod=RANDOM);
+    */
+
+    /**
+     * @brief Virtualize destructor
+     */
+    virtual ~KMeansTrainer() {}
+
+    /**
+     * @brief Copy constructor
+     */
+    KMeansTrainer(const KMeansTrainer& other);
+
+    /**
+     * @brief Assigns from a different machine
+     */
+    KMeansTrainer& operator=(const KMeansTrainer& other);
+
+    /**
+     * @brief Equal to
+     */
+    bool operator==(const KMeansTrainer& b) const;
+
+    /**
+     * @brief Not equal to
+     */
+    bool operator!=(const KMeansTrainer& b) const;
+
+    /**
+     * @brief The name for this trainer
+     */
+    virtual std::string name() const { return "KMeansTrainer"; }
+
+    /**
+     * @brief Initialise the means randomly.
+     * Data is split into as many chunks as there are means,
+     * then each mean is set to a random example within each chunk.
+     */
+    void initialize(bob::learn::em::KMeansMachine& kMeansMachine,
+      const blitz::Array<double,2>& sampler);
+
+    /**
+     * @brief Accumulate across the dataset:
+     * - zeroeth and first order statistics
+     * - average (Square Euclidean) distance from the closest mean
+     * Implements EMTrainer::eStep(double &)
+     */
+    void eStep(bob::learn::em::KMeansMachine& kmeans,
+      const blitz::Array<double,2>& data);
+
+    /**
+     * @brief Updates the mean based on the statistics from the E-step.
+     */
+    void mStep(bob::learn::em::KMeansMachine& kmeans);
+
+    /**
+     * @brief This function returns the average min (Square Euclidean)
+     * distance (average distance to the closest mean)
+     */
+    double computeLikelihood(bob::learn::em::KMeansMachine& kmeans);
+
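+    /*
+     * A minimal sketch (assumed) of driving the EM loop by hand, with a
+     * hypothetical iteration cap `max_iterations` and convergence
+     * threshold `eps`:
+     *
+     *   KMeansTrainer trainer;
+     *   trainer.initialize(machine, data);
+     *   double prev = std::numeric_limits<double>::max();
+     *   for (size_t i = 0; i < max_iterations; ++i) {
+     *     trainer.eStep(machine, data);
+     *     trainer.mStep(machine);
+     *     const double avg = trainer.computeLikelihood(machine); // avg min distance
+     *     if (std::abs(prev - avg) < eps) break;                 // converged
+     *     prev = avg;
+     *   }
+     */
+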
+
+    /**
+     * @brief Reset the statistics accumulators
+     * to the correct size and a value of zero.
+     */
+    bool resetAccumulators(bob::learn::em::KMeansMachine& kMeansMachine);
+
+    /**
+     * @brief Sets the Random Number Generator
+     */
+    void setRng(const boost::shared_ptr<boost::mt19937> rng)
+    { m_rng = rng; }
+
+    /**
+     * @brief Gets the Random Number Generator
+     */
+    const boost::shared_ptr<boost::mt19937> getRng() const
+    { return m_rng; }
+
+    /**
+     * @brief Sets the initialization method used to generate the initial means
+     */
+    void setInitializationMethod(InitializationMethod v) { m_initialization_method = v; }
+
+    /**
+     * @brief Gets the initialization method used to generate the initial means
+     */
+    InitializationMethod getInitializationMethod() const { return m_initialization_method; }
+
+    /**
+     * @brief Returns the internal statistics. Useful to parallelize the E-step
+     */
+    const blitz::Array<double,1>& getZeroethOrderStats() const { return m_zeroethOrderStats; }
+    const blitz::Array<double,2>& getFirstOrderStats() const { return m_firstOrderStats; }
+    double getAverageMinDistance() const { return m_average_min_distance; }
+    /**
+     * @brief Sets the internal statistics. Useful to parallelize the E-step
+     */
+    void setZeroethOrderStats(const blitz::Array<double,1>& zeroethOrderStats);
+    void setFirstOrderStats(const blitz::Array<double,2>& firstOrderStats);
+    void setAverageMinDistance(const double value) { m_average_min_distance = value; }
+
+
+  private:
+  
+    /**
+     * @brief The initialization method.
+     * RANDOM_NO_DUPLICATE checks that no means are duplicated during the random initialization
+     */
+    InitializationMethod m_initialization_method;
+
+    /**
+     * @brief The random number generator for the initialization
+     */
+    boost::shared_ptr<boost::mt19937> m_rng;
+
+    /**
+     * @brief Average min (Square Euclidean) distance
+     */
+    double m_average_min_distance;
+
+    /**
+     * @brief Zeroeth order statistics accumulator.
+     * The k'th value in m_zeroethOrderStats is the denominator of
+     * equation 9.4, Bishop, "Pattern recognition and machine learning", 2006
+     */
+    blitz::Array<double,1> m_zeroethOrderStats;
+
+    /**
+     * @brief First order statistics accumulator.
+     * The k'th row of m_firstOrderStats is the numerator of
+     * equation 9.4, Bishop, "Pattern recognition and machine learning", 2006
+     */
+    blitz::Array<double,2> m_firstOrderStats;
+};
+
+} } } // namespaces
+
+#endif // BOB_LEARN_EM_KMEANSTRAINER_H
diff --git a/bob/learn/em/include/bob.learn.em/LinearScoring.h b/bob/learn/em/include/bob.learn.em/LinearScoring.h
new file mode 100644
index 0000000000000000000000000000000000000000..822922ceefbbd8a5069fb77f6a788cf63dd617e9
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/LinearScoring.h
@@ -0,0 +1,98 @@
+/**
+ * @date Wed Jul 13 16:00:04 2011 +0200
+ * @author Francois Moulin <Francois.Moulin@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+#ifndef BOB_LEARN_EM_LINEARSCORING_H
+#define BOB_LEARN_EM_LINEARSCORING_H
+
+#include <blitz/array.h>
+#include <boost/shared_ptr.hpp>
+#include <vector>
+#include <bob.learn.em/GMMMachine.h>
+
+namespace bob { namespace learn { namespace em {
+
+/**
+ * Compute a matrix of scores using linear scoring.
+ *
+ * @warning Each GMM must have the same size.
+ *
+ * @param models        list of mean supervector for the client models
+ * @param ubm_mean      mean supervector of the world model
+ * @param ubm_variance  variance supervector of the world model
+ * @param test_stats    list of accumulated statistics for each test trial
+ * @param test_channelOffset  list of channel offsets, if any (for JFA/ISV for instance)
+ * @param frame_length_normalisation   perform a normalisation by the number of feature vectors
+ * @param[out] scores 2D matrix of scores, <tt>scores[m, s]</tt> is the score for model @c m against statistics @c s
+ * @warning the output scores matrix should have the correct size (number of models x number of test_stats)
+ */
+void linearScoring(const std::vector<blitz::Array<double,1> >& models,
+                   const blitz::Array<double,1>& ubm_mean, const blitz::Array<double,1>& ubm_variance,
+                   const std::vector<boost::shared_ptr<const bob::learn::em::GMMStats> >& test_stats,
+                   const std::vector<blitz::Array<double, 1> >& test_channelOffset,
+                   const bool frame_length_normalisation,
+                   blitz::Array<double,2>& scores);
+void linearScoring(const std::vector<blitz::Array<double,1> >& models,
+                   const blitz::Array<double,1>& ubm_mean, const blitz::Array<double,1>& ubm_variance,
+                   const std::vector<boost::shared_ptr<const bob::learn::em::GMMStats> >& test_stats,
+                   const bool frame_length_normalisation,
+                   blitz::Array<double,2>& scores);
+
+/**
+ * Compute a matrix of scores using linear scoring.
+ *
+ * @warning Each GMM must have the same size.
+ *
+ * @param models      list of client models as GMMMachines
+ * @param ubm         world model as a GMMMachine
+ * @param test_stats  list of accumulated statistics for each test trial
+ * @param frame_length_normalisation   perform a normalisation by the number of feature vectors
+ * @param[out] scores 2D matrix of scores, <tt>scores[m, s]</tt> is the score for model @c m against statistics @c s
+ * @warning the output scores matrix should have the correct size (number of models x number of test_stats)
+ */
+void linearScoring(const std::vector<boost::shared_ptr<const bob::learn::em::GMMMachine> >& models,
+                   const bob::learn::em::GMMMachine& ubm,
+                   const std::vector<boost::shared_ptr<const bob::learn::em::GMMStats> >& test_stats,
+                   const bool frame_length_normalisation,
+                   blitz::Array<double,2>& scores);
+/**
+ * Compute a matrix of scores using linear scoring.
+ *
+ * @warning Each GMM must have the same size.
+ *
+ * @param models      list of client models as GMMMachines
+ * @param ubm         world model as a GMMMachine
+ * @param test_stats  list of accumulated statistics for each test trial
+ * @param test_channelOffset  list of channel offsets, if any (for JFA/ISV for instance)
+ * @param frame_length_normalisation   perform a normalisation by the number of feature vectors
+ * @param[out] scores 2D matrix of scores, <tt>scores[m, s]</tt> is the score for model @c m against statistics @c s
+ * @warning the output scores matrix should have the correct size (number of models x number of test_stats)
+ */
+void linearScoring(const std::vector<boost::shared_ptr<const bob::learn::em::GMMMachine> >& models,
+                   const bob::learn::em::GMMMachine& ubm,
+                   const std::vector<boost::shared_ptr<const bob::learn::em::GMMStats> >& test_stats,
+                   const std::vector<blitz::Array<double, 1> >& test_channelOffset,
+                   const bool frame_length_normalisation,
+                   blitz::Array<double,2>& scores);
+
+/**
+ * Compute a score using linear scoring.
+ *
+ * @param model         mean supervector for the client model
+ * @param ubm_mean      mean supervector of the world model
+ * @param ubm_variance  variance supervector of the world model
+ * @param test_stats    accumulated statistics of the test trial
+ * @param test_channelOffset  channel offset
+ * @param frame_length_normalisation   perform a normalisation by the number of feature vectors
+ */
+double linearScoring(const blitz::Array<double,1>& model,
+                   const blitz::Array<double,1>& ubm_mean, const blitz::Array<double,1>& ubm_variance,
+                   const bob::learn::em::GMMStats& test_stats,
+                   const blitz::Array<double,1>& test_channelOffset,
+                   const bool frame_length_normalisation);
+
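+/*
+ * A minimal usage sketch (assumed): scoring a list of GMM client models
+ * against a list of test statistics, with the output matrix pre-allocated
+ * as the warnings above require. The names `models`, `ubm` and `test_stats`
+ * are hypothetical:
+ *
+ *   blitz::Array<double,2> scores(models.size(), test_stats.size());
+ *   bob::learn::em::linearScoring(models, ubm, test_stats, true, scores);
+ */
+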
+} } } // namespaces
+
+#endif // BOB_LEARN_EM_LINEARSCORING_H
diff --git a/bob/learn/em/include/bob.learn.em/MAP_GMMTrainer.h b/bob/learn/em/include/bob.learn.em/MAP_GMMTrainer.h
new file mode 100644
index 0000000000000000000000000000000000000000..7d30b2628355b097c76c3547eb4271f0271d5afb
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/MAP_GMMTrainer.h
@@ -0,0 +1,170 @@
+/**
+ * @date Tue May 10 11:35:58 2011 +0200
+ * @author Francois Moulin <Francois.Moulin@idiap.ch>
+ *
+ * @brief This class implements the maximum a posteriori M-step of the expectation-maximisation algorithm for a GMM Machine. The prior parameters are encoded in the form of a GMM (e.g. a universal background model). The EM algorithm thus performs GMM adaptation.
+ * @details See Section 3.4 of Reynolds et al., "Speaker Verification Using Adapted Gaussian Mixture Models", Digital Signal Processing, 2000. We use a "single adaptation coefficient", alpha_i, and thus a single relevance factor, r.
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#ifndef BOB_LEARN_EM_MAP_GMMTRAINER_H
+#define BOB_LEARN_EM_MAP_GMMTRAINER_H
+
+#include <bob.learn.em/GMMBaseTrainer.h>
+#include <limits>
+
+namespace bob { namespace learn { namespace em {
+
+/**
+ * @brief This class implements the maximum a posteriori M-step of the expectation-maximisation algorithm for a GMM Machine. The prior parameters are encoded in the form of a GMM (e.g. a universal background model). The EM algorithm thus performs GMM adaptation.
+ * @details See Section 3.4 of Reynolds et al., "Speaker Verification Using Adapted Gaussian Mixture Models", Digital Signal Processing, 2000. We use a "single adaptation coefficient", alpha_i, and thus a single relevance factor, r.
+ */
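+/*
+ * For reference, the adaptation in the means-only case follows Eq. (14) of
+ * [Reynolds2000] (a sketch of the equations, not code from this file):
+ *
+ *   alpha_i = n_i / (n_i + r)
+ *   mu_i   <- alpha_i * E_i(x) + (1 - alpha_i) * mu_i_prior
+ *
+ * where n_i is the zeroth order statistic of mixture i, E_i(x) is the first
+ * order statistic normalised by n_i, and r is the relevance factor. Setting
+ * alpha directly (reynolds_adaptation == false) bypasses the n_i weighting.
+ */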
+class MAP_GMMTrainer
+{
+  public:
+    /**
+     * @brief Default constructor
+     */
+    MAP_GMMTrainer(
+      const bool update_means=true,
+      const bool update_variances=false, 
+      const bool update_weights=false,
+      const double mean_var_update_responsibilities_threshold = std::numeric_limits<double>::epsilon(),
+      const bool reynolds_adaptation=false, 
+      const double relevance_factor=4, 
+      const double alpha=0.5,
+      boost::shared_ptr<bob::learn::em::GMMMachine> prior_gmm = boost::shared_ptr<bob::learn::em::GMMMachine>());
+
+    /**
+     * @brief Copy constructor
+     */
+    MAP_GMMTrainer(const MAP_GMMTrainer& other);
+
+    /**
+     * @brief Destructor
+     */
+    virtual ~MAP_GMMTrainer();
+
+    /**
+     * @brief Initialization
+     */
+    void initialize(bob::learn::em::GMMMachine& gmm);
+
+    /**
+     * @brief Assigns from a different MAP_GMMTrainer
+     */
+    MAP_GMMTrainer& operator=(const MAP_GMMTrainer &other);
+
+    /**
+     * @brief Equal to
+     */
+    bool operator==(const MAP_GMMTrainer& b) const;
+
+    /**
+     * @brief Not equal to
+     */
+    bool operator!=(const MAP_GMMTrainer& b) const;
+
+    /**
+     * @brief Similar to
+     */
+    bool is_similar_to(const MAP_GMMTrainer& b, const double r_epsilon=1e-5,
+      const double a_epsilon=1e-8) const;
+
+    /**
+     * @brief Set the GMM to use as a prior for MAP adaptation.
+     * Generally, this is a "universal background model" (UBM),
+     * also referred to as a "world model".
+     */
+    bool setPriorGMM(boost::shared_ptr<bob::learn::em::GMMMachine> prior_gmm);
+
+    /**
+     * @brief Calculates and saves statistics across the dataset,
+     * and saves these as m_ss. Calculates the average
+     * log likelihood of the observations given the GMM,
+     * and returns this in average_log_likelihood.
+     *
+     * The statistics, m_ss, will be used in the mStep() that follows.
+     * Implements EMTrainer::eStep(double &)
+     */
+     void eStep(bob::learn::em::GMMMachine& gmm,
+      const blitz::Array<double,2>& data){
+      m_gmm_base_trainer.eStep(gmm,data);
+     }
+
+
+    /**
+     * @brief Performs a maximum a posteriori (MAP) update of the GMM
+     * parameters using the accumulated statistics in m_ss and the
+     * parameters of the prior model
+     * Implements EMTrainer::mStep()
+     */
+    void mStep(bob::learn::em::GMMMachine& gmm);
+
+    /**
+     * @brief Computes the likelihood using current estimates of the latent
+     * variables
+     */
+    double computeLikelihood(bob::learn::em::GMMMachine& gmm){
+      return m_gmm_base_trainer.computeLikelihood(gmm);
+    }    
+    
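+    /*
+     * A minimal usage sketch (assumed): adapting the means of a client model
+     * from a UBM held in `ubm` (a boost::shared_ptr<GMMMachine>); the name
+     * `client_data` is hypothetical:
+     *
+     *   MAP_GMMTrainer trainer(true, false, false);  // means only
+     *   trainer.setPriorGMM(ubm);
+     *   bob::learn::em::GMMMachine gmm(*ubm);        // start from the prior
+     *   trainer.initialize(gmm);
+     *   trainer.eStep(gmm, client_data);
+     *   trainer.mStep(gmm);
+     */
+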
+    bool getReynoldsAdaptation() const
+    {return m_reynolds_adaptation;}
+
+    void setReynoldsAdaptation(const bool reynolds_adaptation)
+    {m_reynolds_adaptation = reynolds_adaptation;}
+
+    double getRelevanceFactor() const
+    {return m_relevance_factor;}
+
+    void setRelevanceFactor(const double relevance_factor)
+    {m_relevance_factor = relevance_factor;}
+
+    double getAlpha() const
+    {return m_alpha;}
+
+    void setAlpha(const double alpha)
+    {m_alpha = alpha;}
+
+
+  protected:
+
+    /**
+     * The relevance factor for MAP adaptation, r (see Reynolds et al., "Speaker Verification Using Adapted Gaussian Mixture Models", Digital Signal Processing, 2000).
+     */
+    double m_relevance_factor;
+
+    /**
+    Base trainer for the MAP algorithm. Basically implements the e-step.
+    */ 
+    bob::learn::em::GMMBaseTrainer m_gmm_base_trainer;
+
+    /**
+     * The GMM to use as a prior for MAP adaptation.
+     * Generally, this is a "universal background model" (UBM),
+     * also referred to as a "world model"
+     */
+    boost::shared_ptr<bob::learn::em::GMMMachine> m_prior_gmm;
+
+    /**
+     * The alpha for the Torch3-like adaptation
+     */
+    double m_alpha;
+    /**
+     * Whether the Reynolds adaptation procedure should be used
+     * (otherwise the Torch3-like alpha adaptation is applied)
+     */
+    bool m_reynolds_adaptation;
+
+  private:
+    /// cache to avoid re-allocation
+    mutable blitz::Array<double,1> m_cache_alpha;
+    mutable blitz::Array<double,1> m_cache_ml_weights;
+};
+
+} } } // namespaces
+
+#endif // BOB_LEARN_EM_MAP_GMMTRAINER_H
diff --git a/bob/learn/em/include/bob.learn.em/ML_GMMTrainer.h b/bob/learn/em/include/bob.learn.em/ML_GMMTrainer.h
new file mode 100644
index 0000000000000000000000000000000000000000..58e8a7621ed2b6570567d2561f7d41d29fcf16a8
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/ML_GMMTrainer.h
@@ -0,0 +1,119 @@
+/**
+ * @date Tue May 10 11:35:58 2011 +0200
+ * @author Francois Moulin <Francois.Moulin@idiap.ch>
+ *
+ * @brief This class implements the maximum likelihood M-step of the expectation-maximisation algorithm for a GMM Machine.
+ * @details See Section 9.2.2 of Bishop, "Pattern recognition and machine learning", 2006
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#ifndef BOB_LEARN_EM_ML_GMMTRAINER_H
+#define BOB_LEARN_EM_ML_GMMTRAINER_H
+
+#include <bob.learn.em/GMMBaseTrainer.h>
+#include <limits>
+
+namespace bob { namespace learn { namespace em {
+
+/**
+ * @brief This class implements the maximum likelihood M-step of the
+ *   expectation-maximisation algorithm for a GMM Machine.
+ * @details See Section 9.2.2 of Bishop,
+ *  "Pattern recognition and machine learning", 2006
+ */
+class ML_GMMTrainer{
+  public:
+    /**
+     * @brief Default constructor
+     */
+    ML_GMMTrainer(const bool update_means=true,
+                  const bool update_variances=false, 
+                  const bool update_weights=false,
+                  const double mean_var_update_responsibilities_threshold = std::numeric_limits<double>::epsilon());
+
+    /**
+     * @brief Copy constructor
+     */
+    ML_GMMTrainer(const ML_GMMTrainer& other);
+
+    /**
+     * @brief Destructor
+     */
+    virtual ~ML_GMMTrainer();
+
+    /**
+     * @brief Initialisation before the EM steps
+     */
+    void initialize(bob::learn::em::GMMMachine& gmm);
+
+    /**
+     * @brief Calculates and saves statistics across the dataset,
+     * and saves these as m_ss. Calculates the average
+     * log likelihood of the observations given the GMM,
+     * and returns this in average_log_likelihood.
+     *
+     * The statistics, m_ss, will be used in the mStep() that follows.
+     * Implements EMTrainer::eStep(double &)
+     */
+     void eStep(bob::learn::em::GMMMachine& gmm,
+      const blitz::Array<double,2>& data){
+      m_gmm_base_trainer.eStep(gmm,data);
+     }
+
+    /**
+     * @brief Performs a maximum likelihood (ML) update of the GMM parameters
+     * using the accumulated statistics in m_ss
+     * Implements EMTrainer::mStep()
+     */
+    void mStep(bob::learn::em::GMMMachine& gmm);
+
+    /**
+     * @brief Computes the likelihood using current estimates of the latent
+     * variables
+     */
+    double computeLikelihood(bob::learn::em::GMMMachine& gmm){
+      return m_gmm_base_trainer.computeLikelihood(gmm);
+    }
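+
+    /*
+     * A minimal usage sketch (assumed) of ML training; the names `gmm`,
+     * `data` and `max_iterations` are hypothetical:
+     *
+     *   ML_GMMTrainer trainer(true, true, true);  // means, variances, weights
+     *   trainer.initialize(gmm);
+     *   for (size_t i = 0; i < max_iterations; ++i) {
+     *     trainer.eStep(gmm, data);
+     *     trainer.mStep(gmm);
+     *   }
+     */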
+
+
+    /**
+     * @brief Assigns from a different ML_GMMTrainer
+     */
+    ML_GMMTrainer& operator=(const ML_GMMTrainer &other);
+
+    /**
+     * @brief Equal to
+     */
+    bool operator==(const ML_GMMTrainer& b) const;
+
+    /**
+     * @brief Not equal to
+     */
+    bool operator!=(const ML_GMMTrainer& b) const;
+
+    /**
+     * @brief Similar to
+     */
+    bool is_similar_to(const ML_GMMTrainer& b, const double r_epsilon=1e-5,
+      const double a_epsilon=1e-8) const;
+      
+    
+  protected:
+
+    /**
+    Base trainer for the ML algorithm. Basically implements the e-step.
+    */ 
+    bob::learn::em::GMMBaseTrainer m_gmm_base_trainer;
+
+
+  private:
+    /**
+     * @brief Add cache to avoid re-allocation at each iteration
+     */
+    mutable blitz::Array<double,1> m_cache_ss_n_thresholded;
+};
+
+} } } // namespaces
+
+#endif // BOB_LEARN_EM_ML_GMMTRAINER_H
diff --git a/bob/learn/em/include/bob.learn.em/PLDAMachine.h b/bob/learn/em/include/bob.learn.em/PLDAMachine.h
new file mode 100644
index 0000000000000000000000000000000000000000..4f6362ddc322f9ef1b601c9cbee8d6239f8f6200
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/PLDAMachine.h
@@ -0,0 +1,702 @@
+/**
+ * @date Fri Oct 14 18:07:56 2011 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * @brief Machines that implement the Probabilistic Linear Discriminant
+ *   Analysis Model of Prince and Elder.
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#ifndef BOB_LEARN_EM_PLDAMACHINE_H
+#define BOB_LEARN_EM_PLDAMACHINE_H
+
+#include <blitz/array.h>
+#include <bob.io.base/HDF5File.h>
+#include <map>
+#include <iostream>
+#include <stdexcept>
+
+namespace bob { namespace learn { namespace em {
+
+/**
+ * @brief This class is a container for the \f$F\f$, \f$G\f$ and \f$\Sigma\f$
+ * matrices and the mean vector \f$\mu\f$ of a PLDA model. This also
+ * precomputes useful matrices to make the model scalable.\n
+ * References:\n
+ * 1. 'A Scalable Formulation of Probabilistic Linear Discriminant Analysis:
+ *     Applied to Face Recognition', Laurent El Shafey, Chris McCool,
+ *     Roy Wallace, Sebastien Marcel, TPAMI'2013
+ * 2. 'Probabilistic Linear Discriminant Analysis for Inference About
+ *     Identity', Prince and Elder, ICCV'2007\n
+ * 3. 'Probabilistic Models for Inference about Identity', Li, Fu, Mohammed,
+ *     Elder and Prince, TPAMI'2012
+ */
+class PLDABase
+{
+  public:
+    /**
+     * @brief Default constructor.\n Builds an otherwise invalid 0x0x0
+     * PLDABase.
+     */
+    PLDABase();
+    /**
+     * @brief Constructor, builds a new PLDABase.\n \f$F\f$, \f$G\f$
+     * and \f$\Sigma\f$ are initialized to the 'eye' matrix (matrix with 1's
+     * on the diagonal and 0 outside), and \f$\mu\f$ is initialized to 0.
+     *
+     * @param dim_d Dimensionality of the feature vector
+     * @param dim_f size of \f$F\f$ (dim_d x dim_f)
+     * @param dim_g size of \f$G\f$ (dim_d x dim_g)
+     * @param variance_threshold The smallest possible value of the variance
+     *                           (Ignored if set to 0.)
+     */
+    PLDABase(const size_t dim_d, const size_t dim_f,
+      const size_t dim_g, const double variance_threshold=0.);
+    /**
+     * @brief Copies another PLDABase
+     */
+    PLDABase(const PLDABase& other);
+    /**
+     * @brief Starts a new PLDABase from an existing configuration
+     * object.
+     * @param config HDF5 configuration file
+     */
+    PLDABase(bob::io::base::HDF5File& config);
+
+    /**
+     * @brief Just to virtualize the destructor
+     */
+    virtual ~PLDABase();
+
+    /**
+     * @brief Assigns from a different PLDABase
+     */
+    PLDABase& operator=(const PLDABase &other);
+
+    /**
+     * @brief Equal to.\n Even precomputed members such as \f$\alpha\f$,
+     * \f$\beta\f$ and \f$\gamma_a\f$'s are compared!
+     */
+    bool operator==(const PLDABase& b) const;
+    /**
+     * @brief Not equal to.\n Defined as the negation of operator==
+     */
+    bool operator!=(const PLDABase& b) const;
+    /**
+     * @brief Similar to.\n Even precomputed members such as \f$\alpha\f$,
+     * \f$\beta\f$ and \f$\gamma_a\f$'s are compared!
+     */
+    bool is_similar_to(const PLDABase& b, const double r_epsilon=1e-5,
+      const double a_epsilon=1e-8) const;
+
+    /**
+     * @brief Loads data from an existing configuration object. Resets the
+     * current state.
+     * @param config HDF5 configuration file
+     */
+    void load(bob::io::base::HDF5File& config);
+    /**
+     * @brief Saves an existing machine to a configuration object.
+     * @param config HDF5 configuration file
+     */
+    void save(bob::io::base::HDF5File& config) const;
+
+    /**
+     * @brief Resizes the PLDABase.
+     * @warning \f$F\f$, \f$G\f$, \f$\Sigma\f$, \f$\mu\f$ and the variance
+     * flooring thresholds will be reinitialized!
+     * @param dim_d Dimensionality of the feature vector
+     * @param dim_f Rank of \f$F\f$ (dim_d x dim_f)
+     * @param dim_g Rank of \f$G\f$ (dim_d x dim_g)
+     */
+    void resize(const size_t dim_d, const size_t dim_f, const size_t dim_g);
+
+    /**
+     * @brief Gets the \f$F\f$ subspace/matrix of the PLDA model
+     */
+    const blitz::Array<double,2>& getF() const
+    { return m_F; }
+    /**
+     * @brief Sets the \f$F\f$ subspace/matrix of the PLDA model
+     */
+    void setF(const blitz::Array<double,2>& F);
+    /**
+     * @brief Returns the current \f$F\f$ matrix/subspace of the PLDA model
+     * in order to be updated.
+     * @warning Use with care. Only trainers should use this function for
+     * efficiency reasons.
+     */
+    blitz::Array<double,2>& updateF()
+    { return m_F; }
+
+    /**
+     * @brief Gets the \f$G\f$ subspace/matrix of the PLDA model
+     */
+    const blitz::Array<double,2>& getG() const
+    { return m_G; }
+    /**
+     * @brief Sets the \f$G\f$ subspace/matrix of the PLDA model
+     */
+    void setG(const blitz::Array<double,2>& G);
+    /**
+     * @brief Returns the current \f$G\f$ subspace/matrix of the PLDA model
+     * in order to be updated.
+     * @warning Use with care. Only trainers should use this function for
+     * efficiency reasons.
+     */
+    blitz::Array<double,2>& updateG()
+    { return m_G; }
+
+    /**
+     * @brief Gets the \f$\Sigma\f$ (diagonal) covariance matrix of the PLDA
+     * model
+     */
+    const blitz::Array<double,1>& getSigma() const
+    { return m_sigma; }
+    /**
+     * @brief Sets the \f$\Sigma\f$ (diagonal) covariance matrix of the PLDA
+     * model
+     */
+    void setSigma(const blitz::Array<double,1>& s);
+    /**
+     * @brief Returns the current \f$\Sigma\f$ (diagonal) covariance matrix of
+     * the PLDA model in order to be updated.
+     * @warning Use with care. Only trainers should use this function for
+     * efficiency reasons. Variance threshold should be applied after
+     * updating \f$\Sigma\f$!
+     */
+    blitz::Array<double,1>& updateSigma()
+    { return m_sigma; }
+
+    /**
+     * @brief Gets the \f$\mu\f$ mean vector of the PLDA model
+     */
+    const blitz::Array<double,1>& getMu() const
+    { return m_mu; }
+    /**
+     * @brief Sets the \f$\mu\f$ mean vector of the PLDA model
+     */
+    void setMu(const blitz::Array<double,1>& mu);
+    /**
+     * @brief Returns the current \f$\mu\f$ mean vector of the PLDA model
+     * in order to be updated.
+     * @warning Use with care. Only trainers should use this function for
+     * efficiency reasons.
+     */
+    blitz::Array<double,1>& updateMu()
+    { return m_mu; }
+
+    /**
+     * @brief Gets the variance flooring threshold
+     */
+    double getVarianceThreshold() const
+    { return m_variance_threshold; }
+    /**
+     * @brief Sets the variance flooring threshold
+     */
+    void setVarianceThreshold(const double value);
+    /**
+     * @brief Apply the variance flooring thresholds.
+     * This method is automatically called when using setVarianceThreshold().
+     * @warning It is only useful when using updateVarianceThresholds(),
+     * and should mostly be done by trainers
+     */
+    void applyVarianceThreshold();
+
+    /**
+     * @brief Gets the feature dimensionality
+     */
+    size_t getDimD() const
+    { return m_dim_d; }
+    /**
+     * @brief Gets the size/rank of the \f$F\f$ subspace/matrix of the PLDA model
+     */
+    size_t getDimF() const
+    { return m_dim_f; }
+    /**
+     * @brief Gets the size/rank of the \f$G\f$ subspace/matrix of the PLDA model
+     */
+    size_t getDimG() const
+    { return m_dim_g; }
+
+    /**
+     * @brief Precomputes useful values such as \f$\Sigma^{-1}\f$,
+     * \f$G^{T}\Sigma^{-1}\f$, \f$\alpha\f$, \f$\beta\f$, and
+     * \f$F^{T}\beta\f$.
+     * @warning Previous \f$\gamma_a\f$ values and log likelihood constant
+     * terms are cleared.
+     */
+    void precompute();
+    /**
+     * @brief Precomputes useful values for the log likelihood
+     * \f$\log(\det(\alpha))\f$ and \f$\log(\det(\Sigma))\f$.
+     */
+    void precomputeLogLike();
+    /**
+     * @brief Gets the inverse vector/diagonal matrix of \f$\Sigma^{-1}\f$
+     */
+    const blitz::Array<double,1>& getISigma() const
+    { return m_cache_isigma; }
+    /**
+     * @brief Gets the \f$\alpha\f$ matrix.
+     * \f$\alpha = (Id + G^T \Sigma^{-1} G)^{-1} = \mathcal{G}\f$
+     */
+    const blitz::Array<double,2>& getAlpha() const
+    { return m_cache_alpha; }
+    /**
+     * @brief Gets the \f$\beta\f$ matrix
+     * \f$\beta = (\Sigma + G G^T)^{-1} = \mathcal{S} =
+     *    \Sigma^{-1} - \Sigma^{-1} G \mathcal{G} G^{T} \Sigma^{-1}\f$
+     */
+    const blitz::Array<double,2>& getBeta() const
+    { return m_cache_beta; }
+    /**
+     * @brief Gets the \f$\gamma_a\f$ matrix for a given \f$a\f$ (number of
+     * samples).
+     * \f$\gamma_{a} = (Id + a F^T \beta F)^{-1} = \mathcal{F}_{a}\f$
+     * @warning an exception is thrown if \f$\gamma_a\f$ does not exist
+     */
+    const blitz::Array<double,2>& getGamma(const size_t a) const;
+    /**
+     * @brief Gets the \f$\gamma_a\f$ matrix for a given \f$a\f$ (number of
+     * samples).
+     * \f$\gamma_a = (Id + a F^T \beta F)^{-1} = \mathcal{F}_{a}\f$
+     * @warning The matrix is computed if it does not already exist
+     */
+    const blitz::Array<double,2>& getAddGamma(const size_t a);
+    /**
+     * @brief Gets the \f$F^T \beta\f$ matrix
+     */
+    const blitz::Array<double,2>& getFtBeta() const
+    { return m_cache_Ft_beta; }
+    /**
+     * @brief Gets the \f$G^T \Sigma^{-1}\f$ matrix
+     */
+    const blitz::Array<double,2>& getGtISigma() const
+    { return m_cache_Gt_isigma; }
+    /**
+     * @brief Gets \f$\log(\det(\alpha))\f$
+     */
+    double getLogDetAlpha() const
+    { return m_cache_logdet_alpha; }
+    /**
+     * @brief Gets \f$\log(\det(\Sigma))\f$
+     */
+    double getLogDetSigma() const
+    { return m_cache_logdet_sigma; }
+    /**
+     * @brief Computes the log likelihood constant term for a given \f$a\f$
+     * (number of samples), given the provided \f$\gamma_a\f$ matrix
+     * \f$l_{a} = \frac{a}{2} ( -D log(2\pi) -log|\Sigma| +log|\alpha| +log|\gamma_a|)\f$
+     */
+    double computeLogLikeConstTerm(const size_t a,
+      const blitz::Array<double,2>& gamma_a) const;
+    /**
+     * @brief Computes the log likelihood constant term for a given \f$a\f$
+     * (number of samples)
+     * \f$l_{a} = \frac{a}{2} ( -D log(2\pi) -log|\Sigma| +log|\alpha| +log|\gamma_a|)\f$
+     * @warning gamma_a will be computed and added if it does
+     *  not already exist
+     */
+    double computeLogLikeConstTerm(const size_t a);
+    /**
+     * @brief Tells if the log likelihood constant term for a given \f$a\f$
+     * (number of samples) exists
+     * \f$l_{a} = \frac{a}{2} ( -D log(2\pi) -log|\Sigma| +log|\alpha| +log|\gamma_a|)\f$
+     */
+    bool hasLogLikeConstTerm(const size_t a) const
+    { return (m_cache_loglike_constterm.find(a) != m_cache_loglike_constterm.end()); }
+    /**
+     * @brief Gets the log likelihood constant term for a given \f$a\f$
+     * (number of samples)
+     * \f$l_{a} = \frac{a}{2} ( -D log(2\pi) -log|\Sigma| +log|\alpha| +log|\gamma_a|)\f$
+     * @warning an exception is thrown if the value does not exist
+     */
+    double getLogLikeConstTerm(const size_t a) const;
+    /**
+     * @brief Gets the log likelihood constant term for a given \f$a\f$
+     * (number of samples)
+     * \f$l_{a} = \frac{a}{2} ( -D log(2\pi) -log|\Sigma| +log|\alpha| +log|\gamma_a|)\f$
+     * @warning The value is computed if it does not already exist
+     */
+    double getAddLogLikeConstTerm(const size_t a);
+
+    /**
+     * @brief Computes the \f$\gamma_a\f$ matrix for a given \f$a\f$ (number
+     * of samples) and put the result in the provided array.
+     * \f$\gamma_a = (Id + a F^T \beta F)^{-1}\f$
+     */
+    void computeGamma(const size_t a, blitz::Array<double,2> res) const;
+    /**
+     * @brief Tells if the \f$\gamma_a\f$ matrix for a given a (number of
+     * samples) exists.
+     * \f$\gamma_a = (Id + a F^T \beta F)^{-1}\f$
+     */
+    bool hasGamma(const size_t a) const
+    { return (m_cache_gamma.find(a) != m_cache_gamma.end()); }
+
+    /**
+     * @brief Clears the maps (\f$\gamma_a\f$ and loglike_constterm_a).
+     */
+    void clearMaps();
+
+    /**
+     * @brief Gets the log-likelihood of an observation, given the current model
+     * and the latent variables (point estimate).\n
+     * This will basically compute \f$p(x_{ij} | h_{i}, w_{ij}, \Theta)\f$\n
+     * , given by \n
+     * \f$\mathcal{N}(x_{ij}|[\mu + F h_{i} + G w_{ij} + \epsilon_{ij}, \Sigma])\f$\n
+     * , which is in logarithm, \n
+     * \f$-\frac{D}{2} log(2\pi) -\frac{1}{2} log(det(\Sigma)) -\frac{1}{2} {(x_{ij}-(\mu+F h_{i}+G w_{ij}))^{T}\Sigma^{-1}(x_{ij}-(\mu+F h_{i}+G w_{ij}))}\f$.
+     */
+    double computeLogLikelihoodPointEstimate(const blitz::Array<double,1>& xij,
+      const blitz::Array<double,1>& hi, const blitz::Array<double,1>& wij) const;
+
+    // Friend method declaration
+    friend std::ostream& operator<<(std::ostream& os, const PLDABase& m);
+
+
+  private:
+    // Attributes
+    size_t m_dim_d; ///< Dimensionality of the input feature vector
+    size_t m_dim_f; ///< Size/rank of the \f$F\f$ subspace
+    size_t m_dim_g; ///< Size/rank of the \f$G\f$ subspace
+    blitz::Array<double,2> m_F; ///< \f$F\f$ subspace of the PLDA model
+    blitz::Array<double,2> m_G; ///< \f$G\f$ subspace of the PLDA model
+    /**
+     * @brief \f$\Sigma\f$ diagonal (by assumption) covariance matrix of the
+     * PLDA model
+     */
+    blitz::Array<double,1> m_sigma;
+    blitz::Array<double,1> m_mu; ///< \f$\mu\f$ mean vector of the PLDA model
+    /**
+     * @brief The variance flooring thresholds, i.e. the minimum allowed
+     * value of variance m_sigma in each dimension.
+     * The variance will be set to this value if an attempt is made
+     * to set it to a smaller value.
+     */
+    double m_variance_threshold;
+
+    // Internal values very useful used to optimize the code
+    blitz::Array<double,1> m_cache_isigma; ///< \f$\Sigma^{-1}\f$
+    blitz::Array<double,2> m_cache_alpha; ///< \f$\alpha = (Id + G^T \Sigma^{-1} G)^{-1}\f$
+    /**
+     * @brief \f$\beta = (\Sigma+G G^T)^{-1} = (\Sigma^{-1} - \Sigma^{-1} G \alpha G^T \Sigma^{-1})^{-1}\f$
+     */
+    blitz::Array<double,2> m_cache_beta;
+    std::map<size_t, blitz::Array<double,2> > m_cache_gamma; ///< \f$\gamma_{a} = (Id + a F^T \beta F)^{-1}\f$
+    blitz::Array<double,2> m_cache_Ft_beta; ///< \f$F^{T} \beta \f$
+    blitz::Array<double,2> m_cache_Gt_isigma; ///< \f$G^{T} \Sigma^{-1} \f$
+    double m_cache_logdet_alpha; ///< \f$\log(\det(\alpha))\f$
+    double m_cache_logdet_sigma; ///< \f$\log(\det(\Sigma))\f$
+    /**
+     * @brief \f$l_{a} = \frac{a}{2} ( -D log(2*\pi) -log|\Sigma| +log|\alpha| +log|\gamma_a|)\f$
+     */
+    std::map<size_t, double> m_cache_loglike_constterm;
+
+    // working arrays
+    mutable blitz::Array<double,1> m_tmp_d_1; ///< Cache vector of size dim_d
+    mutable blitz::Array<double,1> m_tmp_d_2; ///< Cache vector of size dim_d
+    mutable blitz::Array<double,2> m_tmp_d_ng_1; ///< Cache matrix of size dim_d x dim_g
+    mutable blitz::Array<double,2> m_tmp_nf_nf_1; ///< Cache matrix of size dim_f x dim_f
+    mutable blitz::Array<double,2> m_tmp_ng_ng_1; ///< Cache matrix of size dim_g x dim_g
+
+    // private methods
+    void resizeNoInit(const size_t dim_d, const size_t dim_f, const size_t dim_g);
+    void resizeTmp();
+    void initMuFGSigma();
+    void precomputeISigma();
+    void precomputeAlpha();
+    void precomputeBeta();
+    void precomputeGamma(const size_t a);
+    void precomputeFtBeta();
+    void precomputeGtISigma();
+    void precomputeLogDetAlpha();
+    void precomputeLogDetSigma();
+    void precomputeLogLikeConstTerm(const size_t a);
+};
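+
+/*
+ * A minimal usage sketch (assumed): filling a PLDABase and precomputing the
+ * cached quantities before any likelihood evaluation. The names `F`, `G`,
+ * `sigma`, `mu` and the dimensions are hypothetical:
+ *
+ *   bob::learn::em::PLDABase base(dim_d, dim_f, dim_g);
+ *   base.setF(F); base.setG(G); base.setSigma(sigma); base.setMu(mu);
+ *   base.precompute();          // \Sigma^{-1}, \alpha, \beta, F^T\beta, ...
+ *   base.precomputeLogLike();   // log|\alpha| and log|\Sigma|
+ *   const blitz::Array<double,2>& gamma3 = base.getAddGamma(3); // \gamma_a, a=3
+ */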
+
+
+/**
+ * @brief This class is a container for an enrolled identity/class. It
+ * contains information extracted from the enrollment samples. It should
+ * be used in combination with a PLDABase instance.\n
+ * References:\n
+ * 1. 'A Scalable Formulation of Probabilistic Linear Discriminant Analysis:
+ *     Applied to Face Recognition', Laurent El Shafey, Chris McCool,
+ *     Roy Wallace, Sebastien Marcel, TPAMI'2013
+ * 2. 'Probabilistic Linear Discriminant Analysis for Inference About
+ *     Identity', Prince and Elder, ICCV'2007\n
+ * 3. 'Probabilistic Models for Inference about Identity', Li, Fu, Mohammed,
+ *     Elder and Prince, TPAMI'2012
+ */
+class PLDAMachine
+{
+  public:
+    /**
+     * @brief Default constructor.\n
+     * Builds an otherwise invalid (No attached PLDABase) PLDAMachine.
+     */
+    PLDAMachine();
+    /**
+     * @brief Constructor, builds a new PLDAMachine, setting a
+     * PLDABase.
+     */
+    PLDAMachine(const boost::shared_ptr<bob::learn::em::PLDABase> pldabase);
+    /**
+     * @brief Copies another PLDAMachine.\n Both PLDAMachine's will point
+     * to the same PLDABase.
+     */
+    PLDAMachine(const PLDAMachine& other);
+    /**
+     * @brief Starts a new PLDAMachine from an existing configuration object,
+     * and a PLDABase.
+     */
+    PLDAMachine(bob::io::base::HDF5File& config,
+      const boost::shared_ptr<bob::learn::em::PLDABase> pldabase);
+
+    /**
+     * @brief Just to virtualise the destructor
+     */
+    virtual ~PLDAMachine();
+
+    /**
+     * @brief Assigns from a different machine
+     */
+    PLDAMachine& operator=(const PLDAMachine &other);
+
+    /**
+     * @brief Equal to.\n The two PLDAMachine's should have the same
+     * PLDABase. Precomputed members such as \f$\gamma_a\f$'s
+     * are compared!
+     */
+    bool operator==(const PLDAMachine& b) const;
+    /**
+     * @brief Not equal to.\n Defined as the negation of operator==
+     */
+    bool operator!=(const PLDAMachine& b) const;
+    /**
+     * @brief Similar to.\n The two PLDAMachine's should have the same
+     * PLDABase. Precomputed members such as \f$\gamma_a\f$'s
+     * are compared!
+     */
+    bool is_similar_to(const PLDAMachine& b, const double r_epsilon=1e-5,
+      const double a_epsilon=1e-8) const;
+
+    /**
+     * @brief Loads data from an existing configuration object. Resets the
+     * current state.
+     */
+    void load(bob::io::base::HDF5File& config);
+    /**
+     * @brief Saves an existing machine to a configuration object.
+     */
+    void save(bob::io::base::HDF5File& config) const;
+
+    /**
+     * @brief Gets the attached PLDABase
+     */
+    const boost::shared_ptr<PLDABase> getPLDABase() const
+    { return m_plda_base; }
+    /**
+     * @brief Sets the attached PLDABase
+     */
+    void setPLDABase(const boost::shared_ptr<bob::learn::em::PLDABase> plda_base);
+
+    /**
+     * @brief Gets the feature dimensionality
+     */
+    size_t getDimD() const
+    { if (!m_plda_base) throw std::runtime_error("No PLDABase set to this machine");
+      return m_plda_base->getDimD(); }
+    /**
+     * @brief Gets the size/rank of the \f$F\f$ subspace/matrix of the PLDA model
+     */
+    size_t getDimF() const
+    { if (!m_plda_base) throw std::runtime_error("No PLDABase set to this machine");
+      return m_plda_base->getDimF(); }
+    /**
+     * @brief Gets the size/rank of the \f$G\f$ subspace/matrix of the PLDA model
+     */
+    size_t getDimG() const
+    { if (!m_plda_base) throw std::runtime_error("No PLDABase set to this machine");
+      return m_plda_base->getDimG(); }
+
+    /**
+     * @brief Gets the number of enrolled samples
+     */
+    uint64_t getNSamples() const
+    { return m_n_samples; }
+    /**
+     * @brief Sets the number of enrolled samples
+     */
+    void setNSamples(const uint64_t n_samples)
+    { m_n_samples = n_samples; }
+    /**
+     * @brief Gets the \f$A = -0.5 \sum_{i} x_{i}^T \beta x_{i}\f$ value
+     */
+    double getWSumXitBetaXi() const
+    { return m_nh_sum_xit_beta_xi; }
+    /**
+     * @brief Sets the \f$A = -0.5 \sum_{i} x_{i}^T \beta x_{i}\f$ value
+     */
+    void setWSumXitBetaXi(const double val)
+    { m_nh_sum_xit_beta_xi = val; }
+    /**
+     * @brief Gets the current \f$\sum_{i} F^T \beta x_{i}\f$ value
+     */
+    const blitz::Array<double,1>& getWeightedSum() const
+    { return m_weighted_sum; }
+    /**
+     * @brief Sets the \f$\sum_{i} F^T \beta x_{i}\f$ value
+     */
+    void setWeightedSum(const blitz::Array<double,1>& weighted_sum);
+    /**
+     * @brief Returns the current \f$\sum_{i} F^T \beta x_{i}\f$ value
+     * in order to be updated.
+     * @warning Use with care. Only trainers should use this function for
+     * efficiency reasons.
+     */
+    blitz::Array<double,1>& updateWeightedSum()
+    { return m_weighted_sum; }
+    /**
+     * @brief Gets the log likelihood of the enrollment samples
+     */
+    double getLogLikelihood() const
+    { return m_loglikelihood; }
+    /**
+     * @brief Sets the log likelihood of the enrollment samples
+     */
+    void setLogLikelihood(const double val)
+    { m_loglikelihood = val; }
+
+    /**
+     * @brief Tells if the \f$\gamma_a\f$ matrix for a given \f$a\f$ (number
+     * of samples) exists in this machine (does not check the base machine)
+     * \f$\gamma_a = (Id + a F^T \beta F)^{-1} = \mathcal{F}_{a}\f$
+     */
+    bool hasGamma(const size_t a) const
+    { return (m_cache_gamma.find(a) != m_cache_gamma.end()); }
+    /**
+     * @brief Gets the \f$\gamma_a\f$ matrix for a given \f$a\f$ (number of
+     * samples) \f$\gamma_a = (Id + a F^T \beta F)^{-1} = \mathcal{F}_{a}\f$
+     * Tries to find it from the base machine and then from this machine
+     * @warning an exception is thrown if gamma does not exist
+     */
+    const blitz::Array<double,2>& getGamma(const size_t a) const;
+    /**
+     * @brief Gets the \f$\gamma_a\f$ matrix for a given \f$a\f$ (number of
+     * samples) \f$\gamma_a = (Id + a F^T \beta F)^{-1} = \mathcal{F}_{a}\f$
+     * Tries to find it from the base machine and then from this machine
+     * @warning The matrix is computed if it does not already exist,
+     *   and stored in this machine
+     */
+    const blitz::Array<double,2>& getAddGamma(const size_t a);
+
+    /**
+     * @brief Tells if the log likelihood constant term for a given \f$a\f$
+     * (number of samples) exists in this machine
+     * (does not check the base machine)
+     * \f$l_{a} = \frac{a}{2} ( -D log(2\pi) -log|\Sigma| +log|\alpha| +log|\gamma_a|)\f$
+     */
+    bool hasLogLikeConstTerm(const size_t a) const
+    { return (m_cache_loglike_constterm.find(a) != m_cache_loglike_constterm.end()); }
+    /**
+     * @brief Gets the log likelihood constant term for a given \f$a\f$
+     * (number of samples)
+     * Tries to find it from the base machine and then from this machine
+     * \f$l_{a} = \frac{a}{2} ( -D log(2\pi) -log|\Sigma| +log|\alpha| +log|\gamma_a|)\f$
+     * @warning an exception is thrown if the value does not exist
+     */
+    double getLogLikeConstTerm(const size_t a) const;
+    /**
+     * @brief Gets the log likelihood constant term for a given \f$a\f$
+     * (number of samples)
+     * Tries to find it from the base machine and then from this machine
+     * \f$l_{a} = \frac{a}{2} ( -D log(2\pi) -log|\Sigma| +log|\alpha| +log|\gamma_a|)\f$
+     * @warning The value is computed if it does not already exist
+     */
+    double getAddLogLikeConstTerm(const size_t a);
+
+    /**
+     * @brief Clears the maps (\f$\gamma_a\f$ and loglike_constterm[a]).
+     */
+    void clearMaps();
+
+
+    /**
+     * @brief Compute the log-likelihood of the given sample and (optionally)
+     * the enrolled samples
+     */
+    double computeLogLikelihood(const blitz::Array<double,1>& sample,
+      bool with_enrolled_samples=true) const;
+    /**
+     * @brief Compute the log-likelihood of the given samples and (optionally)
+     * the enrolled samples
+     */
+    double computeLogLikelihood(const blitz::Array<double,2>& samples,
+      bool with_enrolled_samples=true) const;
+
+    /**
+     * @brief Computes a log likelihood ratio from a 1D or 2D blitz::Array
+     */
+    double forward(const blitz::Array<double,1>& sample);
+    double forward_(const blitz::Array<double,1>& sample);
+    double forward(const blitz::Array<double,2>& samples);
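+
+    /*
+     * A minimal usage sketch (assumed): computing a verification score (a
+     * log likelihood ratio) for a probe against this enrolled identity. The
+     * names `base` and `probe` are hypothetical:
+     *
+     *   PLDAMachine m(base);            // base: boost::shared_ptr<PLDABase>
+     *   // ... a trainer fills in the enrollment statistics ...
+     *   double llr = m.forward(probe);  // probe: blitz::Array<double,1>, size dim_d
+     */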
+
+
+  private:
+    /**
+     * @brief Associated PLDABase containing the model (\f$\mu\f$,
+     * \f$F\f$, \f$G\f$ and \f$\Sigma\f$)
+     */
+    boost::shared_ptr<PLDABase> m_plda_base;
+    uint64_t m_n_samples; ///< Number of enrollment samples
+    /**
+     * @brief Contains the value:\n
+     * \f$A = -0.5 (\sum_{i} x_{i}^{T} \Sigma^{-1} x_{i} - x_{i}^T \Sigma^{-1} G \alpha G^{T} \Sigma^{-1} x_{i})\f$\n
+     * \f$A = -0.5 \sum_{i} x_{i}^T \beta x_{i}\f$\n
+     * used in the likelihood computation (first \f$x_{i}\f$ dependent term)
+     */
+    double m_nh_sum_xit_beta_xi;
+    /**
+     * @brief Contains the value \f$\sum_{i} F^T \beta x_{i}\f$ used in the
+     * likelihood computation (for the second \f$x_{i}\f$ dependent term)
+     */
+    blitz::Array<double,1> m_weighted_sum;
+    double m_loglikelihood; ///< Log likelihood of the enrollment samples
+    /**
+     * @brief \f$\gamma_a\f$ values which are not already in the
+     * PLDABase \f$\gamma_a = (Id + a F^T \beta F)^{-1}\f$
+     * (depend on the number of samples \f$a\f$)
+     */
+    std::map<size_t, blitz::Array<double,2> > m_cache_gamma;
+    /**
+     * @brief Log likelihood constant terms which depend on the number of
+     * samples \f$a\f$
+     * \f$l_{a} = \frac{a}{2} ( -D log(2\pi) -log|\Sigma| +log|\alpha| +log|\gamma_a|)\f$
+     */
+    std::map<size_t, double> m_cache_loglike_constterm;
+
+
+    // working arrays
+    mutable blitz::Array<double,1> m_tmp_d_1; ///< Cache vector of size dim_d
+    mutable blitz::Array<double,1> m_tmp_d_2; ///< Cache vector of size dim_d
+    mutable blitz::Array<double,1> m_tmp_nf_1; ///< Cache vector of size dim_f
+    mutable blitz::Array<double,1> m_tmp_nf_2; ///< Cache vector of size dim_f
+    mutable blitz::Array<double,2> m_tmp_nf_nf_1; ///< Cache matrix of size dim_f x dim_f
+
+    /**
+     * @brief Resizes the PLDAMachine
+     */
+    void resize(const size_t dim_d, const size_t dim_f, const size_t dim_g);
+    /**
+     * @brief Resize working arrays
+     */
+    void resizeTmp();
+};
+
+} } } // namespaces
+
+#endif // BOB_LEARN_EM_PLDAMACHINE_H
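The constant term cached by getLogLikeConstTerm()/getAddLogLikeConstTerm() above is a plain combination of four log-determinant ingredients. A minimal C++ sketch of the formula, assuming the caller supplies the log-determinants (the helper name and its inputs are illustrative, not part of the PLDAMachine API):

    #include <cmath>    // std::log, M_PI (POSIX)
    #include <cstddef>  // std::size_t

    // l_a = (a/2) * (-D*log(2*pi) - log|Sigma| + log|alpha| + log|gamma_a|),
    // matching the documentation of getLogLikeConstTerm() above
    double plda_loglike_const_term(std::size_t a, std::size_t D,
                                   double log_det_sigma,
                                   double log_det_alpha,
                                   double log_det_gamma_a)
    {
      return 0.5 * static_cast<double>(a) *
        (-static_cast<double>(D) * std::log(2.0 * M_PI)
         - log_det_sigma + log_det_alpha + log_det_gamma_a);
    }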
diff --git a/bob/learn/em/include/bob.learn.em/PLDATrainer.h b/bob/learn/em/include/bob.learn.em/PLDATrainer.h
new file mode 100644
index 0000000000000000000000000000000000000000..7ea35089ad4f321b35142ca8e9519592a1a892af
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/PLDATrainer.h
@@ -0,0 +1,310 @@
+/**
+ * @date Fri Oct 14 18:07:56 2011 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * @brief Probabilistic Linear Discriminant Analysis (PLDA) implemented using
+ * Expectation Maximization.
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#ifndef BOB_LEARN_EM_PLDA_TRAINER_H
+#define BOB_LEARN_EM_PLDA_TRAINER_H
+
+#include <bob.learn.em/PLDAMachine.h>
+#include <boost/shared_ptr.hpp>
+#include <vector>
+#include <map>
+#include <bob.core/array_copy.h>
+#include <boost/random.hpp>
+#include <boost/random/mersenne_twister.hpp>
+
+namespace bob { namespace learn { namespace em {
+
+/**
+ * @brief This class can be used to train the \f$F\f$, \f$G\f$ and
+ * \f$\Sigma\f$ matrices and the mean vector \f$\mu\f$ of a PLDA model.\n
+ * References:\n
+ * 1. 'A Scalable Formulation of Probabilistic Linear Discriminant Analysis:
+ *     Applied to Face Recognition', Laurent El Shafey, Chris McCool,
+ *     Roy Wallace, Sebastien Marcel, TPAMI'2013
+ * 2. 'Probabilistic Linear Discriminant Analysis for Inference About
+ *     Identity', Prince and Elder, ICCV'2007\n
+ * 3. 'Probabilistic Models for Inference about Identity', Li, Fu, Mohammed,
+ *     Elder and Prince, TPAMI'2012
+ */
+class PLDATrainer
+{
+  public: //api
+    /**
+     * @brief Constructor.\n Initializes a new PLDA trainer. The
+     * training stage will place the resulting components in the
+     * PLDABase.
+     */
+    PLDATrainer(const bool use_sum_second_order);
+
+    /**
+     * @brief Copy constructor
+     */
+    PLDATrainer(const PLDATrainer& other);
+
+    /**
+     * @brief (virtual) Destructor
+     */
+    virtual ~PLDATrainer();
+
+    /**
+     * @brief Assignment operator
+     */
+    PLDATrainer& operator=(const PLDATrainer& other);
+
+    /**
+     * @brief Equal to
+     */
+    bool operator==(const PLDATrainer& other) const;
+
+    /**
+     * @brief Not equal to
+     */
+    bool operator!=(const PLDATrainer& other) const;
+
+    /**
+     * @brief Similarity operator
+     */
+    bool is_similar_to(const PLDATrainer& b,
+      const double r_epsilon=1e-5, const double a_epsilon=1e-8) const;
+
+    /**
+     * @brief Performs some initialization before the E- and M-steps.
+     */
+    void initialize(bob::learn::em::PLDABase& machine,
+      const std::vector<blitz::Array<double,2> >& v_ar);
+    /**
+     * @brief Performs some actions after the end of the E- and M-steps.
+     */
+    void finalize(bob::learn::em::PLDABase& machine,
+      const std::vector<blitz::Array<double,2> >& v_ar);
+
+    /**
+     * @brief Computes statistics across the dataset and stores them
+     * as m_cache_z_{first,second}_order.
+     * These statistics will be used in the mStep() that follows.
+     */
+    void eStep(bob::learn::em::PLDABase& machine,
+      const std::vector<blitz::Array<double,2> >& v_ar);
+
+    /**
+     * @brief Performs a maximization step to update the parameters of the
+     * PLDABase
+     */
+    void mStep(bob::learn::em::PLDABase& machine,
+       const std::vector<blitz::Array<double,2> >& v_ar);
+
+
+    /**
+     * @brief Sets whether the second order statistics are stored during the
+     * training procedure, or only their sum.
+     */
+    void setUseSumSecondOrder(bool v) { m_use_sum_second_order = v; }
+    /**
+     * @brief Tells whether the second order statistics are stored during the
+     * training procedure, or only their sum.
+     */
+    bool getUseSumSecondOrder() const
+    { return m_use_sum_second_order; }
+
+    /**
+     * @brief This enum defines different methods for initializing the \f$F\f$
+     * subspace
+     */
+    typedef enum {
+      RANDOM_F=0,
+      BETWEEN_SCATTER=1
+    }
+    InitFMethod;
+    /**
+     * @brief This enum defines different methods for initializing the \f$G\f$
+     * subspace
+     */
+    typedef enum {
+      RANDOM_G=0,
+      WITHIN_SCATTER=1
+    }
+    InitGMethod;
+    /**
+     * @brief This enum defines different methods for initializing the
+     * \f$\Sigma\f$ covariance matrix
+     */
+    typedef enum {
+      RANDOM_SIGMA=0,
+      VARIANCE_G=1,
+      CONSTANT=2,
+      VARIANCE_DATA=3
+    }
+    InitSigmaMethod;
+    /**
+     * @brief Sets the method used to initialize \f$F\f$
+     */
+    void setInitFMethod(const InitFMethod m) { m_initF_method = m; }
+    /**
+     * @brief Gets the method used to initialize \f$F\f$
+     */
+    InitFMethod getInitFMethod() const { return m_initF_method; }
+    /**
+     * @brief Sets the ratio value used to initialize \f$F\f$
+     */
+    void setInitFRatio(double d) { m_initF_ratio = d; }
+    /**
+     * @brief Gets the ratio value used to initialize \f$F\f$
+     */
+    double getInitFRatio() const { return m_initF_ratio; }
+    /**
+     * @brief Sets the method used to initialize \f$G\f$
+     */
+    void setInitGMethod(const InitGMethod m) { m_initG_method = m; }
+    /**
+     * @brief Gets the method used to initialize \f$G\f$
+     */
+    InitGMethod getInitGMethod() const { return m_initG_method; }
+    /**
+     * @brief Sets the ratio value used to initialize \f$G\f$
+     */
+    void setInitGRatio(double d) { m_initG_ratio = d; }
+    /**
+     * @brief Gets the ratio value used to initialize \f$G\f$
+     */
+    double getInitGRatio() const { return m_initG_ratio; }
+    /**
+     * @brief Sets the method used to initialize \f$\Sigma\f$
+     */
+    void setInitSigmaMethod(const InitSigmaMethod m)
+    { m_initSigma_method = m; }
+    /**
+     * @brief Gets the method used to initialize \f$\Sigma\f$
+     */
+    InitSigmaMethod getInitSigmaMethod() const
+    { return m_initSigma_method; }
+    /**
+     * @brief Sets the ratio value used to initialize \f$\Sigma\f$
+     */
+    void setInitSigmaRatio(double d) { m_initSigma_ratio = d; }
+    /**
+     * @brief Gets the ratio value used to initialize \f$\Sigma\f$
+     */
+    double getInitSigmaRatio() const { return m_initSigma_ratio; }
+
+    /**
+     * @brief Gets the z first order statistics (mostly for test purposes)
+     */
+    const std::vector<blitz::Array<double,2> >& getZFirstOrder() const
+    { return m_cache_z_first_order;}
+    /**
+     * @brief Gets the z second order statistics (mostly for test purposes)
+     */
+    const blitz::Array<double,2>& getZSecondOrderSum() const
+    { return m_cache_sum_z_second_order;}
+    /**
+     * @brief Gets the z second order statistics (mostly for test purposes)
+     */
+    const std::vector<blitz::Array<double,3> >& getZSecondOrder() const
+    { if(m_use_sum_second_order)
+        throw std::runtime_error("You should disable the use_sum_second_order flag to use this feature");
+      return m_cache_z_second_order;
+    }
+
+    /**
+     * @brief Main procedure for enrolling a PLDAMachine
+     */
+    void enrol(bob::learn::em::PLDAMachine& plda_machine,
+      const blitz::Array<double,2>& ar) const;
+      
+      
+    /**
+     * @brief Sets the Random Number Generator
+     */
+    void setRng(const boost::shared_ptr<boost::mt19937> rng)
+    { m_rng = rng; }
+
+    /**
+     * @brief Gets the Random Number Generator
+     */
+    const boost::shared_ptr<boost::mt19937> getRng() const
+    { return m_rng; }      
+
+  private:
+  
+    boost::shared_ptr<boost::mt19937> m_rng;
+  
+    //representation
+    size_t m_dim_d; ///< Dimensionality of the input features
+    size_t m_dim_f; ///< Size/rank of the \f$F\f$ subspace
+    size_t m_dim_g; ///< Size/rank of the \f$G\f$ subspace
+    bool m_use_sum_second_order; ///< If set, only the sum of the second order statistics is stored/allocated
+    InitFMethod m_initF_method; ///< Initialization method for \f$F\f$
+    double m_initF_ratio; ///< Ratio/factor used for the initialization of \f$F\f$
+    InitGMethod m_initG_method; ///< Initialization method for \f$G\f$
+    double m_initG_ratio; ///< Ratio/factor used for the initialization of \f$G\f$
+    InitSigmaMethod m_initSigma_method; ///< Initialization method for \f$\Sigma\f$
+    double m_initSigma_ratio; ///< Ratio/factor used for the initialization of \f$\Sigma\f$
+
+    // Statistics and covariance computed during the training process
+    blitz::Array<double,2> m_cache_S; ///< Covariance of the training data
+    std::vector<blitz::Array<double,2> > m_cache_z_first_order; ///< Current mean of the z_{n} latent variable (1 for each sample)
+    blitz::Array<double,2> m_cache_sum_z_second_order; ///< Current sum of the covariance of the z_{n} latent variable
+    std::vector<blitz::Array<double,3> > m_cache_z_second_order; ///< Current covariance of the z_{n} latent variable
+    // Precomputed
+    /**
+     * @brief Number of training samples for each individual in the training set
+     */
+    std::vector<size_t> m_cache_n_samples_per_id;
+    /**
+     * @brief Tells if there is an identity with a 'key'/particular number of
+     * training samples, and if corresponding matrices are up to date.
+     */
+    std::map<size_t,bool> m_cache_n_samples_in_training;
+    blitz::Array<double,2> m_cache_B; ///< \f$B = [F, G]\f$ (size dim_d x (dim_f+dim_g))
+    blitz::Array<double,2> m_cache_Ft_isigma_G; ///< \f$F^T \Sigma^{-1} G\f$
+    blitz::Array<double,2> m_cache_eta; ///< \f$F^T \Sigma^{-1} G \alpha\f$
+    // Blocks (with \f$\gamma_{a}\f$) of \f$(Id + A^T \Sigma'^{-1} A)^{-1}\f$ (efficient inversion)
+    std::map<size_t,blitz::Array<double,2> > m_cache_zeta; ///< \f$\zeta_{a} = \alpha + \eta^T \gamma_{a} \eta\f$
+    std::map<size_t,blitz::Array<double,2> > m_cache_iota; ///< \f$\iota_{a} = -\gamma_{a} \eta\f$
+
+    // Working arrays
+    mutable blitz::Array<double,1> m_tmp_nf_1; ///< vector of dimension dim_f
+    mutable blitz::Array<double,1> m_tmp_nf_2; ///< vector of dimension dim_f
+    mutable blitz::Array<double,1> m_tmp_ng_1; ///< vector of dimension dim_g
+    mutable blitz::Array<double,1> m_tmp_D_1; ///< vector of dimension dim_d
+    mutable blitz::Array<double,1> m_tmp_D_2; ///< vector of dimension dim_d
+    mutable blitz::Array<double,2> m_tmp_nfng_nfng; ///< matrix of dimension (dim_f+dim_g)x(dim_f+dim_g)
+    mutable blitz::Array<double,2> m_tmp_D_nfng_1; ///< matrix of dimension (dim_d)x(dim_f+dim_g)
+    mutable blitz::Array<double,2> m_tmp_D_nfng_2; ///< matrix of dimension (dim_d)x(dim_f+dim_g)
+
+    // internal methods
+    void computeMeanVariance(bob::learn::em::PLDABase& machine,
+      const std::vector<blitz::Array<double,2> >& v_ar);
+    void initMembers(const std::vector<blitz::Array<double,2> >& v_ar);
+    void initFGSigma(bob::learn::em::PLDABase& machine,
+      const std::vector<blitz::Array<double,2> >& v_ar);
+    void initF(bob::learn::em::PLDABase& machine,
+      const std::vector<blitz::Array<double,2> >& v_ar);
+    void initG(bob::learn::em::PLDABase& machine,
+      const std::vector<blitz::Array<double,2> >& v_ar);
+    void initSigma(bob::learn::em::PLDABase& machine,
+      const std::vector<blitz::Array<double,2> >& v_ar);
+
+    void checkTrainingData(const std::vector<blitz::Array<double,2> >& v_ar);
+    void precomputeFromFGSigma(bob::learn::em::PLDABase& machine);
+    void precomputeLogLike(bob::learn::em::PLDABase& machine,
+      const std::vector<blitz::Array<double,2> >& v_ar);
+
+    void updateFG(bob::learn::em::PLDABase& machine,
+      const std::vector<blitz::Array<double,2> >& v_ar);
+    void updateSigma(bob::learn::em::PLDABase& machine,
+      const std::vector<blitz::Array<double,2> >& v_ar);
+
+    void resizeTmp();
+};
+
+} } } // namespaces
+
+#endif /* BOB_LEARN_EM_PLDA_TRAINER_H */
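The trainer above follows the initialize/eStep/mStep/finalize protocol declared in its public API. A minimal sketch of a fixed-iteration EM loop driving it (the iteration count is an assumption; a real loop would also test convergence):

    #include <bob.learn.em/PLDATrainer.h>

    void train_plda(bob::learn::em::PLDABase& machine,
      const std::vector<blitz::Array<double,2> >& data, size_t max_iterations)
    {
      bob::learn::em::PLDATrainer trainer(true); // keep only the summed second-order statistics
      trainer.initialize(machine, data);
      for (size_t i = 0; i < max_iterations; ++i) {
        trainer.eStep(machine, data); // accumulate latent-variable statistics
        trainer.mStep(machine, data); // update F, G, Sigma and mu
      }
      trainer.finalize(machine, data);
    }

Enrollment of a client-specific PLDAMachine then goes through enrol(plda_machine, samples) on the same trainer.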
diff --git a/bob/learn/em/include/bob.learn.em/ZTNorm.h b/bob/learn/em/include/bob.learn.em/ZTNorm.h
new file mode 100644
index 0000000000000000000000000000000000000000..d8e953d3b7d44ec66fd60e5acd652479e6733410
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/ZTNorm.h
@@ -0,0 +1,89 @@
+/**
+ * @date Tue Jul 19 15:33:20 2011 +0200
+ * @author Francois Moulin <Francois.Moulin@idiap.ch>
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#ifndef BOB_LEARN_EM_ZTNORM_H
+#define BOB_LEARN_EM_ZTNORM_H
+
+#include <blitz/array.h>
+
+namespace bob { namespace learn { namespace em {
+
+/**
+ * Normalise raw scores with ZT-Norm
+ *
+ * @exception std::runtime_error matrix sizes are not consistent
+ *
+ * @param rawscores_probes_vs_models
+ * @param rawscores_zprobes_vs_models
+ * @param rawscores_probes_vs_tmodels
+ * @param rawscores_zprobes_vs_tmodels
+ * @param mask_zprobes_vs_tmodels_istruetrial
+ * @param[out] normalizedscores normalized scores
+ * @warning The destination score array should have the correct size
+ *          (Same size as rawscores_probes_vs_models)
+ */
+void ztNorm(const blitz::Array<double, 2>& rawscores_probes_vs_models,
+            const blitz::Array<double, 2>& rawscores_zprobes_vs_models,
+            const blitz::Array<double, 2>& rawscores_probes_vs_tmodels,
+            const blitz::Array<double, 2>& rawscores_zprobes_vs_tmodels,
+            const blitz::Array<bool,   2>& mask_zprobes_vs_tmodels_istruetrial,
+            blitz::Array<double, 2>& normalizedscores);
+
+/**
+ * Normalise raw scores with ZT-Norm.
+ * Assume that znorm and tnorm have no common subject id.
+ *
+ * @exception std::runtime_error matrix sizes are not consistent
+ *
+ * @param rawscores_probes_vs_models
+ * @param rawscores_zprobes_vs_models
+ * @param rawscores_probes_vs_tmodels
+ * @param rawscores_zprobes_vs_tmodels
+ * @param[out] normalizedscores normalized scores
+ * @warning The destination score array should have the correct size
+ *          (Same size as rawscores_probes_vs_models)
+ */
+void ztNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
+            const blitz::Array<double,2>& rawscores_zprobes_vs_models,
+            const blitz::Array<double,2>& rawscores_probes_vs_tmodels,
+            const blitz::Array<double,2>& rawscores_zprobes_vs_tmodels,
+            blitz::Array<double,2>& normalizedscores);
+
+/**
+ * Normalise raw scores with T-Norm.
+ *
+ * @exception std::runtime_error matrix sizes are not consistent
+ *
+ * @param rawscores_probes_vs_models
+ * @param rawscores_probes_vs_tmodels
+ * @param[out] normalizedscores normalized scores
+ * @warning The destination score array should have the correct size
+ *          (Same size as rawscores_probes_vs_models)
+ */
+void tNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
+           const blitz::Array<double,2>& rawscores_probes_vs_tmodels,
+           blitz::Array<double,2>& normalizedscores);
+
+/**
+ * Normalise raw scores with Z-Norm.
+ *
+ * @exception std::runtime_error matrix sizes are not consistent
+ *
+ * @param rawscores_probes_vs_models
+ * @param rawscores_zprobes_vs_models
+ * @param[out] normalizedscores normalized scores
+ * @warning The destination score array should have the correct size
+ *          (Same size as rawscores_probes_vs_models)
+ */
+void zNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
+           const blitz::Array<double,2>& rawscores_zprobes_vs_models,
+           blitz::Array<double,2>& normalizedscores);
+
+} } } // namespaces
+
+#endif /* BOB_LEARN_EM_ZTNORM_H */
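A minimal sketch of calling the four-matrix ztNorm() overload above; the cohort sizes are placeholders and, as the @warning notes, the output array must be pre-allocated to the shape of the raw scores:

    #include <bob.learn.em/ZTNorm.h>

    void normalize_scores()
    {
      const int n_probes = 10, n_models = 5, n_zprobes = 20, n_tmodels = 8;
      blitz::Array<double,2> raw(n_probes, n_models);          // probes vs. client models
      blitz::Array<double,2> z_vs_models(n_zprobes, n_models); // Z-cohort probes vs. client models
      blitz::Array<double,2> probes_vs_t(n_probes, n_tmodels); // probes vs. T-cohort models
      blitz::Array<double,2> z_vs_t(n_zprobes, n_tmodels);     // Z-cohort probes vs. T-cohort models
      blitz::Array<double,2> normalized(n_probes, n_models);   // same shape as raw
      // ... fill the raw score matrices before normalizing ...
      bob::learn::em::ztNorm(raw, z_vs_models, probes_vs_t, z_vs_t, normalized);
    }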
diff --git a/bob/learn/em/include/bob.learn.em/api.h b/bob/learn/em/include/bob.learn.em/api.h
new file mode 100644
index 0000000000000000000000000000000000000000..7548a302f751d0a20b13d5966affffed670b8bf6
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/api.h
@@ -0,0 +1,127 @@
+/**
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ * @date Fri 21 Nov 10:38:48 2013
+ *
+ * @brief Python API for bob::learn::em
+ */
+
+#ifndef BOB_LEARN_EM_API_H
+#define BOB_LEARN_EM_API_H
+
+/* Define Module Name and Prefix for other Modules
+   Note: We cannot use BOB_EXT_* macros here, unfortunately */
+#define BOB_LEARN_EM_PREFIX    "bob.learn.em"
+#define BOB_LEARN_EM_FULL_NAME "bob.learn.em._library"
+
+#include <Python.h>
+
+#include <bob.learn.em/config.h>
+#include <boost/shared_ptr.hpp>
+
+/*******************
+ * C API functions *
+ *******************/
+
+/* Enum defining entries in the function table */
+enum _PyBobLearnEM_ENUM{
+  PyBobLearnEM_APIVersion_NUM = 0,
+  // bindings: entries for bound types would be listed here
+  // Total number of C API pointers
+  PyBobLearnEM_API_pointers
+};
+
+
+#ifdef BOB_LEARN_EM_MODULE
+
+  /* This section is used when compiling `bob.learn.em' itself */
+
+  /**************
+   * Versioning *
+   **************/
+
+  extern int PyBobLearnEM_APIVersion;
+
+#else // BOB_LEARN_EM_MODULE
+
+  /* This section is used in modules that use `bob.learn.em's' C-API */
+
+#if defined(NO_IMPORT_ARRAY)
+  extern void **PyBobLearnEM_API;
+#elif defined(PY_ARRAY_UNIQUE_SYMBOL)
+  void **PyBobLearnEM_API;
+#else
+  static void **PyBobLearnEM_API=NULL;
+#endif
+
+  /**************
+   * Versioning *
+   **************/
+
+#define PyBobLearnEM_APIVersion (*(int *)PyBobLearnEM_API[PyBobLearnEM_APIVersion_NUM])
+
+#if !defined(NO_IMPORT_ARRAY)
+
+  /**
+   * Returns -1 on error, 0 on success.
+   */
+  static int import_bob_learn_em(void) {
+
+    PyObject *c_api_object;
+    PyObject *module;
+
+    module = PyImport_ImportModule(BOB_LEARN_EM_FULL_NAME);
+
+    if (module == NULL) return -1;
+
+    c_api_object = PyObject_GetAttrString(module, "_C_API");
+
+    if (c_api_object == NULL) {
+      Py_DECREF(module);
+      return -1;
+    }
+
+#if PY_VERSION_HEX >= 0x02070000
+    if (PyCapsule_CheckExact(c_api_object)) {
+      PyBobLearnEM_API = (void **)PyCapsule_GetPointer(c_api_object, PyCapsule_GetName(c_api_object));
+    }
+#else
+    if (PyCObject_Check(c_api_object)) {
+      PyBobLearnEM_API = (void **)PyCObject_AsVoidPtr(c_api_object);
+    }
+#endif
+
+    Py_DECREF(c_api_object);
+    Py_DECREF(module);
+
+    if (!PyBobLearnEM_API) {
+      PyErr_SetString(PyExc_ImportError, "cannot find C/C++ API "
+#if PY_VERSION_HEX >= 0x02070000
+          "capsule"
+#else
+          "cobject"
+#endif
+          " at `" BOB_LEARN_EM_FULL_NAME "._C_API'");
+      return -1;
+    }
+
+    /* Checks that the imported version matches the compiled version */
+    int imported_version = *(int*)PyBobLearnEM_API[PyBobLearnEM_APIVersion_NUM];
+
+    if (BOB_LEARN_EM_API_VERSION != imported_version) {
+      PyErr_Format(PyExc_ImportError, BOB_LEARN_EM_FULL_NAME " import error: you compiled against API version 0x%04x, but are now importing an API with version 0x%04x which is not compatible - check your Python runtime environment for errors", BOB_LEARN_EM_API_VERSION, imported_version);
+      return -1;
+    }
+
+    /* If you get to this point, all is good */
+    return 0;
+
+  }
+
+#endif //!defined(NO_IMPORT_ARRAY)
+
+#endif /* BOB_LEARN_EM_MODULE */
+
+#endif /* BOB_LEARN_EM_API_H */
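A dependent extension (compiled without BOB_LEARN_EM_MODULE) would bootstrap this C-API during its own module initialization; a sketch, with the surrounding module code elided:

    #include <bob.learn.em/api.h>

    static int bootstrap_bob_learn_em(void) {
      if (import_bob_learn_em() < 0) {
        PyErr_Print();
        PyErr_Format(PyExc_ImportError,
          "cannot import C/C++ API of `" BOB_LEARN_EM_FULL_NAME "'");
        return -1; /* abort module initialization */
      }
      return 0;
    }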
diff --git a/bob/learn/em/include/bob.learn.em/config.h b/bob/learn/em/include/bob.learn.em/config.h
new file mode 100644
index 0000000000000000000000000000000000000000..dacdd4a233043e86b01c734ef323bc1dac4dc15a
--- /dev/null
+++ b/bob/learn/em/include/bob.learn.em/config.h
@@ -0,0 +1,14 @@
+/**
+ * @author Manuel Guenther <manuel.guenther@idiap.ch>
+ * @date Thu Aug 21 20:49:42 CEST 2014
+ *
+ * @brief General directives for all modules in bob.learn.em
+ */
+
+#ifndef BOB_LEARN_EM_CONFIG_H
+#define BOB_LEARN_EM_CONFIG_H
+
+/* Macros that define versions and important names */
+#define BOB_LEARN_EM_API_VERSION 0x0200
+
+#endif /* BOB_LEARN_EM_CONFIG_H */
diff --git a/bob/learn/em/isv_base.cpp b/bob/learn/em/isv_base.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..95672953bba120a6150e175963767db3fc5d67e3
--- /dev/null
+++ b/bob/learn/em/isv_base.cpp
@@ -0,0 +1,528 @@
+/**
+ * @date Wed Jan 28 11:13:15 2015 +0200
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ *
+ * @brief Python API for bob::learn::em
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "main.h"
+
+/******************************************************************/
+/************ Constructor Section *********************************/
+/******************************************************************/
+
+static auto ISVBase_doc = bob::extension::ClassDoc(
+  BOB_EXT_MODULE_PREFIX ".ISVBase",
+
+  "A ISVBase instance can be seen as a container for U and D when performing Joint Factor Analysis (JFA)."
+  "References: [Vogt2008,McCool2013]",
+  ""
+).add_constructor(
+  bob::extension::FunctionDoc(
+    "__init__",
+    "Creates a ISVBase",
+    "",
+    true
+  )
+  .add_prototype("gmm,ru","")
+  .add_prototype("other","")
+  .add_prototype("hdf5","")
+  .add_prototype("","")
+
+  .add_parameter("gmm", ":py:class:`bob.learn.em.GMMMachine`", "The Universal Background Model.")
+  .add_parameter("ru", "int", "Size of U (Within client variation matrix). In the end the U matrix will have (number_of_gaussians * feature_dimension x ru)")
+  .add_parameter("other", ":py:class:`bob.learn.em.ISVBase`", "A ISVBase object to be copied.")
+  .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
+
+);
+
+
+static int PyBobLearnEMISVBase_init_copy(PyBobLearnEMISVBaseObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = ISVBase_doc.kwlist(1);
+  PyBobLearnEMISVBaseObject* o;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMISVBase_Type, &o)){
+    ISVBase_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::ISVBase(*o->cxx));
+  return 0;
+}
+
+
+static int PyBobLearnEMISVBase_init_hdf5(PyBobLearnEMISVBaseObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = ISVBase_doc.kwlist(2);
+
+  PyBobIoHDF5FileObject* config = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBobIoHDF5File_Converter, &config)){
+    ISVBase_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::ISVBase(*(config->f)));
+
+  return 0;
+}
+
+
+static int PyBobLearnEMISVBase_init_ubm(PyBobLearnEMISVBaseObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = ISVBase_doc.kwlist(0);
+  
+  PyBobLearnEMGMMMachineObject* ubm;
+  int ru = 1;
+
+  //Here we have to select which keyword argument to read  
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!i", kwlist, &PyBobLearnEMGMMMachine_Type, &ubm, &ru)){
+    ISVBase_doc.print_usage();
+    return -1;
+  }
+  
+  if(ru < 1){
+    PyErr_Format(PyExc_TypeError, "ru argument must be greater than or equal to one");
+    return -1;
+  }
+  
+  self->cxx.reset(new bob::learn::em::ISVBase(ubm->cxx, ru));
+  return 0;
+}
+
+
+static int PyBobLearnEMISVBase_init(PyBobLearnEMISVBaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // get the number of arguments passed to the constructor
+  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
+    
+  switch (nargs) {
+
+    case 1:{
+      //Reading the input argument
+      PyObject* arg = 0;
+      if (PyTuple_Size(args))
+        arg = PyTuple_GET_ITEM(args, 0);
+      else {
+        PyObject* tmp = PyDict_Values(kwargs);
+        auto tmp_ = make_safe(tmp);
+        arg = PyList_GET_ITEM(tmp, 0);
+      }
+
+      // If the constructor input is an ISVBase object
+      if (PyBobLearnEMISVBase_Check(arg))
+        return PyBobLearnEMISVBase_init_copy(self, args, kwargs);
+      // If the constructor input is an HDF5 file
+      else if (PyBobIoHDF5File_Check(arg))
+        return PyBobLearnEMISVBase_init_hdf5(self, args, kwargs);
+    }
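+      // any other single-argument type falls through to init_ubm below,
+      // which will then report the usage error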
+    case 2:
+      return PyBobLearnEMISVBase_init_ubm(self, args, kwargs);
+    default:
+      PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires 1 or 2 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
+      ISVBase_doc.print_usage();
+      return -1;
+  }
+  BOB_CATCH_MEMBER("cannot create ISVBase", 0)
+  return 0;
+}
+
+
+static void PyBobLearnEMISVBase_delete(PyBobLearnEMISVBaseObject* self) {
+  self->cxx.reset();
+  Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+static PyObject* PyBobLearnEMISVBase_RichCompare(PyBobLearnEMISVBaseObject* self, PyObject* other, int op) {
+  BOB_TRY
+
+  if (!PyBobLearnEMISVBase_Check(other)) {
+    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
+    return 0;
+  }
+  auto other_ = reinterpret_cast<PyBobLearnEMISVBaseObject*>(other);
+  switch (op) {
+    case Py_EQ:
+      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+    case Py_NE:
+      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
+    default:
+      Py_INCREF(Py_NotImplemented);
+      return Py_NotImplemented;
+  }
+  BOB_CATCH_MEMBER("cannot compare ISVBase objects", 0)
+}
+
+int PyBobLearnEMISVBase_Check(PyObject* o) {
+  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnEMISVBase_Type));
+}
+
+
+/******************************************************************/
+/************ Variables Section ***********************************/
+/******************************************************************/
+
+/***** shape *****/
+static auto shape = bob::extension::VariableDoc(
+  "shape",
+  "(int,int, int)",
+  "A tuple that represents the number of gaussians, dimensionality of each Gaussian, dimensionality of the rU (within client variability matrix) `(#Gaussians, #Inputs, #rU)`.",
+  ""
+);
+PyObject* PyBobLearnEMISVBase_getShape(PyBobLearnEMISVBaseObject* self, void*) {
+  BOB_TRY
+  return Py_BuildValue("(i,i,i)", self->cxx->getNGaussians(), self->cxx->getNInputs(), self->cxx->getDimRu());
+  BOB_CATCH_MEMBER("shape could not be read", 0)
+}
+
+/***** supervector_length *****/
+static auto supervector_length = bob::extension::VariableDoc(
+  "supervector_length",
+  "int",
+
+  "Returns the supervector length."
+  "NGaussians x NInputs: Number of Gaussian components by the feature dimensionality",
+  
+  "@warning An exception is thrown if no Universal Background Model has been set yet."
+);
+PyObject* PyBobLearnEMISVBase_getSupervectorLength(PyBobLearnEMISVBaseObject* self, void*) {
+  BOB_TRY
+  return Py_BuildValue("i", self->cxx->getSupervectorLength());
+  BOB_CATCH_MEMBER("supervector_length could not be read", 0)
+}
+
+
+/***** u *****/
+static auto U = bob::extension::VariableDoc(
+  "u",
+  "array_like <float, 2D>",
+  "Returns the U matrix (within client variability matrix)",
+  ""
+);
+PyObject* PyBobLearnEMISVBase_getU(PyBobLearnEMISVBaseObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getU());
+  BOB_CATCH_MEMBER("``u`` could not be read", 0)
+}
+int PyBobLearnEMISVBase_setU(PyBobLearnEMISVBaseObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, U.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "u");
+  if (!b) return -1;
+  self->cxx->setU(*b);
+  return 0;
+  BOB_CATCH_MEMBER("``u`` matrix could not be set", -1)
+}
+
+
+/***** d *****/
+static auto D = bob::extension::VariableDoc(
+  "d",
+  "array_like <float, 1D>",
+  "Returns the diagonal matrix diag(d) (as a 1D vector)",
+  ""
+);
+PyObject* PyBobLearnEMISVBase_getD(PyBobLearnEMISVBaseObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getD());
+  BOB_CATCH_MEMBER("``d`` could not be read", 0)
+}
+int PyBobLearnEMISVBase_setD(PyBobLearnEMISVBaseObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, D.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "d");
+  if (!b) return -1;
+  self->cxx->setD(*b);
+  return 0;
+  BOB_CATCH_MEMBER("``d`` matrix could not be set", -1)
+}
+
+
+/***** ubm *****/
+static auto ubm = bob::extension::VariableDoc(
+  "ubm",
+  ":py:class:`bob.learn.em.GMMMachine`",
+  "Returns the UBM (Universal Background Model",
+  ""
+);
+PyObject* PyBobLearnEMISVBase_getUBM(PyBobLearnEMISVBaseObject* self, void*){
+  BOB_TRY
+
+  boost::shared_ptr<bob::learn::em::GMMMachine> ubm_gmmMachine = self->cxx->getUbm();
+
+  //Allocating the correspondent python object
+  PyBobLearnEMGMMMachineObject* retval =
+    (PyBobLearnEMGMMMachineObject*)PyBobLearnEMGMMMachine_Type.tp_alloc(&PyBobLearnEMGMMMachine_Type, 0);
+  retval->cxx = ubm_gmmMachine;
+
+  return Py_BuildValue("O",retval);
+  BOB_CATCH_MEMBER("ubm could not be read", 0)
+}
+int PyBobLearnEMISVBase_setUBM(PyBobLearnEMISVBaseObject* self, PyObject* value, void*){
+  BOB_TRY
+
+  if (!PyBobLearnEMGMMMachine_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a :py:class:`bob.learn.em.GMMMachine`", Py_TYPE(self)->tp_name, ubm.name());
+    return -1;
+  }
+
+  PyBobLearnEMGMMMachineObject* ubm_gmmMachine = 0;
+  PyArg_Parse(value, "O!", &PyBobLearnEMGMMMachine_Type,&ubm_gmmMachine);
+
+  self->cxx->setUbm(ubm_gmmMachine->cxx);
+
+  return 0;
+  BOB_CATCH_MEMBER("ubm could not be set", -1)  
+}
+
+
+
+static PyGetSetDef PyBobLearnEMISVBase_getseters[] = { 
+  {
+   shape.name(),
+   (getter)PyBobLearnEMISVBase_getShape,
+   0,
+   shape.doc(),
+   0
+  },
+  
+  {
+   supervector_length.name(),
+   (getter)PyBobLearnEMISVBase_getSupervectorLength,
+   0,
+   supervector_length.doc(),
+   0
+  },
+  
+  {
+   U.name(),
+   (getter)PyBobLearnEMISVBase_getU,
+   (setter)PyBobLearnEMISVBase_setU,
+   U.doc(),
+   0
+  },
+  
+  {
+   D.name(),
+   (getter)PyBobLearnEMISVBase_getD,
+   (setter)PyBobLearnEMISVBase_setD,
+   D.doc(),
+   0
+  },
+
+  {
+   ubm.name(),
+   (getter)PyBobLearnEMISVBase_getUBM,
+   (setter)PyBobLearnEMISVBase_setUBM,
+   ubm.doc(),
+   0
+  },
+
+
+  {0}  // Sentinel
+};
+
+
+/******************************************************************/
+/************ Functions Section ***********************************/
+/******************************************************************/
+
+
+/*** save ***/
+static auto save = bob::extension::FunctionDoc(
+  "save",
+  "Save the configuration of the ISVBase to a given HDF5 file"
+)
+.add_prototype("hdf5")
+.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing");
+static PyObject* PyBobLearnEMISVBase_Save(PyBobLearnEMISVBaseObject* self,  PyObject* args, PyObject* kwargs) {
+
+  BOB_TRY
+  
+  // get list of arguments
+  char** kwlist = save.kwlist(0);  
+  PyBobIoHDF5FileObject* hdf5;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
+
+  auto hdf5_ = make_safe(hdf5);
+  self->cxx->save(*hdf5->f);
+
+  BOB_CATCH_MEMBER("cannot save the data", 0)
+  Py_RETURN_NONE;
+}
+
+/*** load ***/
+static auto load = bob::extension::FunctionDoc(
+  "load",
+  "Load the configuration of the ISVBase to a given HDF5 file"
+)
+.add_prototype("hdf5")
+.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
+static PyObject* PyBobLearnEMISVBase_Load(PyBobLearnEMISVBaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = load.kwlist(0);  
+  PyBobIoHDF5FileObject* hdf5;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
+  
+  auto hdf5_ = make_safe(hdf5);  
+  self->cxx->load(*hdf5->f);
+
+  BOB_CATCH_MEMBER("cannot load the data", 0)
+  Py_RETURN_NONE;
+}
+
+
+/*** is_similar_to ***/
+static auto is_similar_to = bob::extension::FunctionDoc(
+  "is_similar_to",
+  
+  "Compares this ISVBase with the ``other`` one to be approximately the same.",
+  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
+  "relative and absolute precision for the ``weights``, ``biases`` "
+  "and any other values internal to this machine."
+)
+.add_prototype("other, [r_epsilon], [a_epsilon]","output")
+.add_parameter("other", ":py:class:`bob.learn.em.ISVBase`", "A ISVBase object to be compared.")
+.add_parameter("r_epsilon", "float", "Relative precision.")
+.add_parameter("a_epsilon", "float", "Absolute precision.")
+.add_return("output","bool","True if it is similar, otherwise false.");
+static PyObject* PyBobLearnEMISVBase_IsSimilarTo(PyBobLearnEMISVBaseObject* self, PyObject* args, PyObject* kwds) {
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = is_similar_to.kwlist(0);
+
+  //PyObject* other = 0;
+  PyBobLearnEMISVBaseObject* other = 0;
+  double r_epsilon = 1.e-5;
+  double a_epsilon = 1.e-8;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
+        &PyBobLearnEMISVBase_Type, &other,
+        &r_epsilon, &a_epsilon)){
+
+        is_similar_to.print_usage(); 
+        return 0;        
+  }
+
+  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
+    Py_RETURN_TRUE;
+  else
+    Py_RETURN_FALSE;
+}
+
+
+/*** resize ***/
+static auto resize = bob::extension::FunctionDoc(
+  "resize",
+  "Resets the dimensionality of the subspace U. "
+  "U is hence uninitialized.",
+  0,
+  true
+)
+.add_prototype("rU")
+.add_parameter("rU", "int", "Size of U (Within client variation matrix)");
+static PyObject* PyBobLearnEMISVBase_resize(PyBobLearnEMISVBaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = resize.kwlist(0);
+
+  int rU = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &rU)) Py_RETURN_NONE;
+
+  if (rU <= 0){
+    PyErr_Format(PyExc_TypeError, "rU must be greater than zero");
+    resize.print_usage();
+    return 0;
+  }
+
+  self->cxx->resize(rU);
+
+  BOB_CATCH_MEMBER("cannot perform the resize method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+
+
+static PyMethodDef PyBobLearnEMISVBase_methods[] = {
+  {
+    save.name(),
+    (PyCFunction)PyBobLearnEMISVBase_Save,
+    METH_VARARGS|METH_KEYWORDS,
+    save.doc()
+  },
+  {
+    load.name(),
+    (PyCFunction)PyBobLearnEMISVBase_Load,
+    METH_VARARGS|METH_KEYWORDS,
+    load.doc()
+  },
+  {
+    is_similar_to.name(),
+    (PyCFunction)PyBobLearnEMISVBase_IsSimilarTo,
+    METH_VARARGS|METH_KEYWORDS,
+    is_similar_to.doc()
+  },
+  {
+    resize.name(),
+    (PyCFunction)PyBobLearnEMISVBase_resize,
+    METH_VARARGS|METH_KEYWORDS,
+    resize.doc()
+  },
+  
+  {0} /* Sentinel */
+};
+
+
+/******************************************************************/
+/************ Module Section **************************************/
+/******************************************************************/
+
+// Define the ISVBase type struct; will be initialized later
+PyTypeObject PyBobLearnEMISVBase_Type = {
+  PyVarObject_HEAD_INIT(0,0)
+  0
+};
+
+bool init_BobLearnEMISVBase(PyObject* module)
+{
+  // initialize the type struct
+  PyBobLearnEMISVBase_Type.tp_name      = ISVBase_doc.name();
+  PyBobLearnEMISVBase_Type.tp_basicsize = sizeof(PyBobLearnEMISVBaseObject);
+  PyBobLearnEMISVBase_Type.tp_flags     = Py_TPFLAGS_DEFAULT;
+  PyBobLearnEMISVBase_Type.tp_doc       = ISVBase_doc.doc();
+
+  // set the functions
+  PyBobLearnEMISVBase_Type.tp_new         = PyType_GenericNew;
+  PyBobLearnEMISVBase_Type.tp_init        = reinterpret_cast<initproc>(PyBobLearnEMISVBase_init);
+  PyBobLearnEMISVBase_Type.tp_dealloc     = reinterpret_cast<destructor>(PyBobLearnEMISVBase_delete);
+  PyBobLearnEMISVBase_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnEMISVBase_RichCompare);
+  PyBobLearnEMISVBase_Type.tp_methods     = PyBobLearnEMISVBase_methods;
+  PyBobLearnEMISVBase_Type.tp_getset      = PyBobLearnEMISVBase_getseters;
+  //PyBobLearnEMISVBase_Type.tp_call = reinterpret_cast<ternaryfunc>(PyBobLearnEMISVBase_forward);
+
+
+  // check that everything is fine
+  if (PyType_Ready(&PyBobLearnEMISVBase_Type) < 0) return false;
+
+  // add the type to the module
+  Py_INCREF(&PyBobLearnEMISVBase_Type);
+  return PyModule_AddObject(module, "ISVBase", (PyObject*)&PyBobLearnEMISVBase_Type) >= 0;
+}
+
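On the C++ side the bindings above reduce to direct calls into bob::learn::em::ISVBase; a construction sketch (the header name, UBM geometry and rank rU are illustrative assumptions):

    #include <bob.learn.em/ISVBase.h>

    void build_isv_base()
    {
      boost::shared_ptr<bob::learn::em::GMMMachine> ubm(
        new bob::learn::em::GMMMachine(512, 60)); // 512 Gaussians, 60-dim features
      bob::learn::em::ISVBase base(ubm, 160);     // rU = 160
      // base.getU() then has shape (512 * 60) x 160: supervector length x rU
    }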
diff --git a/bob/learn/em/isv_machine.cpp b/bob/learn/em/isv_machine.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..9b56ad7ed0779f8cd96a4bc29f9a0852230b7e5b
--- /dev/null
+++ b/bob/learn/em/isv_machine.cpp
@@ -0,0 +1,604 @@
+/**
+ * @date Wed Jan 28 13:03:15 2015 +0200
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ *
+ * @brief Python API for bob::learn::em
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "main.h"
+
+/******************************************************************/
+/************ Constructor Section *********************************/
+/******************************************************************/
+
+static auto ISVMachine_doc = bob::extension::ClassDoc(
+  BOB_EXT_MODULE_PREFIX ".ISVMachine",
+  "A ISVMachine. An attached :py:class:`bob.learn.em.ISVBase` should be provided for Joint Factor Analysis. The :py:class:`bob.learn.em.ISVMachine` carries information about the speaker factors y and z, whereas a :py:class:`bob.learn.em.JFABase` carries information about the matrices U, V and D."
+  "References: [Vogt2008,McCool2013]",
+  ""
+).add_constructor(
+  bob::extension::FunctionDoc(
+    "__init__",
+    "Constructor. Builds a new ISVMachine",
+    "",
+    true
+  )
+  .add_prototype("isv_base","")
+  .add_prototype("other","")
+  .add_prototype("hdf5","")
+
+  .add_parameter("isv", ":py:class:`bob.learn.em.ISVBase`", "The ISVBase associated with this machine")
+  .add_parameter("other", ":py:class:`bob.learn.em.ISVMachine`", "A ISVMachine object to be copied.")
+  .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
+
+);
+
+
+static int PyBobLearnEMISVMachine_init_copy(PyBobLearnEMISVMachineObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = ISVMachine_doc.kwlist(1);
+  PyBobLearnEMISVMachineObject* o;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMISVMachine_Type, &o)){
+    ISVMachine_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::ISVMachine(*o->cxx));
+  return 0;
+}
+
+
+static int PyBobLearnEMISVMachine_init_hdf5(PyBobLearnEMISVMachineObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = ISVMachine_doc.kwlist(2);
+
+  PyBobIoHDF5FileObject* config = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBobIoHDF5File_Converter, &config)){
+    ISVMachine_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::ISVMachine(*(config->f)));
+
+  return 0;
+}
+
+
+static int PyBobLearnEMISVMachine_init_isvbase(PyBobLearnEMISVMachineObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = ISVMachine_doc.kwlist(0);
+  
+  PyBobLearnEMISVBaseObject* isv_base;
+
+  //Here we have to select which keyword argument to read  
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMISVBase_Type, &isv_base)){
+    ISVMachine_doc.print_usage();
+    return -1;
+  }
+  
+  self->cxx.reset(new bob::learn::em::ISVMachine(isv_base->cxx));
+  return 0;
+}
+
+
+static int PyBobLearnEMISVMachine_init(PyBobLearnEMISVMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // get the number of arguments passed to the constructor
+  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
+
+  if(nargs == 1){
+    //Reading the input argument
+    PyObject* arg = 0;
+    if (PyTuple_Size(args))
+      arg = PyTuple_GET_ITEM(args, 0);
+    else {
+      PyObject* tmp = PyDict_Values(kwargs);
+      auto tmp_ = make_safe(tmp);
+      arg = PyList_GET_ITEM(tmp, 0);
+    }
+
+    // If the constructor input is an ISVMachine object
+    if (PyBobLearnEMISVMachine_Check(arg))
+      return PyBobLearnEMISVMachine_init_copy(self, args, kwargs);
+    // If the constructor input is an HDF5 file
+    else if (PyBobIoHDF5File_Check(arg))
+      return PyBobLearnEMISVMachine_init_hdf5(self, args, kwargs);
+    // Otherwise, assume the input is an ISVBase object
+    else
+      return PyBobLearnEMISVMachine_init_isvbase(self, args, kwargs);
+  }
+  else{
+    PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires only 1 argument, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
+    ISVMachine_doc.print_usage();
+    return -1;
+  }
+  
+  BOB_CATCH_MEMBER("cannot create ISVMachine", 0)
+  return 0;
+}
+
+static void PyBobLearnEMISVMachine_delete(PyBobLearnEMISVMachineObject* self) {
+  self->cxx.reset();
+  Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+static PyObject* PyBobLearnEMISVMachine_RichCompare(PyBobLearnEMISVMachineObject* self, PyObject* other, int op) {
+  BOB_TRY
+
+  if (!PyBobLearnEMISVMachine_Check(other)) {
+    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
+    return 0;
+  }
+  auto other_ = reinterpret_cast<PyBobLearnEMISVMachineObject*>(other);
+  switch (op) {
+    case Py_EQ:
+      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+    case Py_NE:
+      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
+    default:
+      Py_INCREF(Py_NotImplemented);
+      return Py_NotImplemented;
+  }
+  BOB_CATCH_MEMBER("cannot compare ISVMachine objects", 0)
+}
+
+int PyBobLearnEMISVMachine_Check(PyObject* o) {
+  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnEMISVMachine_Type));
+}
+
+
+/******************************************************************/
+/************ Variables Section ***********************************/
+/******************************************************************/
+
+/***** shape *****/
+static auto shape = bob::extension::VariableDoc(
+  "shape",
+  "(int,int, int, int)",
+  "A tuple that represents the number of gaussians, dimensionality of each Gaussian and dimensionality of the rU (within client variability matrix)) ``(#Gaussians, #Inputs, #rU)``.",
+  ""
+);
+PyObject* PyBobLearnEMISVMachine_getShape(PyBobLearnEMISVMachineObject* self, void*) {
+  BOB_TRY
+  return Py_BuildValue("(i,i,i)", self->cxx->getNGaussians(), self->cxx->getNInputs(), self->cxx->getDimRu());
+  BOB_CATCH_MEMBER("shape could not be read", 0)
+}
+
+/***** supervector_length *****/
+static auto supervector_length = bob::extension::VariableDoc(
+  "supervector_length",
+  "int",
+
+  "Returns the supervector length."
+  "NGaussians x NInputs: Number of Gaussian components by the feature dimensionality",
+  
+  "@warning An exception is thrown if no Universal Background Model has been set yet."
+);
+PyObject* PyBobLearnEMISVMachine_getSupervectorLength(PyBobLearnEMISVMachineObject* self, void*) {
+  BOB_TRY
+  return Py_BuildValue("i", self->cxx->getSupervectorLength());
+  BOB_CATCH_MEMBER("supervector_length could not be read", 0)
+}
+
+/***** z *****/
+static auto Z = bob::extension::VariableDoc(
+  "z",
+  "array_like <float, 1D>",
+  "Returns the z speaker factor. Eq (31) from [McCool2013]",
+  ""
+);
+PyObject* PyBobLearnEMISVMachine_getZ(PyBobLearnEMISVMachineObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getZ());
+  BOB_CATCH_MEMBER("`z` could not be read", 0)
+}
+int PyBobLearnEMISVMachine_setZ(PyBobLearnEMISVMachineObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, Z.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "z");
+  if (!b) return -1;
+  self->cxx->setZ(*b);
+  return 0;
+  BOB_CATCH_MEMBER("`z` vector could not be set", -1)
+}
+
+
+/***** x *****/
+static auto X = bob::extension::VariableDoc(
+  "x",
+  "array_like <float, 1D>",
+  "Returns the X session factor. Eq (29) from [McCool2013]",
+  "The latent variable x (last one computed). This is a feature provided for convenience, but this attribute is not 'part' of the machine. The session latent variable x is indeed not class-specific, but depends on the sample considered. Furthermore, it is not saved into the machine or used when comparing machines."
+);
+PyObject* PyBobLearnEMISVMachine_getX(PyBobLearnEMISVMachineObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getX());
+  BOB_CATCH_MEMBER("`x` could not be read", 0)
+}
+
+
+/***** isv_base *****/
+static auto isv_base = bob::extension::VariableDoc(
+  "isv_base",
+  ":py:class:`bob.learn.em.ISVBase`",
+  "The ISVBase attached to this machine",
+  ""
+);
+PyObject* PyBobLearnEMISVMachine_getISVBase(PyBobLearnEMISVMachineObject* self, void*){
+  BOB_TRY
+
+  boost::shared_ptr<bob::learn::em::ISVBase> isv_base_o = self->cxx->getISVBase();
+
+  //Allocating the correspondent python object
+  PyBobLearnEMISVBaseObject* retval =
+    (PyBobLearnEMISVBaseObject*)PyBobLearnEMISVBase_Type.tp_alloc(&PyBobLearnEMISVBase_Type, 0);
+  retval->cxx = isv_base_o;
+
+  return Py_BuildValue("O",retval);
+  BOB_CATCH_MEMBER("isv_base could not be read", 0)
+}
+int PyBobLearnEMISVMachine_setISVBase(PyBobLearnEMISVMachineObject* self, PyObject* value, void*){
+  BOB_TRY
+
+  if (!PyBobLearnEMISVBase_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a :py:class:`bob.learn.em.ISVBase`", Py_TYPE(self)->tp_name, isv_base.name());
+    return -1;
+  }
+
+  PyBobLearnEMISVBaseObject* isv_base_o = 0;
+  PyArg_Parse(value, "O!", &PyBobLearnEMISVBase_Type,&isv_base_o);
+
+  self->cxx->setISVBase(isv_base_o->cxx);
+
+  return 0;
+  BOB_CATCH_MEMBER("isv_base could not be set", -1)  
+}
+
+
+
+
+static PyGetSetDef PyBobLearnEMISVMachine_getseters[] = { 
+  {
+   shape.name(),
+   (getter)PyBobLearnEMISVMachine_getShape,
+   0,
+   shape.doc(),
+   0
+  },
+  
+  {
+   supervector_length.name(),
+   (getter)PyBobLearnEMISVMachine_getSupervectorLength,
+   0,
+   supervector_length.doc(),
+   0
+  },
+  
+  {
+   isv_base.name(),
+   (getter)PyBobLearnEMISVMachine_getISVBase,
+   (setter)PyBobLearnEMISVMachine_setISVBase,
+   isv_base.doc(),
+   0
+  },
+
+  {
+   Z.name(),
+   (getter)PyBobLearnEMISVMachine_getZ,
+   (setter)PyBobLearnEMISVMachine_setZ,
+   Z.doc(),
+   0
+  },
+
+  {
+   X.name(),
+   (getter)PyBobLearnEMISVMachine_getX,
+   0,
+   X.doc(),
+   0
+  },
+
+
+  {0}  // Sentinel
+};
+
+
+/******************************************************************/
+/************ Functions Section ***********************************/
+/******************************************************************/
+
+
+/*** save ***/
+static auto save = bob::extension::FunctionDoc(
+  "save",
+  "Save the configuration of the ISVMachine to a given HDF5 file"
+)
+.add_prototype("hdf5")
+.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing");
+static PyObject* PyBobLearnEMISVMachine_Save(PyBobLearnEMISVMachineObject* self,  PyObject* args, PyObject* kwargs) {
+
+  BOB_TRY
+  
+  // get list of arguments
+  char** kwlist = save.kwlist(0);  
+  PyBobIoHDF5FileObject* hdf5;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
+
+  auto hdf5_ = make_safe(hdf5);
+  self->cxx->save(*hdf5->f);
+
+  BOB_CATCH_MEMBER("cannot save the data", 0)
+  Py_RETURN_NONE;
+}
+
+/*** load ***/
+static auto load = bob::extension::FunctionDoc(
+  "load",
+  "Load the configuration of the ISVMachine to a given HDF5 file"
+)
+.add_prototype("hdf5")
+.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
+static PyObject* PyBobLearnEMISVMachine_Load(PyBobLearnEMISVMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = load.kwlist(0);  
+  PyBobIoHDF5FileObject* hdf5;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
+  
+  auto hdf5_ = make_safe(hdf5);  
+  self->cxx->load(*hdf5->f);
+
+  BOB_CATCH_MEMBER("cannot load the data", 0)
+  Py_RETURN_NONE;
+}
+
+
+/*** is_similar_to ***/
+static auto is_similar_to = bob::extension::FunctionDoc(
+  "is_similar_to",
+  
+  "Compares this ISVMachine with the ``other`` one to be approximately the same.",
+  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
+  "relative and absolute precision for the ``weights``, ``biases`` "
+  "and any other values internal to this machine."
+)
+.add_prototype("other, [r_epsilon], [a_epsilon]","output")
+.add_parameter("other", ":py:class:`bob.learn.em.ISVMachine`", "A ISVMachine object to be compared.")
+.add_parameter("r_epsilon", "float", "Relative precision.")
+.add_parameter("a_epsilon", "float", "Absolute precision.")
+.add_return("output","bool","True if it is similar, otherwise false.");
+static PyObject* PyBobLearnEMISVMachine_IsSimilarTo(PyBobLearnEMISVMachineObject* self, PyObject* args, PyObject* kwds) {
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = is_similar_to.kwlist(0);
+
+  //PyObject* other = 0;
+  PyBobLearnEMISVMachineObject* other = 0;
+  double r_epsilon = 1.e-5;
+  double a_epsilon = 1.e-8;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
+        &PyBobLearnEMISVMachine_Type, &other,
+        &r_epsilon, &a_epsilon)){
+
+        is_similar_to.print_usage(); 
+        return 0;        
+  }
+
+  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
+    Py_RETURN_TRUE;
+  else
+    Py_RETURN_FALSE;
+}
+
+
+/*** estimate_x ***/
+static auto estimate_x = bob::extension::FunctionDoc(
+  "estimate_x",
+  "Estimates the session offset x (LPT assumption) given GMM statistics.",
+  "Estimates x from the GMM statistics considering the LPT assumption, that is the latent session variable x is approximated using the UBM", 
+  true
+)
+.add_prototype("stats,input")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "Statistics of the GMM")
+.add_parameter("input", "array_like <float, 1D>", "Input vector");
+static PyObject* PyBobLearnEMISVMachine_estimateX(PyBobLearnEMISVMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  char** kwlist = estimate_x.kwlist(0);
+
+  PyBobLearnEMGMMStatsObject* stats = 0;
+  PyBlitzArrayObject* input           = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMGMMStats_Type, &stats, 
+                                                                 &PyBlitzArray_Converter,&input))
+    return 0;
+
+  //protects acquired resources through this scope
+  auto input_ = make_safe(input);
+  self->cxx->estimateX(*stats->cxx, *PyBlitzArrayCxx_AsBlitz<double,1>(input));
+
+  BOB_CATCH_MEMBER("cannot estimate X", 0)
+  Py_RETURN_NONE;
+}
+
+
+/*** estimate_ux ***/
+static auto estimate_ux = bob::extension::FunctionDoc(
+  "estimate_ux",
+  "Estimates Ux (LPT assumption) given GMM statistics.",
+  "Estimates Ux from the GMM statistics considering the LPT assumption, that is the latent session variable x is approximated using the UBM.", 
+  true
+)
+.add_prototype("stats,input")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "Statistics of the GMM")
+.add_parameter("input", "array_like <float, 1D>", "Input vector");
+static PyObject* PyBobLearnEMISVMachine_estimateUx(PyBobLearnEMISVMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  char** kwlist = estimate_ux.kwlist(0);
+
+  PyBobLearnEMGMMStatsObject* stats = 0;
+  PyBlitzArrayObject* input           = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMGMMStats_Type, &stats, 
+                                                                 &PyBlitzArray_Converter,&input))
+    return 0;
+
+  //protects acquired resources through this scope
+  auto input_ = make_safe(input);
+  self->cxx->estimateUx(*stats->cxx, *PyBlitzArrayCxx_AsBlitz<double,1>(input));
+
+  BOB_CATCH_MEMBER("cannot estimate Ux", 0)
+  Py_RETURN_NONE;
+}
+
+
+/*** forward_ux ***/
+static auto forward_ux = bob::extension::FunctionDoc(
+  "forward_ux",
+  "Computes a score for the given UBM statistics and given the Ux vector",
+  "", 
+  true
+)
+.add_prototype("stats,ux")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "Statistics as input")
+.add_parameter("ux", "array_like <float, 1D>", "Input vector");
+static PyObject* PyBobLearnEMISVMachine_ForwardUx(PyBobLearnEMISVMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  char** kwlist = forward_ux.kwlist(0);
+
+  PyBobLearnEMGMMStatsObject* stats = 0;
+  PyBlitzArrayObject* ux_input        = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMGMMStats_Type, &stats, 
+                                                                 &PyBlitzArray_Converter,&ux_input))
+    return 0;
+
+  //protects acquired resources through this scope
+  auto ux_input_ = make_safe(ux_input);
+  double score = self->cxx->forward(*stats->cxx, *PyBlitzArrayCxx_AsBlitz<double,1>(ux_input));
+  
+  return Py_BuildValue("d", score);
+  BOB_CATCH_MEMBER("cannot forward_ux", 0)
+}
+
+
+/*** forward ***/
+static auto forward = bob::extension::FunctionDoc(
+  "forward",
+  "Execute the machine",
+  "", 
+  true
+)
+.add_prototype("stats")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "Statistics as input");
+static PyObject* PyBobLearnEMISVMachine_Forward(PyBobLearnEMISVMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  char** kwlist = forward.kwlist(0);
+
+  PyBobLearnEMGMMStatsObject* stats = 0;
+  
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMStats_Type, &stats))
+    return 0;
+
+  //protects acquired resources through this scope
+  double score = self->cxx->forward(*stats->cxx);
+
+  return Py_BuildValue("d", score);
+  BOB_CATCH_MEMBER("cannot forward", 0)
+
+}
+
+
+static PyMethodDef PyBobLearnEMISVMachine_methods[] = {
+  {
+    save.name(),
+    (PyCFunction)PyBobLearnEMISVMachine_Save,
+    METH_VARARGS|METH_KEYWORDS,
+    save.doc()
+  },
+  {
+    load.name(),
+    (PyCFunction)PyBobLearnEMISVMachine_Load,
+    METH_VARARGS|METH_KEYWORDS,
+    load.doc()
+  },
+  {
+    is_similar_to.name(),
+    (PyCFunction)PyBobLearnEMISVMachine_IsSimilarTo,
+    METH_VARARGS|METH_KEYWORDS,
+    is_similar_to.doc()
+  },
+  
+  {
+    estimate_x.name(),
+    (PyCFunction)PyBobLearnEMISVMachine_estimateX,
+    METH_VARARGS|METH_KEYWORDS,
+    estimate_x.doc()
+  },
+  
+  {
+    estimate_ux.name(),
+    (PyCFunction)PyBobLearnEMISVMachine_estimateUx,
+    METH_VARARGS|METH_KEYWORDS,
+    estimate_ux.doc()
+  },
+
+  {
+    forward_ux.name(),
+    (PyCFunction)PyBobLearnEMISVMachine_ForwardUx,
+    METH_VARARGS|METH_KEYWORDS,
+    forward_ux.doc()
+  },
+
+  {0} /* Sentinel */
+};
+
+
+/******************************************************************/
+/************ Module Section **************************************/
+/******************************************************************/
+
+// Define the ISVMachine type struct; will be initialized later
+PyTypeObject PyBobLearnEMISVMachine_Type = {
+  PyVarObject_HEAD_INIT(0,0)
+  0
+};
+
+bool init_BobLearnEMISVMachine(PyObject* module)
+{
+  // initialize the type struct
+  PyBobLearnEMISVMachine_Type.tp_name      = ISVMachine_doc.name();
+  PyBobLearnEMISVMachine_Type.tp_basicsize = sizeof(PyBobLearnEMISVMachineObject);
+  PyBobLearnEMISVMachine_Type.tp_flags     = Py_TPFLAGS_DEFAULT;
+  PyBobLearnEMISVMachine_Type.tp_doc       = ISVMachine_doc.doc();
+
+  // set the functions
+  PyBobLearnEMISVMachine_Type.tp_new         = PyType_GenericNew;
+  PyBobLearnEMISVMachine_Type.tp_init        = reinterpret_cast<initproc>(PyBobLearnEMISVMachine_init);
+  PyBobLearnEMISVMachine_Type.tp_dealloc     = reinterpret_cast<destructor>(PyBobLearnEMISVMachine_delete);
+  PyBobLearnEMISVMachine_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnEMISVMachine_RichCompare);
+  PyBobLearnEMISVMachine_Type.tp_methods     = PyBobLearnEMISVMachine_methods;
+  PyBobLearnEMISVMachine_Type.tp_getset      = PyBobLearnEMISVMachine_getseters;
+  PyBobLearnEMISVMachine_Type.tp_call = reinterpret_cast<ternaryfunc>(PyBobLearnEMISVMachine_Forward);
+
+
+  // check that everything is fine
+  if (PyType_Ready(&PyBobLearnEMISVMachine_Type) < 0) return false;
+
+  // add the type to the module
+  Py_INCREF(&PyBobLearnEMISVMachine_Type);
+  return PyModule_AddObject(module, "ISVMachine", (PyObject*)&PyBobLearnEMISVMachine_Type) >= 0;
+}
+
diff --git a/bob/learn/em/isv_trainer.cpp b/bob/learn/em/isv_trainer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..fd4d201db7e438f62e2d82388627aebd10d71492
--- /dev/null
+++ b/bob/learn/em/isv_trainer.cpp
@@ -0,0 +1,566 @@
+/**
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ * @date Mon 02 Feb 20:20:00 2015
+ *
+ * @brief Python API for bob::learn::em
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "main.h"
+#include <boost/make_shared.hpp>
+
+/******************************************************************/
+/************ Constructor Section *********************************/
+/******************************************************************/
+
+static int extract_GMMStats_1d(PyObject *list,
+                             std::vector<boost::shared_ptr<bob::learn::em::GMMStats> >& training_data)
+{
+  for (int i=0; i<PyList_GET_SIZE(list); i++){
+  
+    PyBobLearnEMGMMStatsObject* stats;
+    if (!PyArg_Parse(PyList_GetItem(list, i), "O!", &PyBobLearnEMGMMStats_Type, &stats)){
+      PyErr_Format(PyExc_RuntimeError, "Expected GMMStats objects");
+      return -1;
+    }
+    training_data.push_back(stats->cxx);
+  }
+  return 0;
+}
+
+static int extract_GMMStats_2d(PyObject *list,
+                             std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& training_data)
+{
+  for (int i=0; i<PyList_GET_SIZE(list); i++)
+  {
+    PyObject* another_list;
+    if (!PyArg_Parse(PyList_GetItem(list, i), "O!", &PyList_Type, &another_list)){
+      PyErr_Format(PyExc_RuntimeError, "Expected a list of lists of GMMStats objects");
+      return -1;
+    }
+
+    std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > another_training_data;
+    for (int j=0; j<PyList_GET_SIZE(another_list); j++){
+
+      PyBobLearnEMGMMStatsObject* stats;
+      if (!PyArg_Parse(PyList_GetItem(another_list, j), "O!", &PyBobLearnEMGMMStats_Type, &stats)){
+        PyErr_Format(PyExc_RuntimeError, "Expected GMMStats objects");
+        return -1;
+      }
+      another_training_data.push_back(stats->cxx);
+    }
+    training_data.push_back(another_training_data);
+  }
+  return 0;
+}
+
+template <int N>
+static PyObject* vector_as_list(const std::vector<blitz::Array<double,N> >& vec)
+{
+  PyObject* list = PyList_New(vec.size());
+  for(size_t i=0; i<vec.size(); i++){
+    blitz::Array<double,N> numpy_array = vec[i];
+    PyObject* numpy_py_object = PyBlitzArrayCxx_AsNumpy(numpy_array);
+    PyList_SET_ITEM(list, i, numpy_py_object);
+  }
+  return list;
+}
+
+template <int N>
+int list_as_vector(PyObject* list, std::vector<blitz::Array<double,N> >& vec)
+{
+  for (int i=0; i<PyList_GET_SIZE(list); i++)
+  {
+    PyBlitzArrayObject* blitz_object; 
+    if (!PyArg_Parse(PyList_GetItem(list, i), "O&", &PyBlitzArray_Converter, &blitz_object)){
+      PyErr_Format(PyExc_RuntimeError, "Expected numpy array object");
+      return -1;
+    }
+    auto blitz_object_ = make_safe(blitz_object);
+    vec.push_back(*PyBlitzArrayCxx_AsBlitz<double,N>(blitz_object));
+  }
+  return 0;
+}
+
+
+
+static auto ISVTrainer_doc = bob::extension::ClassDoc(
+  BOB_EXT_MODULE_PREFIX ".ISVTrainer",
+  "Trains the Intersession Variability (ISV) model. "
+  "References: [Vogt2008]_ [McCool2013]_",
+  ""
+).add_constructor(
+  bob::extension::FunctionDoc(
+    "__init__",
+    "Constructor. Builds a new ISVTrainer",
+    "",
+    true
+  )
+  .add_prototype("relevance_factor","")
+  .add_prototype("other","")
+  .add_prototype("","")
+  .add_parameter("other", ":py:class:`bob.learn.em.ISVTrainer`", "An ISVTrainer object to be copied.")
+  .add_parameter("relevance_factor", "double", "Relevance factor for the session-variability adaptation (see [Vogt2008]_).")
+);
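+
+/* Construction sketch (hedged): the three documented prototypes, assuming the
+   `_ISVTrainer` type registered below is re-exported as
+   `bob.learn.em.ISVTrainer` by the pure-Python layer.
+
+     import bob.learn.em
+     trainer  = bob.learn.em.ISVTrainer()          # default parameters
+     trainer  = bob.learn.em.ISVTrainer(4.0)       # explicit relevance factor
+     trainer2 = bob.learn.em.ISVTrainer(trainer)   # copy constructor
+*/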
+
+
+static int PyBobLearnEMISVTrainer_init_copy(PyBobLearnEMISVTrainerObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = ISVTrainer_doc.kwlist(1);
+  PyBobLearnEMISVTrainerObject* o;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMISVTrainer_Type, &o)){
+    ISVTrainer_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::ISVTrainer(*o->cxx));
+  return 0;
+}
+
+
+static int PyBobLearnEMISVTrainer_init_number(PyBobLearnEMISVTrainerObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = ISVTrainer_doc.kwlist(0);
+  double relevance_factor      = 4.;
+
+  //Parsing the input argments
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "d", kwlist, &relevance_factor))
+    return -1;
+
+  if(relevance_factor < 0){
+    PyErr_Format(PyExc_TypeError, "relevance_factor argument must be greater than or equal to zero");
+    return -1;
+  }
+  }
+
+  self->cxx.reset(new bob::learn::em::ISVTrainer(relevance_factor));
+  return 0;
+}
+
+
+static int PyBobLearnEMISVTrainer_init(PyBobLearnEMISVTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // get the number of command line arguments
+  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
+
+  switch(nargs){
+    case 0:{
+      self->cxx.reset(new bob::learn::em::ISVTrainer());
+      return 0;
+    }
+    case 1:{
+      //Reading the input argument
+      PyObject* arg = 0;
+      if (PyTuple_Size(args))
+        arg = PyTuple_GET_ITEM(args, 0);
+      else {
+        PyObject* tmp = PyDict_Values(kwargs);
+        auto tmp_ = make_safe(tmp);
+        arg = PyList_GET_ITEM(tmp, 0);
+      }
+      
+      if(PyBobLearnEMISVTrainer_Check(arg))
+        // If the constructor input is ISVTrainer object
+        return PyBobLearnEMISVTrainer_init_copy(self, args, kwargs);
+      else
+        return PyBobLearnEMISVTrainer_init_number(self, args, kwargs);
+
+    }
+    default:{
+      PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires only 0 or 1 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
+      ISVTrainer_doc.print_usage();
+      return -1;
+    }
+  }
+  BOB_CATCH_MEMBER("cannot create ISVTrainer", 0)
+  return 0;
+}
+
+
+static void PyBobLearnEMISVTrainer_delete(PyBobLearnEMISVTrainerObject* self) {
+  self->cxx.reset();
+  Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+
+int PyBobLearnEMISVTrainer_Check(PyObject* o) {
+  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnEMISVTrainer_Type));
+}
+
+
+static PyObject* PyBobLearnEMISVTrainer_RichCompare(PyBobLearnEMISVTrainerObject* self, PyObject* other, int op) {
+  BOB_TRY
+
+  if (!PyBobLearnEMISVTrainer_Check(other)) {
+    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
+    return 0;
+  }
+  auto other_ = reinterpret_cast<PyBobLearnEMISVTrainerObject*>(other);
+  switch (op) {
+    case Py_EQ:
+      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+    case Py_NE:
+      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
+    default:
+      Py_INCREF(Py_NotImplemented);
+      return Py_NotImplemented;
+  }
+  BOB_CATCH_MEMBER("cannot compare ISVTrainer objects", 0)
+}
+
+
+/******************************************************************/
+/************ Variables Section ***********************************/
+/******************************************************************/
+
+static auto acc_u_a1 = bob::extension::VariableDoc(
+  "acc_u_a1",
+  "array_like <float, 3D>",
+  "Accumulator updated during the E-step",
+  ""
+);
+PyObject* PyBobLearnEMISVTrainer_get_acc_u_a1(PyBobLearnEMISVTrainerObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccUA1());
+  BOB_CATCH_MEMBER("acc_u_a1 could not be read", 0)
+}
+int PyBobLearnEMISVTrainer_set_acc_u_a1(PyBobLearnEMISVTrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 3D array of floats", Py_TYPE(self)->tp_name, acc_u_a1.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,3>(o, "acc_u_a1");
+  if (!b) return -1;
+  self->cxx->setAccUA1(*b);
+  return 0;
+  BOB_CATCH_MEMBER("acc_u_a1 could not be set", -1)
+}
+
+
+static auto acc_u_a2 = bob::extension::VariableDoc(
+  "acc_u_a2",
+  "array_like <float, 2D>",
+  "Accumulator updated during the E-step",
+  ""
+);
+PyObject* PyBobLearnEMISVTrainer_get_acc_u_a2(PyBobLearnEMISVTrainerObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccUA2());
+  BOB_CATCH_MEMBER("acc_u_a2 could not be read", 0)
+}
+int PyBobLearnEMISVTrainer_set_acc_u_a2(PyBobLearnEMISVTrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, acc_u_a2.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "acc_u_a2");
+  if (!b) return -1;
+  self->cxx->setAccUA2(*b);
+  return 0;
+  BOB_CATCH_MEMBER("acc_u_a2 could not be set", -1)
+}
+
+
+
+
+
+static auto __X__ = bob::extension::VariableDoc(
+  "__X__",
+  "list",
+  "Latent variables `x` estimated during the E-step, as a list of 2D float arrays. Exposed mainly for testing purposes.",
+  ""
+);
+PyObject* PyBobLearnEMISVTrainer_get_X(PyBobLearnEMISVTrainerObject* self, void*){
+  BOB_TRY
+  return vector_as_list(self->cxx->getX());
+  BOB_CATCH_MEMBER("__X__ could not be read", 0)
+}
+int PyBobLearnEMISVTrainer_set_X(PyBobLearnEMISVTrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+
+  // Parses input arguments in a single shot
+  if (!PyList_Check(value)){
+    PyErr_Format(PyExc_TypeError, "Expected a list in `%s'", __X__.name());
+    return -1;
+  }
+    
+  std::vector<blitz::Array<double,2> > data;
+  if(list_as_vector(value, data)==0){
+    self->cxx->setX(data);
+  }
+
+  return 0;
+  BOB_CATCH_MEMBER("__X__ could not be written", -1)
+}
+
+
+static auto __Z__ = bob::extension::VariableDoc(
+  "__Z__",
+  "list",
+  "Latent variables `z` estimated during the E-step, as a list of 1D float arrays (one per client). Exposed mainly for testing purposes.",
+  ""
+);
+PyObject* PyBobLearnEMISVTrainer_get_Z(PyBobLearnEMISVTrainerObject* self, void*){
+  BOB_TRY
+  return vector_as_list(self->cxx->getZ());
+  BOB_CATCH_MEMBER("__Z__ could not be read", 0)
+}
+int PyBobLearnEMISVTrainer_set_Z(PyBobLearnEMISVTrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+
+  // Parses input arguments in a single shot
+  if (!PyList_Check(value)){
+    PyErr_Format(PyExc_TypeError, "Expected a list in `%s'", __Z__.name());
+    return -1;
+  }
+    
+  std::vector<blitz::Array<double,1> > data;
+  if(list_as_vector(value, data)==0){
+    self->cxx->setZ(data);
+  }
+
+  return 0;
+  BOB_CATCH_MEMBER("__Z__ could not be written", -1)
+}
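+
+/* Access sketch (hedged): `__X__` and `__Z__` round-trip plain Python lists
+   of numpy arrays; the shapes and names below are illustrative only.
+
+     import numpy
+     X = trainer.__X__                                       # list of 2D arrays
+     trainer.__Z__ = [numpy.zeros((dim,)) for _ in range(n)] # list of 1D arrays
+*/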
+
+
+
+
+static PyGetSetDef PyBobLearnEMISVTrainer_getseters[] = { 
+  {
+   acc_u_a1.name(),
+   (getter)PyBobLearnEMISVTrainer_get_acc_u_a1,
+   (setter)PyBobLearnEMISVTrainer_set_acc_u_a1,
+   acc_u_a1.doc(),
+   0
+  },
+  {
+   acc_u_a2.name(),
+   (getter)PyBobLearnEMISVTrainer_get_acc_u_a2,
+   (setter)PyBobLearnEMISVTrainer_set_acc_u_a2,
+   acc_u_a2.doc(),
+   0
+  },
+  {
+   __X__.name(),
+   (getter)PyBobLearnEMISVTrainer_get_X,
+   (setter)PyBobLearnEMISVTrainer_set_X,
+   __X__.doc(),
+   0
+  },
+  {
+   __Z__.name(),
+   (getter)PyBobLearnEMISVTrainer_get_Z,
+   (setter)PyBobLearnEMISVTrainer_set_Z,
+   __Z__.doc(),
+   0
+  },
+  
+  
+
+  {0}  // Sentinel
+};
+
+
+/******************************************************************/
+/************ Functions Section ***********************************/
+/******************************************************************/
+
+/*** initialize ***/
+static auto initialize = bob::extension::FunctionDoc(
+  "initialize",
+  "Initialization before the EM steps",
+  "",
+  true
+)
+.add_prototype("isv_base,stats")
+.add_parameter("isv_base", ":py:class:`bob.learn.em.ISVBase`", "ISVBase Object")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object");
+static PyObject* PyBobLearnEMISVTrainer_initialize(PyBobLearnEMISVTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = initialize.kwlist(0);
+
+  PyBobLearnEMISVBaseObject* isv_base = 0;
+  PyObject* stats = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnEMISVBase_Type, &isv_base,
+                                                                 &PyList_Type, &stats)) return 0;
+
+  std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > > training_data;
+  if(extract_GMMStats_2d(stats ,training_data)==0)
+    self->cxx->initialize(*isv_base->cxx, training_data);
+
+  BOB_CATCH_MEMBER("cannot perform the initialize method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** e_step ***/
+static auto e_step = bob::extension::FunctionDoc(
+  "e_step",
+  "Call the e-step procedure (for the U subspace).",
+  "",
+  true
+)
+.add_prototype("isv_base,stats")
+.add_parameter("isv_base", ":py:class:`bob.learn.em.ISVBase`", "ISVBase Object")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object");
+static PyObject* PyBobLearnEMISVTrainer_e_step(PyBobLearnEMISVTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // Parses input arguments in a single shot
+  char** kwlist = e_step.kwlist(0);
+
+  PyBobLearnEMISVBaseObject* isv_base = 0;
+  PyObject* stats = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnEMISVBase_Type, &isv_base,
+                                                                 &PyList_Type, &stats)) return 0;
+
+  std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > > training_data;
+  if(extract_GMMStats_2d(stats ,training_data)==0)
+    self->cxx->eStep(*isv_base->cxx, training_data);
+
+  BOB_CATCH_MEMBER("cannot perform the e_step method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** m_step ***/
+static auto m_step = bob::extension::FunctionDoc(
+  "m_step",
+  "Call the m-step procedure (for the U subspace).",
+  "",
+  true
+)
+.add_prototype("isv_base")
+.add_parameter("isv_base", ":py:class:`bob.learn.em.ISVBase`", "ISVBase Object");
+static PyObject* PyBobLearnEMISVTrainer_m_step(PyBobLearnEMISVTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // Parses input arguments in a single shot 
+  char** kwlist = m_step.kwlist(0);
+
+  PyBobLearnEMISVBaseObject* isv_base = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMISVBase_Type, &isv_base)) return 0;
+
+  self->cxx->mStep(*isv_base->cxx);
+
+  BOB_CATCH_MEMBER("cannot perform the m_step method", 0)
+
+  Py_RETURN_NONE;
+}
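+
+/* Training-loop sketch (hedged): the raw initialize/e_step/m_step cycle
+   exposed by the methods above; a higher-level trainer in the Python layer
+   may automate it. The iteration count is illustrative.
+
+     # data: list (one entry per client) of lists of bob.learn.em.GMMStats
+     trainer.initialize(isv_base, data)
+     for _ in range(10):
+         trainer.e_step(isv_base, data)
+         trainer.m_step(isv_base)
+*/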
+
+
+
+/*** enrol ***/
+static auto enrol = bob::extension::FunctionDoc(
+  "enrol",
+  "Enrol a client model, estimating its latent variables from a list of GMM statistics.",
+  "",
+  true
+)
+.add_prototype("isv_machine,features,n_iter","")
+.add_parameter("isv_machine", ":py:class:`bob.learn.em.ISVMachine`", "ISVMachine Object")
+.add_parameter("features", "list(:py:class:`bob.learn.em.GMMStats`)", "GMM statistics of the client to be enrolled")
+.add_parameter("n_iter", "int", "Number of iterations");
+static PyObject* PyBobLearnEMISVTrainer_enrol(PyBobLearnEMISVTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // Parses input arguments in a single shot
+  char** kwlist = enrol.kwlist(0);
+
+  PyBobLearnEMISVMachineObject* isv_machine = 0;
+  PyObject* stats = 0;
+  int n_iter = 1;
+
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!i", kwlist, &PyBobLearnEMISVMachine_Type, &isv_machine,
+                                                                  &PyList_Type, &stats, &n_iter)) return 0;
+
+  std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > training_data;
+  if(extract_GMMStats_1d(stats ,training_data)==0)
+    self->cxx->enrol(*isv_machine->cxx, training_data, n_iter);
+
+  BOB_CATCH_MEMBER("cannot perform the enrol method", 0)
+
+  Py_RETURN_NONE;
+}
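+
+/* Enrolment sketch (hedged): estimating a client model from its statistics.
+
+     # client_stats: list of bob.learn.em.GMMStats for a single client
+     trainer.enrol(isv_machine, client_stats, 5)   # 5 iterations, illustrative
+*/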
+
+
+
+static PyMethodDef PyBobLearnEMISVTrainer_methods[] = {
+  {
+    initialize.name(),
+    (PyCFunction)PyBobLearnEMISVTrainer_initialize,
+    METH_VARARGS|METH_KEYWORDS,
+    initialize.doc()
+  },
+  {
+    e_step.name(),
+    (PyCFunction)PyBobLearnEMISVTrainer_e_step,
+    METH_VARARGS|METH_KEYWORDS,
+    e_step.doc()
+  },
+  {
+    m_step.name(),
+    (PyCFunction)PyBobLearnEMISVTrainer_m_step,
+    METH_VARARGS|METH_KEYWORDS,
+    m_step.doc()
+  },
+  {
+    enrol.name(),
+    (PyCFunction)PyBobLearnEMISVTrainer_enrol,
+    METH_VARARGS|METH_KEYWORDS,
+    enrol.doc()
+  },
+  {0} /* Sentinel */
+};
+
+
+/******************************************************************/
+/************ Module Section **************************************/
+/******************************************************************/
+
+// Define the ISVTrainer type struct; will be initialized later
+PyTypeObject PyBobLearnEMISVTrainer_Type = {
+  PyVarObject_HEAD_INIT(0,0)
+  0
+};
+
+bool init_BobLearnEMISVTrainer(PyObject* module)
+{
+  // initialize the type struct
+  PyBobLearnEMISVTrainer_Type.tp_name      = ISVTrainer_doc.name();
+  PyBobLearnEMISVTrainer_Type.tp_basicsize = sizeof(PyBobLearnEMISVTrainerObject);
+  PyBobLearnEMISVTrainer_Type.tp_flags     = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE; // enable class inheritance
+  PyBobLearnEMISVTrainer_Type.tp_doc       = ISVTrainer_doc.doc();
+
+  // set the functions
+  PyBobLearnEMISVTrainer_Type.tp_new          = PyType_GenericNew;
+  PyBobLearnEMISVTrainer_Type.tp_init         = reinterpret_cast<initproc>(PyBobLearnEMISVTrainer_init);
+  PyBobLearnEMISVTrainer_Type.tp_dealloc      = reinterpret_cast<destructor>(PyBobLearnEMISVTrainer_delete);
+  PyBobLearnEMISVTrainer_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnEMISVTrainer_RichCompare);
+  PyBobLearnEMISVTrainer_Type.tp_methods      = PyBobLearnEMISVTrainer_methods;
+  PyBobLearnEMISVTrainer_Type.tp_getset       = PyBobLearnEMISVTrainer_getseters;
+  //PyBobLearnEMISVTrainer_Type.tp_call         = reinterpret_cast<ternaryfunc>(PyBobLearnEMISVTrainer_compute_likelihood);
+
+
+  // check that everything is fine
+  if (PyType_Ready(&PyBobLearnEMISVTrainer_Type) < 0) return false;
+
+  // add the type to the module
+  Py_INCREF(&PyBobLearnEMISVTrainer_Type);
+  return PyModule_AddObject(module, "_ISVTrainer", (PyObject*)&PyBobLearnEMISVTrainer_Type) >= 0;
+}
+
diff --git a/bob/learn/em/ivector_machine.cpp b/bob/learn/em/ivector_machine.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..95badbc7b375c92ad453946e08239fac34da5d15
--- /dev/null
+++ b/bob/learn/em/ivector_machine.cpp
@@ -0,0 +1,676 @@
+/**
+ * @date Wed Jan 28 17:46:15 2015 +0200
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ *
+ * @brief Python API for bob::learn::em
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "main.h"
+
+/******************************************************************/
+/************ Constructor Section *********************************/
+/******************************************************************/
+
+static auto IVectorMachine_doc = bob::extension::ClassDoc(
+  BOB_EXT_MODULE_PREFIX ".IVectorMachine",
+  "An IVectorMachine consists of a Total Variability subspace \f$T\f$ and allows the extraction of IVector"
+  "References: [Dehak2010]",
+  ""
+).add_constructor(
+  bob::extension::FunctionDoc(
+    "__init__",
+    "Constructor. Builds a new IVectorMachine",
+    "",
+    true
+  )
+  .add_prototype("ubm, rt, variance_threshold","")
+  .add_prototype("other","")
+  .add_prototype("hdf5","")
+
+  .add_parameter("ubm", ":py:class:`bob.learn.em.GMMMachine`", "The Universal Background Model.")
+  .add_parameter("rt", "int", "Size of the Total Variability matrix (CD x rt).")
+  .add_parameter("variance_threshold", "double", "Variance flooring threshold for the :math:`\\Sigma` (diagonal) matrix")
+
+  .add_parameter("other", ":py:class:`bob.learn.em.IVectorMachine`", "A IVectorMachine object to be copied.")
+  .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
+
+);
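+
+/* Construction sketch (hedged): the three documented prototypes; values are
+   illustrative only.
+
+     import bob.learn.em
+     machine = bob.learn.em.IVectorMachine(ubm, 40)        # rank rt = 40
+     machine = bob.learn.em.IVectorMachine(other_machine)  # copy constructor
+     machine = bob.learn.em.IVectorMachine(hdf5_file)      # load from HDF5
+*/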
+
+
+static int PyBobLearnEMIVectorMachine_init_copy(PyBobLearnEMIVectorMachineObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = IVectorMachine_doc.kwlist(1);
+  PyBobLearnEMIVectorMachineObject* o;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMIVectorMachine_Type, &o)){
+    IVectorMachine_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::IVectorMachine(*o->cxx));
+  return 0;
+}
+
+
+static int PyBobLearnEMIVectorMachine_init_hdf5(PyBobLearnEMIVectorMachineObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = IVectorMachine_doc.kwlist(2);
+
+  PyBobIoHDF5FileObject* config = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBobIoHDF5File_Converter, &config)){
+    IVectorMachine_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::IVectorMachine(*(config->f)));
+
+  return 0;
+}
+
+
+static int PyBobLearnEMIVectorMachine_init_ubm(PyBobLearnEMIVectorMachineObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = IVectorMachine_doc.kwlist(0);
+  
+  PyBobLearnEMGMMMachineObject* gmm_machine;
+  int rt = 1;
+  double variance_threshold = 1e-10;
+
+  //Here we have to select which keyword argument to read  
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!i|d", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine, &rt, &variance_threshold)){
+    IVectorMachine_doc.print_usage();
+    return -1;
+  }
+    
+  if(rt < 1){
+    PyErr_Format(PyExc_TypeError, "rt argument must be greater than or equal to one");
+    return -1;
+  }
+  
+  if(variance_threshold <= 0){
+    PyErr_Format(PyExc_TypeError, "variance_threshold argument must be greater than zero");
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::IVectorMachine(gmm_machine->cxx, rt, variance_threshold));
+  return 0;
+}
+
+
+static int PyBobLearnEMIVectorMachine_init(PyBobLearnEMIVectorMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // get the number of command line arguments
+  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
+
+  if(nargs == 1){
+    //Reading the input argument
+    PyObject* arg = 0;
+    if (PyTuple_Size(args))
+      arg = PyTuple_GET_ITEM(args, 0);
+    else {
+      PyObject* tmp = PyDict_Values(kwargs);
+      auto tmp_ = make_safe(tmp);
+      arg = PyList_GET_ITEM(tmp, 0);
+    }
+
+    // If the constructor input is an IVectorMachine object
+    if (PyBobLearnEMIVectorMachine_Check(arg))
+      return PyBobLearnEMIVectorMachine_init_copy(self, args, kwargs);
+    // If the constructor input is a HDF5
+    else
+      return PyBobLearnEMIVectorMachine_init_hdf5(self, args, kwargs);
+  }
+  else if ((nargs == 2) || (nargs == 3))
+    return PyBobLearnEMIVectorMachine_init_ubm(self, args, kwargs);
+  else{
+    PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires 1, 2 or 3 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
+    IVectorMachine_doc.print_usage();
+    return -1;
+  }
+  
+  BOB_CATCH_MEMBER("cannot create IVectorMachine", 0)
+  return 0;
+}
+
+static void PyBobLearnEMIVectorMachine_delete(PyBobLearnEMIVectorMachineObject* self) {
+  self->cxx.reset();
+  Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+static PyObject* PyBobLearnEMIVectorMachine_RichCompare(PyBobLearnEMIVectorMachineObject* self, PyObject* other, int op) {
+  BOB_TRY
+
+  if (!PyBobLearnEMIVectorMachine_Check(other)) {
+    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
+    return 0;
+  }
+  auto other_ = reinterpret_cast<PyBobLearnEMIVectorMachineObject*>(other);
+  switch (op) {
+    case Py_EQ:
+      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+    case Py_NE:
+      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
+    default:
+      Py_INCREF(Py_NotImplemented);
+      return Py_NotImplemented;
+  }
+  BOB_CATCH_MEMBER("cannot compare IVectorMachine objects", 0)
+}
+
+int PyBobLearnEMIVectorMachine_Check(PyObject* o) {
+  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnEMIVectorMachine_Type));
+}
+
+
+/******************************************************************/
+/************ Variables Section ***********************************/
+/******************************************************************/
+
+/***** shape *****/
+static auto shape = bob::extension::VariableDoc(
+  "shape",
+  "(int,int, int)",
+  "A tuple that represents the number of gaussians, dimensionality of each Gaussian, dimensionality of the rT (total variability matrix) ``(#Gaussians, #Inputs, #rT)``.",
+  ""
+);
+PyObject* PyBobLearnEMIVectorMachine_getShape(PyBobLearnEMIVectorMachineObject* self, void*) {
+  BOB_TRY
+  return Py_BuildValue("(i,i,i)", self->cxx->getNGaussians(), self->cxx->getNInputs(), self->cxx->getDimRt());
+  BOB_CATCH_MEMBER("shape could not be read", 0)
+}
+
+/***** supervector_length *****/
+static auto supervector_length = bob::extension::VariableDoc(
+  "supervector_length",
+  "int",
+
+  "Returns the supervector length: "
+  "NGaussians x NInputs (number of Gaussian components times the feature dimensionality)",
+
+  ".. warning:: An exception is thrown if no Universal Background Model has been set yet."
+);
+PyObject* PyBobLearnEMIVectorMachine_getSupervectorLength(PyBobLearnEMIVectorMachineObject* self, void*) {
+  BOB_TRY
+  return Py_BuildValue("i", self->cxx->getSupervectorLength());
+  BOB_CATCH_MEMBER("supervector_length could not be read", 0)
+}
+
+
+/***** T *****/
+static auto T = bob::extension::VariableDoc(
+  "t",
+  "array_like <float, 2D>",
+  "Returns the Total Variability matrix",
+  ""
+);
+PyObject* PyBobLearnEMIVectorMachine_getT(PyBobLearnEMIVectorMachineObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getT());
+  BOB_CATCH_MEMBER("`t` could not be read", 0)
+}
+int PyBobLearnEMIVectorMachine_setT(PyBobLearnEMIVectorMachineObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, T.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "t");
+  if (!b) return -1;
+  self->cxx->setT(*b);
+  return 0;
+  BOB_CATCH_MEMBER("`t` vector could not be set", -1)
+}
+
+
+/***** sigma *****/
+static auto sigma = bob::extension::VariableDoc(
+  "sigma",
+  "array_like <float, 1D>",
+  "The residual matrix of the model sigma",
+  ""
+);
+PyObject* PyBobLearnEMIVectorMachine_getSigma(PyBobLearnEMIVectorMachineObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getSigma());
+  BOB_CATCH_MEMBER("`sigma` could not be read", 0)
+}
+int PyBobLearnEMIVectorMachine_setSigma(PyBobLearnEMIVectorMachineObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, sigma.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "sigma");
+  if (!b) return -1;
+  self->cxx->setSigma(*b);
+  return 0;
+  BOB_CATCH_MEMBER("`sigma` vector could not be set", -1)
+}
+
+
+/***** variance_threshold *****/
+static auto variance_threshold = bob::extension::VariableDoc(
+  "variance_threshold",
+  "double",
+  "Threshold for the variance contained in sigma",
+  ""
+);
+PyObject* PyBobLearnEMIVectorMachine_getVarianceThreshold(PyBobLearnEMIVectorMachineObject* self, void*) {
+  BOB_TRY
+  return Py_BuildValue("d", self->cxx->getVarianceThreshold());
+  BOB_CATCH_MEMBER("variance_threshold could not be read", 0)
+}
+int PyBobLearnEMIVectorMachine_setVarianceThreshold(PyBobLearnEMIVectorMachineObject* self, PyObject* value, void*){
+  BOB_TRY
+
+  if (!PyNumber_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a double", Py_TYPE(self)->tp_name, variance_threshold.name());
+    return -1;
+  }
+
+  double v = PyFloat_AsDouble(value);  // unlike PyFloat_AS_DOUBLE, also converts ints
+  if (v == -1. && PyErr_Occurred()) return -1;
+
+  if (v < 0){
+    PyErr_Format(PyExc_TypeError, "variance_threshold must be greater than or equal to zero");
+    return -1;
+  }
+
+  self->cxx->setVarianceThreshold(v);
+  BOB_CATCH_MEMBER("variance_threshold could not be set", -1)
+  return 0;
+}
+
+
+/***** ubm *****/
+static auto ubm = bob::extension::VariableDoc(
+  "ubm",
+  ":py:class:`bob.learn.em.GMMMachine`",
+  "Returns the UBM (Universal Background Model",
+  ""
+);
+PyObject* PyBobLearnEMIVectorMachine_getUBM(PyBobLearnEMIVectorMachineObject* self, void*){
+  BOB_TRY
+
+  boost::shared_ptr<bob::learn::em::GMMMachine> ubm_gmmMachine = self->cxx->getUbm();
+
+  //Allocating the correspondent python object
+  PyBobLearnEMGMMMachineObject* retval =
+    (PyBobLearnEMGMMMachineObject*)PyBobLearnEMGMMMachine_Type.tp_alloc(&PyBobLearnEMGMMMachine_Type, 0);
+  retval->cxx = ubm_gmmMachine;
+
+  return Py_BuildValue("O",retval);
+  BOB_CATCH_MEMBER("ubm could not be read", 0)
+}
+int PyBobLearnEMIVectorMachine_setUBM(PyBobLearnEMIVectorMachineObject* self, PyObject* value, void*){
+  BOB_TRY
+
+  if (!PyBobLearnEMGMMMachine_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a :py:class:`bob.learn.em.GMMMachine`", Py_TYPE(self)->tp_name, ubm.name());
+    return -1;
+  }
+
+  PyBobLearnEMGMMMachineObject* ubm_gmmMachine = 0;
+  PyArg_Parse(value, "O!", &PyBobLearnEMGMMMachine_Type,&ubm_gmmMachine);
+
+  self->cxx->setUbm(ubm_gmmMachine->cxx);
+
+  return 0;
+  BOB_CATCH_MEMBER("ubm could not be set", -1)  
+}
+
+
+static PyGetSetDef PyBobLearnEMIVectorMachine_getseters[] = { 
+  {
+   shape.name(),
+   (getter)PyBobLearnEMIVectorMachine_getShape,
+   0,
+   shape.doc(),
+   0
+  },
+  
+  {
+   supervector_length.name(),
+   (getter)PyBobLearnEMIVectorMachine_getSupervectorLength,
+   0,
+   supervector_length.doc(),
+   0
+  },
+  
+  {
+   T.name(),
+   (getter)PyBobLearnEMIVectorMachine_getT,
+   (setter)PyBobLearnEMIVectorMachine_setT,
+   T.doc(),
+   0
+  },
+
+  {
+   variance_threshold.name(),
+   (getter)PyBobLearnEMIVectorMachine_getVarianceThreshold,
+   (setter)PyBobLearnEMIVectorMachine_setVarianceThreshold,
+   variance_threshold.doc(),
+   0
+  },
+
+  {
+   sigma.name(),
+   (getter)PyBobLearnEMIVectorMachine_getSigma,
+   (setter)PyBobLearnEMIVectorMachine_setSigma,
+   sigma.doc(),
+   0
+  },
+
+  {
+   ubm.name(),
+   (getter)PyBobLearnEMIVectorMachine_getUBM,
+   (setter)PyBobLearnEMIVectorMachine_setUBM,
+   ubm.doc(),
+   0
+  },
+
+
+  {0}  // Sentinel
+};
+
+
+/******************************************************************/
+/************ Functions Section ***********************************/
+/******************************************************************/
+
+
+/*** save ***/
+static auto save = bob::extension::FunctionDoc(
+  "save",
+  "Save the configuration of the IVectorMachine to a given HDF5 file"
+)
+.add_prototype("hdf5")
+.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing");
+static PyObject* PyBobLearnEMIVectorMachine_Save(PyBobLearnEMIVectorMachineObject* self,  PyObject* args, PyObject* kwargs) {
+
+  BOB_TRY
+  
+  // get list of arguments
+  char** kwlist = save.kwlist(0);  
+  PyBobIoHDF5FileObject* hdf5;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
+
+  auto hdf5_ = make_safe(hdf5);
+  self->cxx->save(*hdf5->f);
+
+  BOB_CATCH_MEMBER("cannot save the data", 0)
+  Py_RETURN_NONE;
+}
+
+/*** load ***/
+static auto load = bob::extension::FunctionDoc(
+  "load",
+  "Load the configuration of the IVectorMachine to a given HDF5 file"
+)
+.add_prototype("hdf5")
+.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
+static PyObject* PyBobLearnEMIVectorMachine_Load(PyBobLearnEMIVectorMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = load.kwlist(0);  
+  PyBobIoHDF5FileObject* hdf5;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
+  
+  auto hdf5_ = make_safe(hdf5);  
+  self->cxx->load(*hdf5->f);
+
+  BOB_CATCH_MEMBER("cannot load the data", 0)
+  Py_RETURN_NONE;
+}
+
+
+/*** is_similar_to ***/
+static auto is_similar_to = bob::extension::FunctionDoc(
+  "is_similar_to",
+  
+  "Compares this IVectorMachine with the ``other`` one to be approximately the same.",
+  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
+  "relative and absolute precision for the ``weights``, ``biases`` "
+  "and any other values internal to this machine."
+)
+.add_prototype("other, [r_epsilon], [a_epsilon]","output")
+.add_parameter("other", ":py:class:`bob.learn.em.IVectorMachine`", "A IVectorMachine object to be compared.")
+.add_parameter("r_epsilon", "float", "Relative precision.")
+.add_parameter("a_epsilon", "float", "Absolute precision.")
+.add_return("output","bool","True if it is similar, otherwise false.");
+static PyObject* PyBobLearnEMIVectorMachine_IsSimilarTo(PyBobLearnEMIVectorMachineObject* self, PyObject* args, PyObject* kwds) {
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = is_similar_to.kwlist(0);
+
+  //PyObject* other = 0;
+  PyBobLearnEMIVectorMachineObject* other = 0;
+  double r_epsilon = 1.e-5;
+  double a_epsilon = 1.e-8;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
+        &PyBobLearnEMIVectorMachine_Type, &other,
+        &r_epsilon, &a_epsilon)){
+
+        is_similar_to.print_usage(); 
+        return 0;        
+  }
+
+  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
+    Py_RETURN_TRUE;
+  else
+    Py_RETURN_FALSE;
+}
+
+
+
+/*** forward ***/
+static auto forward = bob::extension::FunctionDoc(
+  "forward",
+  "Execute the machine",
+  "", 
+  true
+)
+.add_prototype("stats")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "Statistics as input");
+static PyObject* PyBobLearnEMIVectorMachine_Forward(PyBobLearnEMIVectorMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  char** kwlist = forward.kwlist(0);
+
+  PyBobLearnEMGMMStatsObject* stats = 0;
+  
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMStats_Type, &stats))
+    return 0;
+
+  blitz::Array<double,1> ivector(self->cxx->getDimRt());
+  self->cxx->forward(*stats->cxx, ivector);
+
+  return PyBlitzArrayCxx_AsConstNumpy(ivector);
+  
+  BOB_CATCH_MEMBER("cannot forward", 0)
+
+}
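+
+/* Extraction sketch (hedged): `forward` is wired to tp_call below, so the
+   machine can be used as a callable to extract an i-vector.
+
+     ivector = machine(stats)   # 1D numpy array of length rt
+*/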
+
+/*** resize ***/
+static auto resize = bob::extension::FunctionDoc(
+  "resize",
+  "Resets the dimensionality of the subspace T. ",
+  0,
+  true
+)
+.add_prototype("rT")
+.add_parameter("rT", "int", "Size of T (Total variability matrix)");
+static PyObject* PyBobLearnEMIVectorMachine_resize(PyBobLearnEMIVectorMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = resize.kwlist(0);
+
+  int rT = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &rT)) return 0;
+
+  if (rT < 1){
+    PyErr_Format(PyExc_TypeError, "rT must be greater than or equal to one");
+    resize.print_usage();
+    return 0;
+  }
+
+  self->cxx->resize(rT);
+
+  BOB_CATCH_MEMBER("cannot perform the resize method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** __compute_Id_TtSigmaInvT__ ***/
+static auto __compute_Id_TtSigmaInvT__ = bob::extension::FunctionDoc(
+  "__compute_Id_TtSigmaInvT__",
+  "",
+  "", 
+  true
+)
+.add_prototype("stats")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "Statistics as input");
+static PyObject* PyBobLearnEMIVectorMachine_compute_Id_TtSigmaInvT__(PyBobLearnEMIVectorMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  char** kwlist = __compute_Id_TtSigmaInvT__.kwlist(0);
+
+  PyBobLearnEMGMMStatsObject* stats = 0;
+  
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMStats_Type, &stats))
+    return 0;
+
+
+  blitz::Array<double,2> output(self->cxx->getDimRt(), self->cxx->getDimRt());
+  self->cxx->computeIdTtSigmaInvT(*stats->cxx, output);
+  return PyBlitzArrayCxx_AsConstNumpy(output);
+  
+  BOB_CATCH_MEMBER("cannot __compute_Id_TtSigmaInvT__", 0)
+}
+
+
+
+/*** __compute_TtSigmaInvFnorm__ ***/
+static auto __compute_TtSigmaInvFnorm__ = bob::extension::FunctionDoc(
+  "__compute_TtSigmaInvFnorm__",
+  "",
+  "", 
+  true
+)
+.add_prototype("stats")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "Statistics as input");
+static PyObject* PyBobLearnEMIVectorMachine_compute_TtSigmaInvFnorm__(PyBobLearnEMIVectorMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  char** kwlist = __compute_TtSigmaInvFnorm__.kwlist(0);
+
+  PyBobLearnEMGMMStatsObject* stats = 0;
+  
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMStats_Type, &stats))
+    return 0;
+
+
+  blitz::Array<double,1> output(self->cxx->getDimRt());
+  self->cxx->computeTtSigmaInvFnorm(*stats->cxx, output);
+  return PyBlitzArrayCxx_AsConstNumpy(output);
+  
+  BOB_CATCH_MEMBER("cannot __compute_TtSigmaInvFnorm__", 0)
+}
+
+
+
+
+static PyMethodDef PyBobLearnEMIVectorMachine_methods[] = {
+  {
+    save.name(),
+    (PyCFunction)PyBobLearnEMIVectorMachine_Save,
+    METH_VARARGS|METH_KEYWORDS,
+    save.doc()
+  },
+  {
+    load.name(),
+    (PyCFunction)PyBobLearnEMIVectorMachine_Load,
+    METH_VARARGS|METH_KEYWORDS,
+    load.doc()
+  },
+  {
+    is_similar_to.name(),
+    (PyCFunction)PyBobLearnEMIVectorMachine_IsSimilarTo,
+    METH_VARARGS|METH_KEYWORDS,
+    is_similar_to.doc()
+  },
+  {
+    resize.name(),
+    (PyCFunction)PyBobLearnEMIVectorMachine_resize,
+    METH_VARARGS|METH_KEYWORDS,
+    resize.doc()
+  },
+  {
+    __compute_Id_TtSigmaInvT__.name(),
+    (PyCFunction)PyBobLearnEMIVectorMachine_compute_Id_TtSigmaInvT__,
+    METH_VARARGS|METH_KEYWORDS,
+    __compute_Id_TtSigmaInvT__.doc()
+  },
+  {
+    __compute_TtSigmaInvFnorm__.name(),
+    (PyCFunction)PyBobLearnEMIVectorMachine_compute_TtSigmaInvFnorm__,
+    METH_VARARGS|METH_KEYWORDS,
+    __compute_TtSigmaInvFnorm__.doc()
+  },
+
+/*
+  {
+    forward.name(),
+    (PyCFunction)PyBobLearnEMIVectorMachine_Forward,
+    METH_VARARGS|METH_KEYWORDS,
+    forward.doc()
+  },*/
+
+
+  {0} /* Sentinel */
+};
+
+
+/******************************************************************/
+/************ Module Section **************************************/
+/******************************************************************/
+
+// Define the IVectorMachine type struct; will be initialized later
+PyTypeObject PyBobLearnEMIVectorMachine_Type = {
+  PyVarObject_HEAD_INIT(0,0)
+  0
+};
+
+bool init_BobLearnEMIVectorMachine(PyObject* module)
+{
+  // initialize the type struct
+  PyBobLearnEMIVectorMachine_Type.tp_name      = IVectorMachine_doc.name();
+  PyBobLearnEMIVectorMachine_Type.tp_basicsize = sizeof(PyBobLearnEMIVectorMachineObject);
+  PyBobLearnEMIVectorMachine_Type.tp_flags     = Py_TPFLAGS_DEFAULT;
+  PyBobLearnEMIVectorMachine_Type.tp_doc       = IVectorMachine_doc.doc();
+
+  // set the functions
+  PyBobLearnEMIVectorMachine_Type.tp_new         = PyType_GenericNew;
+  PyBobLearnEMIVectorMachine_Type.tp_init        = reinterpret_cast<initproc>(PyBobLearnEMIVectorMachine_init);
+  PyBobLearnEMIVectorMachine_Type.tp_dealloc     = reinterpret_cast<destructor>(PyBobLearnEMIVectorMachine_delete);
+  PyBobLearnEMIVectorMachine_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnEMIVectorMachine_RichCompare);
+  PyBobLearnEMIVectorMachine_Type.tp_methods     = PyBobLearnEMIVectorMachine_methods;
+  PyBobLearnEMIVectorMachine_Type.tp_getset      = PyBobLearnEMIVectorMachine_getseters;
+  PyBobLearnEMIVectorMachine_Type.tp_call        = reinterpret_cast<ternaryfunc>(PyBobLearnEMIVectorMachine_Forward);
+
+
+  // check that everything is fine
+  if (PyType_Ready(&PyBobLearnEMIVectorMachine_Type) < 0) return false;
+
+  // add the type to the module
+  Py_INCREF(&PyBobLearnEMIVectorMachine_Type);
+  return PyModule_AddObject(module, "IVectorMachine", (PyObject*)&PyBobLearnEMIVectorMachine_Type) >= 0;
+}
+
diff --git a/bob/learn/em/ivector_trainer.cpp b/bob/learn/em/ivector_trainer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..da2773eefc8e33abba6392c2c7a888da8f9fa773
--- /dev/null
+++ b/bob/learn/em/ivector_trainer.cpp
@@ -0,0 +1,453 @@
+/**
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ * @date Tue 03 Feb 10:29:00 2015
+ *
+ * @brief Python API for bob::learn::em
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "main.h"
+#include <boost/make_shared.hpp>
+
+/******************************************************************/
+/************ Constructor Section *********************************/
+/******************************************************************/
+
+static inline bool f(PyObject* o){return o != 0 && PyObject_IsTrue(o) > 0;}  /* converts PyObject to bool and returns false if object is NULL */
+
+static int extract_GMMStats_1d(PyObject *list,
+                             std::vector<bob::learn::em::GMMStats>& training_data)
+{
+  for (int i=0; i<PyList_GET_SIZE(list); i++){
+  
+    PyBobLearnEMGMMStatsObject* stats;
+    if (!PyArg_Parse(PyList_GetItem(list, i), "O!", &PyBobLearnEMGMMStats_Type, &stats)){
+      PyErr_Format(PyExc_RuntimeError, "Expected GMMStats objects");
+      return -1;
+    }
+    training_data.push_back(*stats->cxx);
+
+  }
+  return 0;
+}
+
+
+static auto IVectorTrainer_doc = bob::extension::ClassDoc(
+  BOB_EXT_MODULE_PREFIX ".IVectorTrainer",
+  "An IVectorTrainer to learn a Total Variability subspace :math:`T` "
+  "(and eventually a covariance matrix :math:`\\Sigma`).",
+  "References: [Dehak2010]_"
+).add_constructor(
+  bob::extension::FunctionDoc(
+    "__init__",
+    "Constructor. Builds a new IVectorTrainer",
+    "",
+    true
+  )
+  .add_prototype("update_sigma","")
+  .add_prototype("other","")
+  .add_prototype("","")
+  .add_parameter("other", ":py:class:`bob.learn.em.IVectorTrainer`", "A IVectorTrainer object to be copied.")
+  .add_parameter("update_sigma", "bool", "")
+);
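+
+/* Construction sketch (hedged): assuming the `_IVectorTrainer` type
+   registered below is re-exported as `bob.learn.em.IVectorTrainer` by the
+   pure-Python layer.
+
+     trainer = bob.learn.em.IVectorTrainer(update_sigma=True)
+*/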
+
+
+static int PyBobLearnEMIVectorTrainer_init_copy(PyBobLearnEMIVectorTrainerObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = IVectorTrainer_doc.kwlist(1);
+  PyBobLearnEMIVectorTrainerObject* o;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMIVectorTrainer_Type, &o)){
+    IVectorTrainer_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::IVectorTrainer(*o->cxx));
+  return 0;
+}
+
+
+static int PyBobLearnEMIVectorTrainer_init_bool(PyBobLearnEMIVectorTrainerObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = IVectorTrainer_doc.kwlist(0);
+  PyObject* update_sigma   = 0;
+
+  //Parsing the input argments
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBool_Type, &update_sigma))
+    return -1;
+  
+  self->cxx.reset(new bob::learn::em::IVectorTrainer(f(update_sigma)));
+  return 0;
+}
+
+
+static int PyBobLearnEMIVectorTrainer_init(PyBobLearnEMIVectorTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // get the number of command line arguments
+  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
+
+  switch(nargs){
+    case 0:{
+      self->cxx.reset(new bob::learn::em::IVectorTrainer());
+      return 0;
+    }
+    case 1:{
+      //Reading the input argument
+      PyObject* arg = 0;
+      if (PyTuple_Size(args))
+        arg = PyTuple_GET_ITEM(args, 0);
+      else {
+        PyObject* tmp = PyDict_Values(kwargs);
+        auto tmp_ = make_safe(tmp);
+        arg = PyList_GET_ITEM(tmp, 0);
+      }
+
+      // If the constructor input is IVectorTrainer object
+      if(PyBobLearnEMIVectorTrainer_Check(arg))            
+        return PyBobLearnEMIVectorTrainer_init_copy(self, args, kwargs);
+      else
+        return PyBobLearnEMIVectorTrainer_init_bool(self, args, kwargs);      
+      
+    }
+    default:{
+      PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires only 0 or 1 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
+      IVectorTrainer_doc.print_usage();
+      return -1;
+    }
+  }
+  BOB_CATCH_MEMBER("cannot create IVectorTrainer", 0)
+  return 0;
+}
+
+
+static void PyBobLearnEMIVectorTrainer_delete(PyBobLearnEMIVectorTrainerObject* self) {
+  self->cxx.reset();
+  Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+
+int PyBobLearnEMIVectorTrainer_Check(PyObject* o) {
+  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnEMIVectorTrainer_Type));
+}
+
+
+static PyObject* PyBobLearnEMIVectorTrainer_RichCompare(PyBobLearnEMIVectorTrainerObject* self, PyObject* other, int op) {
+  BOB_TRY
+
+  if (!PyBobLearnEMIVectorTrainer_Check(other)) {
+    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
+    return 0;
+  }
+  auto other_ = reinterpret_cast<PyBobLearnEMIVectorTrainerObject*>(other);
+  switch (op) {
+    case Py_EQ:
+      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+    case Py_NE:
+      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
+    default:
+      Py_INCREF(Py_NotImplemented);
+      return Py_NotImplemented;
+  }
+  BOB_CATCH_MEMBER("cannot compare IVectorTrainer objects", 0)
+}
+
+
+/******************************************************************/
+/************ Variables Section ***********************************/
+/******************************************************************/
+
+static auto acc_nij_wij2 = bob::extension::VariableDoc(
+  "acc_nij_wij2",
+  "array_like <float, 3D>",
+  "Accumulator updated during the E-step",
+  ""
+);
+PyObject* PyBobLearnEMIVectorTrainer_get_acc_nij_wij2(PyBobLearnEMIVectorTrainerObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccNijWij2());
+  BOB_CATCH_MEMBER("acc_nij_wij2 could not be read", 0)
+}
+int PyBobLearnEMIVectorTrainer_set_acc_nij_wij2(PyBobLearnEMIVectorTrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 3D array of floats", Py_TYPE(self)->tp_name, acc_nij_wij2.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,3>(o, "acc_nij_wij2");
+  if (!b) return -1;
+  self->cxx->setAccNijWij2(*b);
+  return 0;
+  BOB_CATCH_MEMBER("acc_nij_wij2 could not be set", -1)
+}
+
+
+static auto acc_fnormij_wij = bob::extension::VariableDoc(
+  "acc_fnormij_wij",
+  "array_like <float, 3D>",
+  "Accumulator updated during the E-step",
+  ""
+);
+PyObject* PyBobLearnEMIVectorTrainer_get_acc_fnormij_wij(PyBobLearnEMIVectorTrainerObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccFnormijWij());
+  BOB_CATCH_MEMBER("acc_fnormij_wij could not be read", 0)
+}
+int PyBobLearnEMIVectorTrainer_set_acc_fnormij_wij(PyBobLearnEMIVectorTrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 3D array of floats", Py_TYPE(self)->tp_name, acc_fnormij_wij.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,3>(o, "acc_fnormij_wij");
+  if (!b) return -1;
+  self->cxx->setAccFnormijWij(*b);
+  return 0;
+  BOB_CATCH_MEMBER("acc_fnormij_wij could not be set", -1)
+}
+
+
+static auto acc_nij = bob::extension::VariableDoc(
+  "acc_nij",
+  "array_like <float, 1D>",
+  "Accumulator updated during the E-step",
+  ""
+);
+PyObject* PyBobLearnEMIVectorTrainer_get_acc_nij(PyBobLearnEMIVectorTrainerObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccNij());
+  BOB_CATCH_MEMBER("acc_nij could not be read", 0)
+}
+int PyBobLearnEMIVectorTrainer_set_acc_nij(PyBobLearnEMIVectorTrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, acc_nij.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "acc_nij");
+  if (!b) return -1;
+  self->cxx->setAccNij(*b);
+  return 0;
+  BOB_CATCH_MEMBER("acc_nij could not be set", -1)
+}
+
+
+static auto acc_snormij = bob::extension::VariableDoc(
+  "acc_snormij",
+  "array_like <float, 2D>",
+  "Accumulator updated during the E-step",
+  ""
+);
+PyObject* PyBobLearnEMIVectorTrainer_get_acc_snormij(PyBobLearnEMIVectorTrainerObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccSnormij());
+  BOB_CATCH_MEMBER("acc_snormij could not be read", 0)
+}
+int PyBobLearnEMIVectorTrainer_set_acc_snormij(PyBobLearnEMIVectorTrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, acc_snormij.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "acc_snormij");
+  if (!b) return -1;
+  self->cxx->setAccSnormij(*b);
+  return 0;
+  BOB_CATCH_MEMBER("acc_snormij could not be set", -1)
+}
+
+
+
+
+static PyGetSetDef PyBobLearnEMIVectorTrainer_getseters[] = { 
+  {
+   acc_nij_wij2.name(),
+   (getter)PyBobLearnEMIVectorTrainer_get_acc_nij_wij2,
+   (setter)PyBobLearnEMIVectorTrainer_set_acc_nij_wij2,
+   acc_nij_wij2.doc(),
+   0
+  },  
+  {
+   acc_fnormij_wij.name(),
+   (getter)PyBobLearnEMIVectorTrainer_get_acc_fnormij_wij,
+   (setter)PyBobLearnEMIVectorTrainer_set_acc_fnormij_wij,
+   acc_fnormij_wij.doc(),
+   0
+  },
+  {
+   acc_nij.name(),
+   (getter)PyBobLearnEMIVectorTrainer_get_acc_nij,
+   (setter)PyBobLearnEMIVectorTrainer_set_acc_nij,
+   acc_nij.doc(),
+   0
+  },
+  {
+   acc_snormij.name(),
+   (getter)PyBobLearnEMIVectorTrainer_get_acc_snormij,
+   (setter)PyBobLearnEMIVectorTrainer_set_acc_snormij,
+   acc_snormij.doc(),
+   0
+  },
+
+  {0}  // Sentinel
+};
+
+
+/******************************************************************/
+/************ Functions Section ***********************************/
+/******************************************************************/
+
+/*** initialize ***/
+static auto initialize = bob::extension::FunctionDoc(
+  "initialize",
+  "Initialization before the EM steps",
+  "",
+  true
+)
+.add_prototype("ivector_machine")
+.add_parameter("ivector_machine", ":py:class:`bob.learn.em.ISVBase`", "IVectorMachine Object");
+static PyObject* PyBobLearnEMIVectorTrainer_initialize(PyBobLearnEMIVectorTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = initialize.kwlist(0);
+
+  PyBobLearnEMIVectorMachineObject* ivector_machine = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMIVectorMachine_Type, &ivector_machine)) return 0;
+
+  self->cxx->initialize(*ivector_machine->cxx);
+
+  BOB_CATCH_MEMBER("cannot perform the initialize method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** e_step ***/
+static auto e_step = bob::extension::FunctionDoc(
+  "e_step",
+  "Call the e-step procedure (for the T subspace).",
+  "",
+  true
+)
+.add_prototype("ivector_machine,stats")
+.add_parameter("ivector_machine", ":py:class:`bob.learn.em.IVectorMachine`", "IVectorMachine Object")
+.add_parameter("stats", "list(:py:class:`bob.learn.em.GMMStats`)", "GMM statistics of the training set");
+static PyObject* PyBobLearnEMIVectorTrainer_e_step(PyBobLearnEMIVectorTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // Parses input arguments in a single shot
+  char** kwlist = e_step.kwlist(0);
+
+  PyBobLearnEMIVectorMachineObject* ivector_machine = 0;
+  PyObject* stats = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnEMIVectorMachine_Type, &ivector_machine,
+                                                                 &PyList_Type, &stats)) return 0;
+
+  std::vector<bob::learn::em::GMMStats> training_data;
+  if(extract_GMMStats_1d(stats ,training_data)==0)
+    self->cxx->eStep(*ivector_machine->cxx, training_data);
+
+  Py_RETURN_NONE;
+  BOB_CATCH_MEMBER("cannot perform the e_step method", 0)
+}
+
+
+/*** m_step ***/
+static auto m_step = bob::extension::FunctionDoc(
+  "m_step",
+  "Call the m-step procedure (for the T subspace).",
+  "",
+  true
+)
+.add_prototype("ivector_machine")
+.add_parameter("ivector_machine", ":py:class:`bob.learn.em.IVectorMachine`", "IVectorMachine Object");
+static PyObject* PyBobLearnEMIVectorTrainer_m_step(PyBobLearnEMIVectorTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // Parses input arguments in a single shot 
+  char** kwlist = m_step.kwlist(0);
+
+  PyBobLearnEMIVectorMachineObject* ivector_machine = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMIVectorMachine_Type, &ivector_machine)) return 0;
+
+  self->cxx->mStep(*ivector_machine->cxx);
+
+  BOB_CATCH_MEMBER("cannot perform the m_step method", 0)
+
+  Py_RETURN_NONE;
+}
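+
+/* Training-loop sketch (hedged): learning T from a flat list of GMMStats;
+   the iteration count is illustrative.
+
+     trainer.initialize(ivector_machine)
+     for _ in range(10):
+         trainer.e_step(ivector_machine, stats_list)
+         trainer.m_step(ivector_machine)
+*/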
+
+
+
+static PyMethodDef PyBobLearnEMIVectorTrainer_methods[] = {
+  {
+    initialize.name(),
+    (PyCFunction)PyBobLearnEMIVectorTrainer_initialize,
+    METH_VARARGS|METH_KEYWORDS,
+    initialize.doc()
+  },
+  {
+    e_step.name(),
+    (PyCFunction)PyBobLearnEMIVectorTrainer_e_step,
+    METH_VARARGS|METH_KEYWORDS,
+    e_step.doc()
+  },
+  {
+    m_step.name(),
+    (PyCFunction)PyBobLearnEMIVectorTrainer_m_step,
+    METH_VARARGS|METH_KEYWORDS,
+    m_step.doc()
+  },
+  {0} /* Sentinel */
+};
+
+
+/******************************************************************/
+/************ Module Section **************************************/
+/******************************************************************/
+
+// Define the IVectorTrainer type struct; will be initialized later
+PyTypeObject PyBobLearnEMIVectorTrainer_Type = {
+  PyVarObject_HEAD_INIT(0,0)
+  0
+};
+
+bool init_BobLearnEMIVectorTrainer(PyObject* module)
+{
+  // initialize the type struct
+  PyBobLearnEMIVectorTrainer_Type.tp_name      = IVectorTrainer_doc.name();
+  PyBobLearnEMIVectorTrainer_Type.tp_basicsize = sizeof(PyBobLearnEMIVectorTrainerObject);
+  PyBobLearnEMIVectorTrainer_Type.tp_flags     = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE; // enable class inheritance
+  PyBobLearnEMIVectorTrainer_Type.tp_doc       = IVectorTrainer_doc.doc();
+
+  // set the functions
+  PyBobLearnEMIVectorTrainer_Type.tp_new          = PyType_GenericNew;
+  PyBobLearnEMIVectorTrainer_Type.tp_init         = reinterpret_cast<initproc>(PyBobLearnEMIVectorTrainer_init);
+  PyBobLearnEMIVectorTrainer_Type.tp_dealloc      = reinterpret_cast<destructor>(PyBobLearnEMIVectorTrainer_delete);
+  PyBobLearnEMIVectorTrainer_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnEMIVectorTrainer_RichCompare);
+  PyBobLearnEMIVectorTrainer_Type.tp_methods      = PyBobLearnEMIVectorTrainer_methods;
+  PyBobLearnEMIVectorTrainer_Type.tp_getset       = PyBobLearnEMIVectorTrainer_getseters;
+  //PyBobLearnEMIVectorTrainer_Type.tp_call         = reinterpret_cast<ternaryfunc>(PyBobLearnEMIVectorTrainer_compute_likelihood);
+
+
+  // check that everything is fine
+  if (PyType_Ready(&PyBobLearnEMIVectorTrainer_Type) < 0) return false;
+
+  // add the type to the module
+  Py_INCREF(&PyBobLearnEMIVectorTrainer_Type);
+  return PyModule_AddObject(module, "_IVectorTrainer", (PyObject*)&PyBobLearnEMIVectorTrainer_Type) >= 0;
+}
+
diff --git a/bob/learn/em/jfa_base.cpp b/bob/learn/em/jfa_base.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d5c3d9b596e23308ba4fae6adc1de560221627d5
--- /dev/null
+++ b/bob/learn/em/jfa_base.cpp
@@ -0,0 +1,578 @@
+/**
+ * @date Wed Jan 27 17:03:15 2015 +0200
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ *
+ * @brief Python API for bob::learn::em
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "main.h"
+
+/******************************************************************/
+/************ Constructor Section *********************************/
+/******************************************************************/
+
+static auto JFABase_doc = bob::extension::ClassDoc(
+  BOB_EXT_MODULE_PREFIX ".JFABase",
+  "A JFABase instance can be seen as a container for U, V and D when performing Joint Factor Analysis (JFA)."
+  "References: [Vogt2008,McCool2013]",
+  ""
+).add_constructor(
+  bob::extension::FunctionDoc(
+    "__init__",
+    "Constructor. Builds a new JFABase",
+    "",
+    true
+  )
+  .add_prototype("gmm,ru,rv","")
+  .add_prototype("other","")
+  .add_prototype("hdf5","")
+  .add_prototype("","")
+
+  .add_parameter("gmm", ":py:class:`bob.learn.em.GMMMachine`", "The Universal Background Model.")
+  .add_parameter("ru", "int", "Size of U (Within client variation matrix). In the end the U matrix will have (number_of_gaussians * feature_dimension x ru)")
+  .add_parameter("rv", "int", "Size of V (Between client variation matrix). In the end the U matrix will have (number_of_gaussians * feature_dimension x rv)")
+  .add_parameter("other", ":py:class:`bob.learn.em.JFABase`", "A JFABase object to be copied.")
+  .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
+
+);
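+
+// Illustrative sketch (comment only): per the prototypes above, a JFABase can
+// be built from a UBM plus the two subspace ranks, or copied. Assuming `ubm`
+// is a trained bob.learn.em.GMMMachine with 2 Gaussians over 3-dimensional
+// features, U and V would both end up with shape (6 x 2):
+//
+//   jfa_base = bob.learn.em.JFABase(ubm, 2, 2)
+//   copied   = bob.learn.em.JFABase(jfa_base)   # copy constructor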
+
+
+static int PyBobLearnEMJFABase_init_copy(PyBobLearnEMJFABaseObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = JFABase_doc.kwlist(1);
+  PyBobLearnEMJFABaseObject* o;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMJFABase_Type, &o)){
+    JFABase_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::JFABase(*o->cxx));
+  return 0;
+}
+
+
+static int PyBobLearnEMJFABase_init_hdf5(PyBobLearnEMJFABaseObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = JFABase_doc.kwlist(2);
+
+  PyBobIoHDF5FileObject* config = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBobIoHDF5File_Converter, &config)){
+    JFABase_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::JFABase(*(config->f)));
+
+  return 0;
+}
+
+
+static int PyBobLearnEMJFABase_init_ubm(PyBobLearnEMJFABaseObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = JFABase_doc.kwlist(0);
+  
+  PyBobLearnEMGMMMachineObject* ubm;
+  int ru = 1;
+  int rv = 1;
+
+  //Here we have to select which keyword argument to read  
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!ii", kwlist, &PyBobLearnEMGMMMachine_Type, &ubm,
+                                                                &ru, &rv)){
+    JFABase_doc.print_usage();
+    return -1;
+  }
+  
+  if(ru < 0){
+    PyErr_Format(PyExc_TypeError, "ru argument must be greater than or equal to one");
+    return -1;
+  }
+  
+  if(rv < 0){
+    PyErr_Format(PyExc_TypeError, "rv argument must be greater than or equal to one");
+    return -1;
+  }
+  
+  self->cxx.reset(new bob::learn::em::JFABase(ubm->cxx, ru, rv));
+  return 0;
+}
+
+
+static int PyBobLearnEMJFABase_init(PyBobLearnEMJFABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // get the number of command line arguments
+  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
+    
+  switch (nargs) {
+
+    case 1:{
+      //Reading the input argument
+      PyObject* arg = 0;
+      if (PyTuple_Size(args))
+        arg = PyTuple_GET_ITEM(args, 0);
+      else {
+        PyObject* tmp = PyDict_Values(kwargs);
+        auto tmp_ = make_safe(tmp);
+        arg = PyList_GET_ITEM(tmp, 0);
+      }
+
+      // If the constructor input is a JFABase object, copy-construct
+      if (PyBobLearnEMJFABase_Check(arg))
+        return PyBobLearnEMJFABase_init_copy(self, args, kwargs);
+      // If the constructor input is an HDF5 file, load from it
+      else if (PyBobIoHDF5File_Check(arg))
+        return PyBobLearnEMJFABase_init_hdf5(self, args, kwargs);
+      // Otherwise the single argument has an unsupported type; do not fall
+      // through to the 3-argument constructor below
+      PyErr_Format(PyExc_TypeError, "expected a %s or an HDF5 file as single argument, not %s", Py_TYPE(self)->tp_name, Py_TYPE(arg)->tp_name);
+      JFABase_doc.print_usage();
+      return -1;
+    }
+    case 3:
+      return PyBobLearnEMJFABase_init_ubm(self, args, kwargs);
+    default:
+      PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires 1 or 3 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
+      JFABase_doc.print_usage();
+      return -1;
+  }
+  BOB_CATCH_MEMBER("cannot create JFABase", 0)
+  return 0;
+}
+
+
+
+static void PyBobLearnEMJFABase_delete(PyBobLearnEMJFABaseObject* self) {
+  self->cxx.reset();
+  Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+static PyObject* PyBobLearnEMJFABase_RichCompare(PyBobLearnEMJFABaseObject* self, PyObject* other, int op) {
+  BOB_TRY
+
+  if (!PyBobLearnEMJFABase_Check(other)) {
+    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
+    return 0;
+  }
+  auto other_ = reinterpret_cast<PyBobLearnEMJFABaseObject*>(other);
+  switch (op) {
+    case Py_EQ:
+      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+    case Py_NE:
+      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
+    default:
+      Py_INCREF(Py_NotImplemented);
+      return Py_NotImplemented;
+  }
+  BOB_CATCH_MEMBER("cannot compare JFABase objects", 0)
+}
+
+int PyBobLearnEMJFABase_Check(PyObject* o) {
+  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnEMJFABase_Type));
+}
+
+
+/******************************************************************/
+/************ Variables Section ***********************************/
+/******************************************************************/
+
+/***** shape *****/
+static auto shape = bob::extension::VariableDoc(
+  "shape",
+  "(int,int, int, int)",
+  "A tuple that represents the number of gaussians, dimensionality of each Gaussian, dimensionality of the rU (within client variability matrix) and dimensionality of the rV (between client variability matrix) ``(#Gaussians, #Inputs, #rU, #rV)``.",
+  ""
+);
+PyObject* PyBobLearnEMJFABase_getShape(PyBobLearnEMJFABaseObject* self, void*) {
+  BOB_TRY
+  return Py_BuildValue("(i,i,i,i)", self->cxx->getNGaussians(), self->cxx->getNInputs(), self->cxx->getDimRu(), self->cxx->getDimRv());
+  BOB_CATCH_MEMBER("shape could not be read", 0)
+}
+
+/***** supervector_length *****/
+static auto supervector_length = bob::extension::VariableDoc(
+  "supervector_length",
+  "int",
+
+  "Returns the supervector length."
+  "NGaussians x NInputs: Number of Gaussian components by the feature dimensionality",
+  
+  "@warning An exception is thrown if no Universal Background Model has been set yet."
+);
+PyObject* PyBobLearnEMJFABase_getSupervectorLength(PyBobLearnEMJFABaseObject* self, void*) {
+  BOB_TRY
+  return Py_BuildValue("i", self->cxx->getSupervectorLength());
+  BOB_CATCH_MEMBER("supervector_length could not be read", 0)
+}
+
+
+/***** u *****/
+static auto U = bob::extension::VariableDoc(
+  "u",
+  "array_like <float, 2D>",
+  "Returns the U matrix (within client variability matrix)",
+  ""
+);
+PyObject* PyBobLearnEMJFABase_getU(PyBobLearnEMJFABaseObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getU());
+  BOB_CATCH_MEMBER("``u`` could not be read", 0)
+}
+int PyBobLearnEMJFABase_setU(PyBobLearnEMJFABaseObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, U.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "u");
+  if (!b) return -1;
+  self->cxx->setU(*b);
+  return 0;
+  BOB_CATCH_MEMBER("``u`` matrix could not be set", -1)
+}
+
+/***** v *****/
+static auto V = bob::extension::VariableDoc(
+  "v",
+  "array_like <float, 2D>",
+  "Returns the V matrix (between client variability matrix)",
+  ""
+);
+PyObject* PyBobLearnEMJFABase_getV(PyBobLearnEMJFABaseObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getV());
+  BOB_CATCH_MEMBER("``v`` could not be read", 0)
+}
+int PyBobLearnEMJFABase_setV(PyBobLearnEMJFABaseObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, V.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "v");
+  if (!b) return -1;
+  self->cxx->setV(*b);
+  return 0;
+  BOB_CATCH_MEMBER("``v`` matrix could not be set", -1)
+}
+
+
+/***** d *****/
+static auto D = bob::extension::VariableDoc(
+  "d",
+  "array_like <float, 1D>",
+  "Returns the diagonal matrix diag(d) (as a 1D vector)",
+  ""
+);
+PyObject* PyBobLearnEMJFABase_getD(PyBobLearnEMJFABaseObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getD());
+  BOB_CATCH_MEMBER("``d`` could not be read", 0)
+}
+int PyBobLearnEMJFABase_setD(PyBobLearnEMJFABaseObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, D.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "d");
+  if (!b) return -1;
+  self->cxx->setD(*b);
+  return 0;
+  BOB_CATCH_MEMBER("``d`` matrix could not be set", -1)
+}
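+
+// Illustrative sketch (comment only) of the u/v/d accessors above, assuming
+// `jfa_base` was built with supervector length 6 and ru = rv = 2:
+//
+//   import numpy
+//   jfa_base.u = numpy.zeros((6, 2))   # within-client subspace, 2D
+//   jfa_base.v = numpy.zeros((6, 2))   # between-client subspace, 2D
+//   jfa_base.d = numpy.zeros((6,))     # diagonal of D, stored as a 1D vector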
+
+
+/***** ubm *****/
+static auto ubm = bob::extension::VariableDoc(
+  "ubm",
+  ":py:class:`bob.learn.em.GMMMachine`",
+  "Returns the UBM (Universal Background Model",
+  ""
+);
+PyObject* PyBobLearnEMJFABase_getUBM(PyBobLearnEMJFABaseObject* self, void*){
+  BOB_TRY
+
+  boost::shared_ptr<bob::learn::em::GMMMachine> ubm_gmmMachine = self->cxx->getUbm();
+
+  //Allocating the corresponding python object
+  PyBobLearnEMGMMMachineObject* retval =
+    (PyBobLearnEMGMMMachineObject*)PyBobLearnEMGMMMachine_Type.tp_alloc(&PyBobLearnEMGMMMachine_Type, 0);
+  retval->cxx = ubm_gmmMachine;
+
+  return Py_BuildValue("O",retval);
+  BOB_CATCH_MEMBER("ubm could not be read", 0)
+}
+int PyBobLearnEMJFABase_setUBM(PyBobLearnEMJFABaseObject* self, PyObject* value, void*){
+  BOB_TRY
+
+  if (!PyBobLearnEMGMMMachine_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a :py:class:`bob.learn.em.GMMMachine`", Py_TYPE(self)->tp_name, ubm.name());
+    return -1;
+  }
+
+  PyBobLearnEMGMMMachineObject* ubm_gmmMachine = 0;
+  PyArg_Parse(value, "O!", &PyBobLearnEMGMMMachine_Type,&ubm_gmmMachine);
+
+  self->cxx->setUbm(ubm_gmmMachine->cxx);
+
+  return 0;
+  BOB_CATCH_MEMBER("ubm could not be set", -1)  
+}
+
+
+
+
+static PyGetSetDef PyBobLearnEMJFABase_getseters[] = { 
+  {
+   shape.name(),
+   (getter)PyBobLearnEMJFABase_getShape,
+   0,
+   shape.doc(),
+   0
+  },
+  
+  {
+   supervector_length.name(),
+   (getter)PyBobLearnEMJFABase_getSupervectorLength,
+   0,
+   supervector_length.doc(),
+   0
+  },
+  
+  {
+   U.name(),
+   (getter)PyBobLearnEMJFABase_getU,
+   (setter)PyBobLearnEMJFABase_setU,
+   U.doc(),
+   0
+  },
+  
+  {
+   V.name(),
+   (getter)PyBobLearnEMJFABase_getV,
+   (setter)PyBobLearnEMJFABase_setV,
+   V.doc(),
+   0
+  },
+
+  {
+   D.name(),
+   (getter)PyBobLearnEMJFABase_getD,
+   (setter)PyBobLearnEMJFABase_setD,
+   D.doc(),
+   0
+  },
+
+  {
+   ubm.name(),
+   (getter)PyBobLearnEMJFABase_getUBM,
+   (setter)PyBobLearnEMJFABase_setUBM,
+   ubm.doc(),
+   0
+  },
+
+
+  {0}  // Sentinel
+};
+
+
+/******************************************************************/
+/************ Functions Section ***********************************/
+/******************************************************************/
+
+
+/*** save ***/
+static auto save = bob::extension::FunctionDoc(
+  "save",
+  "Save the configuration of the JFABase to a given HDF5 file"
+)
+.add_prototype("hdf5")
+.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing");
+static PyObject* PyBobLearnEMJFABase_Save(PyBobLearnEMJFABaseObject* self,  PyObject* args, PyObject* kwargs) {
+
+  BOB_TRY
+  
+  // get list of arguments
+  char** kwlist = save.kwlist(0);  
+  PyBobIoHDF5FileObject* hdf5;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
+
+  auto hdf5_ = make_safe(hdf5);
+  self->cxx->save(*hdf5->f);
+
+  BOB_CATCH_MEMBER("cannot save the data", 0)
+  Py_RETURN_NONE;
+}
+
+/*** load ***/
+static auto load = bob::extension::FunctionDoc(
+  "load",
+  "Load the configuration of the JFABase to a given HDF5 file"
+)
+.add_prototype("hdf5")
+.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
+static PyObject* PyBobLearnEMJFABase_Load(PyBobLearnEMJFABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = load.kwlist(0);  
+  PyBobIoHDF5FileObject* hdf5;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
+  
+  auto hdf5_ = make_safe(hdf5);  
+  self->cxx->load(*hdf5->f);
+
+  BOB_CATCH_MEMBER("cannot load the data", 0)
+  Py_RETURN_NONE;
+}
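+
+// Illustrative round-trip sketch (comment only) for save/load; the file name
+// is a placeholder:
+//
+//   import bob.io.base
+//   jfa_base.save(bob.io.base.HDF5File('jfa_base.hdf5', 'w'))
+//   restored = bob.learn.em.JFABase(bob.io.base.HDF5File('jfa_base.hdf5'))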
+
+
+/*** is_similar_to ***/
+static auto is_similar_to = bob::extension::FunctionDoc(
+  "is_similar_to",
+  
+  "Compares this JFABase with the ``other`` one to be approximately the same.",
+  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
+  "relative and absolute precision for the ``weights``, ``biases`` "
+  "and any other values internal to this machine."
+)
+.add_prototype("other, [r_epsilon], [a_epsilon]","output")
+.add_parameter("other", ":py:class:`bob.learn.em.JFABase`", "A JFABase object to be compared.")
+.add_parameter("r_epsilon", "float", "Relative precision.")
+.add_parameter("a_epsilon", "float", "Absolute precision.")
+.add_return("output","bool","True if it is similar, otherwise false.");
+static PyObject* PyBobLearnEMJFABase_IsSimilarTo(PyBobLearnEMJFABaseObject* self, PyObject* args, PyObject* kwds) {
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = is_similar_to.kwlist(0);
+
+  //PyObject* other = 0;
+  PyBobLearnEMJFABaseObject* other = 0;
+  double r_epsilon = 1.e-5;
+  double a_epsilon = 1.e-8;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
+        &PyBobLearnEMJFABase_Type, &other,
+        &r_epsilon, &a_epsilon)){
+
+        is_similar_to.print_usage(); 
+        return 0;        
+  }
+
+  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
+    Py_RETURN_TRUE;
+  else
+    Py_RETURN_FALSE;
+}
+
+
+/*** resize ***/
+static auto resize = bob::extension::FunctionDoc(
+  "resize",
+  "Resets the dimensionality of the subspace U and V. "
+  "U and V are hence uninitialized",
+  0,
+  true
+)
+.add_prototype("rU,rV")
+.add_parameter("rU", "int", "Size of U (Within client variation matrix)")
+.add_parameter("rV", "int", "Size of V (Between client variation matrix)");
+static PyObject* PyBobLearnEMJFABase_resize(PyBobLearnEMJFABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = resize.kwlist(0);
+
+  int rU = 0;
+  int rV = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "ii", kwlist, &rU, &rV)) Py_RETURN_NONE;
+
+  if (rU <= 0){
+    PyErr_Format(PyExc_TypeError, "rU must be greater than zero");
+    resize.print_usage();
+    return 0;
+  }
+  if (rV <= 0){
+    PyErr_Format(PyExc_TypeError, "rV must be greater than zero");
+    resize.print_usage();
+    return 0;
+  }
+  self->cxx->resize(rU, rV);
+
+  BOB_CATCH_MEMBER("cannot perform the resize method", 0)
+
+  Py_RETURN_NONE;
+}
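+
+// Illustrative sketch (comment only): resizing drops any learned content of U
+// and V, so they must be re-estimated (or re-assigned) afterwards:
+//
+//   jfa_base.resize(4, 4)
+//   assert jfa_base.shape[2:] == (4, 4)   # (#rU, #rV) part of the shape tuple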
+
+
+
+
+static PyMethodDef PyBobLearnEMJFABase_methods[] = {
+  {
+    save.name(),
+    (PyCFunction)PyBobLearnEMJFABase_Save,
+    METH_VARARGS|METH_KEYWORDS,
+    save.doc()
+  },
+  {
+    load.name(),
+    (PyCFunction)PyBobLearnEMJFABase_Load,
+    METH_VARARGS|METH_KEYWORDS,
+    load.doc()
+  },
+  {
+    is_similar_to.name(),
+    (PyCFunction)PyBobLearnEMJFABase_IsSimilarTo,
+    METH_VARARGS|METH_KEYWORDS,
+    is_similar_to.doc()
+  },
+  {
+    resize.name(),
+    (PyCFunction)PyBobLearnEMJFABase_resize,
+    METH_VARARGS|METH_KEYWORDS,
+    resize.doc()
+  },
+  
+  {0} /* Sentinel */
+};
+
+
+/******************************************************************/
+/************ Module Section **************************************/
+/******************************************************************/
+
+// Define the JFA type struct; will be initialized later
+PyTypeObject PyBobLearnEMJFABase_Type = {
+  PyVarObject_HEAD_INIT(0,0)
+  0
+};
+
+bool init_BobLearnEMJFABase(PyObject* module)
+{
+  // initialize the type struct
+  PyBobLearnEMJFABase_Type.tp_name      = JFABase_doc.name();
+  PyBobLearnEMJFABase_Type.tp_basicsize = sizeof(PyBobLearnEMJFABaseObject);
+  PyBobLearnEMJFABase_Type.tp_flags     = Py_TPFLAGS_DEFAULT;
+  PyBobLearnEMJFABase_Type.tp_doc       = JFABase_doc.doc();
+
+  // set the functions
+  PyBobLearnEMJFABase_Type.tp_new         = PyType_GenericNew;
+  PyBobLearnEMJFABase_Type.tp_init        = reinterpret_cast<initproc>(PyBobLearnEMJFABase_init);
+  PyBobLearnEMJFABase_Type.tp_dealloc     = reinterpret_cast<destructor>(PyBobLearnEMJFABase_delete);
+  PyBobLearnEMJFABase_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnEMJFABase_RichCompare);
+  PyBobLearnEMJFABase_Type.tp_methods     = PyBobLearnEMJFABase_methods;
+  PyBobLearnEMJFABase_Type.tp_getset      = PyBobLearnEMJFABase_getseters;
+  //PyBobLearnEMJFABase_Type.tp_call = reinterpret_cast<ternaryfunc>(PyBobLearnEMJFABase_forward);
+
+
+  // check that everything is fine
+  if (PyType_Ready(&PyBobLearnEMJFABase_Type) < 0) return false;
+
+  // add the type to the module
+  Py_INCREF(&PyBobLearnEMJFABase_Type);
+  return PyModule_AddObject(module, "JFABase", (PyObject*)&PyBobLearnEMJFABase_Type) >= 0;
+}
+
diff --git a/bob/learn/em/jfa_machine.cpp b/bob/learn/em/jfa_machine.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..5c78b6b1d4fc619f84b09d9a4461ef433545b49b
--- /dev/null
+++ b/bob/learn/em/jfa_machine.cpp
@@ -0,0 +1,650 @@
+/**
+ * @date Wed Jan 28 17:03:15 2015 +0200
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ *
+ * @brief Python API for bob::learn::em
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "main.h"
+
+/******************************************************************/
+/************ Constructor Section *********************************/
+/******************************************************************/
+
+static auto JFAMachine_doc = bob::extension::ClassDoc(
+  BOB_EXT_MODULE_PREFIX ".JFAMachine",
+  "A JFAMachine. An attached :py:class:`bob.learn.em.JFABase` should be provided for Joint Factor Analysis. The :py:class:`bob.learn.em.JFAMachine` carries information about the speaker factors y and z, whereas a :py:class:`bob.learn.em.JFABase` carries information about the matrices U, V and D."
+  "References: [Vogt2008,McCool2013]",
+  ""
+).add_constructor(
+  bob::extension::FunctionDoc(
+    "__init__",
+    "Constructor. Builds a new JFAMachine",
+    "",
+    true
+  )
+  .add_prototype("jfa_base","")
+  .add_prototype("other","")
+  .add_prototype("hdf5","")
+
+  .add_parameter("jfa", ":py:class:`bob.learn.em.JFABase`", "The JFABase associated with this machine")
+  .add_parameter("other", ":py:class:`bob.learn.em.JFAMachine`", "A JFAMachine object to be copied.")
+  .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
+
+);
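+
+// Illustrative sketch (comment only): a JFAMachine wraps an existing JFABase
+// and adds the client-specific latent factors on top of it. Assuming
+// `jfa_base` exists:
+//
+//   machine = bob.learn.em.JFAMachine(jfa_base)
+//   machine.y, machine.z     # 1D speaker factors attached to this machine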
+
+
+static int PyBobLearnEMJFAMachine_init_copy(PyBobLearnEMJFAMachineObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = JFAMachine_doc.kwlist(1);
+  PyBobLearnEMJFAMachineObject* o;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMJFAMachine_Type, &o)){
+    JFAMachine_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::JFAMachine(*o->cxx));
+  return 0;
+}
+
+
+static int PyBobLearnEMJFAMachine_init_hdf5(PyBobLearnEMJFAMachineObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = JFAMachine_doc.kwlist(2);
+
+  PyBobIoHDF5FileObject* config = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBobIoHDF5File_Converter, &config)){
+    JFAMachine_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::JFAMachine(*(config->f)));
+
+  return 0;
+}
+
+
+static int PyBobLearnEMJFAMachine_init_jfabase(PyBobLearnEMJFAMachineObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = JFAMachine_doc.kwlist(0);
+  
+  PyBobLearnEMJFABaseObject* jfa_base;
+
+  //Here we have to select which keyword argument to read  
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMJFABase_Type, &jfa_base)){
+    JFAMachine_doc.print_usage();
+    return -1;
+  }
+  
+  self->cxx.reset(new bob::learn::em::JFAMachine(jfa_base->cxx));
+  return 0;
+}
+
+
+static int PyBobLearnEMJFAMachine_init(PyBobLearnEMJFAMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // get the number of command line arguments
+  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
+
+  if(nargs == 1){
+    //Reading the input argument
+    PyObject* arg = 0;
+    if (PyTuple_Size(args))
+      arg = PyTuple_GET_ITEM(args, 0);
+    else {
+      PyObject* tmp = PyDict_Values(kwargs);
+      auto tmp_ = make_safe(tmp);
+      arg = PyList_GET_ITEM(tmp, 0);
+    }
+
+    // If the constructor input is a JFAMachine object
+    if (PyBobLearnEMJFAMachine_Check(arg))
+      return PyBobLearnEMJFAMachine_init_copy(self, args, kwargs);
+    // If the constructor input is a HDF5
+    else if (PyBobIoHDF5File_Check(arg))
+      return PyBobLearnEMJFAMachine_init_hdf5(self, args, kwargs);
+    // If the constructor input is a JFABase Object
+    else
+      return PyBobLearnEMJFAMachine_init_jfabase(self, args, kwargs);
+  }
+  else{
+    PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires only 1 argument, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
+    JFAMachine_doc.print_usage();
+    return -1;
+  }
+  
+  BOB_CATCH_MEMBER("cannot create JFAMachine", 0)
+  return 0;
+}
+
+static void PyBobLearnEMJFAMachine_delete(PyBobLearnEMJFAMachineObject* self) {
+  self->cxx.reset();
+  Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+static PyObject* PyBobLearnEMJFAMachine_RichCompare(PyBobLearnEMJFAMachineObject* self, PyObject* other, int op) {
+  BOB_TRY
+
+  if (!PyBobLearnEMJFAMachine_Check(other)) {
+    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
+    return 0;
+  }
+  auto other_ = reinterpret_cast<PyBobLearnEMJFAMachineObject*>(other);
+  switch (op) {
+    case Py_EQ:
+      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+    case Py_NE:
+      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
+    default:
+      Py_INCREF(Py_NotImplemented);
+      return Py_NotImplemented;
+  }
+  BOB_CATCH_MEMBER("cannot compare JFAMachine objects", 0)
+}
+
+int PyBobLearnEMJFAMachine_Check(PyObject* o) {
+  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnEMJFAMachine_Type));
+}
+
+
+/******************************************************************/
+/************ Variables Section ***********************************/
+/******************************************************************/
+
+/***** shape *****/
+static auto shape = bob::extension::VariableDoc(
+  "shape",
+  "(int,int, int, int)",
+  "A tuple that represents the number of gaussians, dimensionality of each Gaussian, dimensionality of the rU (within client variability matrix) and dimensionality of the rV (between client variability matrix) ``(#Gaussians, #Inputs, #rU, #rV)``.",
+  ""
+);
+PyObject* PyBobLearnEMJFAMachine_getShape(PyBobLearnEMJFAMachineObject* self, void*) {
+  BOB_TRY
+  return Py_BuildValue("(i,i,i,i)", self->cxx->getNGaussians(), self->cxx->getNInputs(), self->cxx->getDimRu(), self->cxx->getDimRv());
+  BOB_CATCH_MEMBER("shape could not be read", 0)
+}
+
+/***** supervector_length *****/
+static auto supervector_length = bob::extension::VariableDoc(
+  "supervector_length",
+  "int",
+
+  "Returns the supervector length."
+  "NGaussians x NInputs: Number of Gaussian components by the feature dimensionality",
+  
+  "@warning An exception is thrown if no Universal Background Model has been set yet."
+);
+PyObject* PyBobLearnEMJFAMachine_getSupervectorLength(PyBobLearnEMJFAMachineObject* self, void*) {
+  BOB_TRY
+  return Py_BuildValue("i", self->cxx->getSupervectorLength());
+  BOB_CATCH_MEMBER("supervector_length could not be read", 0)
+}
+
+
+/***** y *****/
+static auto Y = bob::extension::VariableDoc(
+  "y",
+  "array_like <float, 1D>",
+  "Returns the y speaker factor. Eq (30) from [McCool2013]",
+  ""
+);
+PyObject* PyBobLearnEMJFAMachine_getY(PyBobLearnEMJFAMachineObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getY());
+  BOB_CATCH_MEMBER("`y` could not be read", 0)
+}
+int PyBobLearnEMJFAMachine_setY(PyBobLearnEMJFAMachineObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, Y.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "y");
+  if (!b) return -1;
+  self->cxx->setY(*b);
+  return 0;
+  BOB_CATCH_MEMBER("`y` vector could not be set", -1)
+}
+
+
+/***** z *****/
+static auto Z = bob::extension::VariableDoc(
+  "z",
+  "array_like <float, 1D>",
+  "Returns the z speaker factor. Eq (31) from [McCool2013]",
+  ""
+);
+PyObject* PyBobLearnEMJFAMachine_getZ(PyBobLearnEMJFAMachineObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getZ());
+  BOB_CATCH_MEMBER("`z` could not be read", 0)
+}
+int PyBobLearnEMJFAMachine_setZ(PyBobLearnEMJFAMachineObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, Z.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "z");
+  if (!b) return -1;
+  self->cxx->setZ(*b);
+  return 0;
+  BOB_CATCH_MEMBER("`z` vector could not be set", -1)
+}
+
+
+/***** x *****/
+static auto X = bob::extension::VariableDoc(
+  "x",
+  "array_like <float, 1D>",
+  "Returns the X session factor. Eq (29) from [McCool2013]",
+  "The latent variable x (last one computed). This is a feature provided for convenience, but this attribute is not 'part' of the machine. The session latent variable x is indeed not class-specific, but depends on the sample considered. Furthermore, it is not saved into the machine or used when comparing machines."
+);
+PyObject* PyBobLearnEMJFAMachine_getX(PyBobLearnEMJFAMachineObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getX());
+  BOB_CATCH_MEMBER("`x` could not be read", 0)
+}
+
+
+/***** jfa_base *****/
+static auto jfa_base = bob::extension::VariableDoc(
+  "jfa_base",
+  ":py:class:`bob.learn.em.JFABase`",
+  "The JFABase attached to this machine",
+  ""
+);
+PyObject* PyBobLearnEMJFAMachine_getJFABase(PyBobLearnEMJFAMachineObject* self, void*){
+  BOB_TRY
+
+  boost::shared_ptr<bob::learn::em::JFABase> jfa_base_o = self->cxx->getJFABase();
+
+  //Allocating the corresponding python object
+  PyBobLearnEMJFABaseObject* retval =
+    (PyBobLearnEMJFABaseObject*)PyBobLearnEMJFABase_Type.tp_alloc(&PyBobLearnEMJFABase_Type, 0);
+  retval->cxx = jfa_base_o;
+
+  return Py_BuildValue("O",retval);
+  BOB_CATCH_MEMBER("jfa_base could not be read", 0)
+}
+int PyBobLearnEMJFAMachine_setJFABase(PyBobLearnEMJFAMachineObject* self, PyObject* value, void*){
+  BOB_TRY
+
+  if (!PyBobLearnEMJFABase_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a :py:class:`bob.learn.em.JFABase`", Py_TYPE(self)->tp_name, jfa_base.name());
+    return -1;
+  }
+
+  PyBobLearnEMJFABaseObject* jfa_base_o = 0;
+  PyArg_Parse(value, "O!", &PyBobLearnEMJFABase_Type,&jfa_base_o);
+
+  self->cxx->setJFABase(jfa_base_o->cxx);
+
+  return 0;
+  BOB_CATCH_MEMBER("jfa_base could not be set", -1)  
+}
+
+
+
+
+static PyGetSetDef PyBobLearnEMJFAMachine_getseters[] = { 
+  {
+   shape.name(),
+   (getter)PyBobLearnEMJFAMachine_getShape,
+   0,
+   shape.doc(),
+   0
+  },
+  
+  {
+   supervector_length.name(),
+   (getter)PyBobLearnEMJFAMachine_getSupervectorLength,
+   0,
+   supervector_length.doc(),
+   0
+  },
+  
+  {
+   jfa_base.name(),
+   (getter)PyBobLearnEMJFAMachine_getJFABase,
+   (setter)PyBobLearnEMJFAMachine_setJFABase,
+   jfa_base.doc(),
+   0
+  },
+
+  {
+   Y.name(),
+   (getter)PyBobLearnEMJFAMachine_getY,
+   (setter)PyBobLearnEMJFAMachine_setY,
+   Y.doc(),
+   0
+  },
+
+  {
+   Z.name(),
+   (getter)PyBobLearnEMJFAMachine_getZ,
+   (setter)PyBobLearnEMJFAMachine_setZ,
+   Z.doc(),
+   0
+  },
+
+  {
+   X.name(),
+   (getter)PyBobLearnEMJFAMachine_getX,
+   0,
+   X.doc(),
+   0
+  },
+
+
+  {0}  // Sentinel
+};
+
+
+/******************************************************************/
+/************ Functions Section ***********************************/
+/******************************************************************/
+
+
+/*** save ***/
+static auto save = bob::extension::FunctionDoc(
+  "save",
+  "Save the configuration of the JFAMachine to a given HDF5 file"
+)
+.add_prototype("hdf5")
+.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing");
+static PyObject* PyBobLearnEMJFAMachine_Save(PyBobLearnEMJFAMachineObject* self,  PyObject* args, PyObject* kwargs) {
+
+  BOB_TRY
+  
+  // get list of arguments
+  char** kwlist = save.kwlist(0);  
+  PyBobIoHDF5FileObject* hdf5;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
+
+  auto hdf5_ = make_safe(hdf5);
+  self->cxx->save(*hdf5->f);
+
+  BOB_CATCH_MEMBER("cannot save the data", 0)
+  Py_RETURN_NONE;
+}
+
+/*** load ***/
+static auto load = bob::extension::FunctionDoc(
+  "load",
+  "Load the configuration of the JFAMachine to a given HDF5 file"
+)
+.add_prototype("hdf5")
+.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
+static PyObject* PyBobLearnEMJFAMachine_Load(PyBobLearnEMJFAMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = load.kwlist(0);  
+  PyBobIoHDF5FileObject* hdf5;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
+  
+  auto hdf5_ = make_safe(hdf5);  
+  self->cxx->load(*hdf5->f);
+
+  BOB_CATCH_MEMBER("cannot load the data", 0)
+  Py_RETURN_NONE;
+}
+
+
+/*** is_similar_to ***/
+static auto is_similar_to = bob::extension::FunctionDoc(
+  "is_similar_to",
+  
+  "Compares this JFAMachine with the ``other`` one to be approximately the same.",
+  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
+  "relative and absolute precision for the ``weights``, ``biases`` "
+  "and any other values internal to this machine."
+)
+.add_prototype("other, [r_epsilon], [a_epsilon]","output")
+.add_parameter("other", ":py:class:`bob.learn.em.JFAMachine`", "A JFAMachine object to be compared.")
+.add_parameter("r_epsilon", "float", "Relative precision.")
+.add_parameter("a_epsilon", "float", "Absolute precision.")
+.add_return("output","bool","True if it is similar, otherwise false.");
+static PyObject* PyBobLearnEMJFAMachine_IsSimilarTo(PyBobLearnEMJFAMachineObject* self, PyObject* args, PyObject* kwds) {
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = is_similar_to.kwlist(0);
+
+  //PyObject* other = 0;
+  PyBobLearnEMJFAMachineObject* other = 0;
+  double r_epsilon = 1.e-5;
+  double a_epsilon = 1.e-8;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
+        &PyBobLearnEMJFAMachine_Type, &other,
+        &r_epsilon, &a_epsilon)){
+
+        is_similar_to.print_usage(); 
+        return 0;        
+  }
+
+  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
+    Py_RETURN_TRUE;
+  else
+    Py_RETURN_FALSE;
+}
+
+
+/*** estimate_x ***/
+static auto estimate_x = bob::extension::FunctionDoc(
+  "estimate_x",
+  "Estimates the session offset x (LPT assumption) given GMM statistics.",
+  "Estimates x from the GMM statistics considering the LPT assumption, that is the latent session variable x is approximated using the UBM", 
+  true
+)
+.add_prototype("stats,input")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "Statistics of the GMM")
+.add_parameter("input", "array_like <float, 1D>", "Input vector");
+static PyObject* PyBobLearnEMJFAMachine_estimateX(PyBobLearnEMJFAMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  char** kwlist = estimate_x.kwlist(0);
+
+  PyBobLearnEMGMMStatsObject* stats = 0;
+  PyBlitzArrayObject* input           = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMGMMStats_Type, &stats, 
+                                                                 &PyBlitzArray_Converter,&input))
+    Py_RETURN_NONE;
+
+  //protects acquired resources through this scope
+  auto input_ = make_safe(input);
+  self->cxx->estimateX(*stats->cxx, *PyBlitzArrayCxx_AsBlitz<double,1>(input));
+
+  BOB_CATCH_MEMBER("cannot estimate X", 0)
+  Py_RETURN_NONE;
+}
+
+
+/*** estimate_ux ***/
+static auto estimate_ux = bob::extension::FunctionDoc(
+  "estimate_ux",
+  "Estimates Ux (LPT assumption) given GMM statistics.",
+  "Estimates Ux from the GMM statistics considering the LPT assumption, that is the latent session variable x is approximated using the UBM.", 
+  true
+)
+.add_prototype("stats,input")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "Statistics of the GMM")
+.add_parameter("input", "array_like <float, 1D>", "Input vector");
+static PyObject* PyBobLearnEMJFAMachine_estimateUx(PyBobLearnEMJFAMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  char** kwlist = estimate_ux.kwlist(0);
+
+  PyBobLearnEMGMMStatsObject* stats = 0;
+  PyBlitzArrayObject* input           = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMGMMStats_Type, &stats, 
+                                                                 &PyBlitzArray_Converter,&input))
+    Py_RETURN_NONE;
+
+  //protects acquired resources through this scope
+  auto input_ = make_safe(input);
+  self->cxx->estimateUx(*stats->cxx, *PyBlitzArrayCxx_AsBlitz<double,1>(input));
+
+  BOB_CATCH_MEMBER("cannot estimate Ux", 0)
+  Py_RETURN_NONE;
+}
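+
+// Illustrative scoring sketch (comment only) combining estimate_ux and the
+// forward_ux function defined next. `machine` (a JFAMachine with enrolled
+// y/z factors) and `probe_stats` (the GMMStats of a probe sample) are
+// placeholders:
+//
+//   import numpy
+//   ux = numpy.zeros((machine.supervector_length,))
+//   machine.estimate_ux(probe_stats, ux)        # fills ux in-place
+//   score = machine.forward_ux(probe_stats, ux)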
+
+
+/*** forward_ux ***/
+static auto forward_ux = bob::extension::FunctionDoc(
+  "forward_ux",
+  "Computes a score for the given UBM statistics and given the Ux vector",
+  "", 
+  true
+)
+.add_prototype("stats,ux")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "Statistics as input")
+.add_parameter("ux", "array_like <float, 1D>", "Input vector");
+static PyObject* PyBobLearnEMJFAMachine_ForwardUx(PyBobLearnEMJFAMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  char** kwlist = forward_ux.kwlist(0);
+
+  PyBobLearnEMGMMStatsObject* stats = 0;
+  PyBlitzArrayObject* ux_input        = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMGMMStats_Type, &stats, 
+                                                                 &PyBlitzArray_Converter,&ux_input))
+    Py_RETURN_NONE;
+
+  //protects acquired resources through this scope
+  auto ux_input_ = make_safe(ux_input);
+  double score = self->cxx->forward(*stats->cxx, *PyBlitzArrayCxx_AsBlitz<double,1>(ux_input));
+  
+  return Py_BuildValue("d", score);
+  BOB_CATCH_MEMBER("cannot forward_ux", 0)
+
+}
+
+
+/*** forward ***/
+static auto forward = bob::extension::FunctionDoc(
+  "forward",
+  "Execute the machine",
+  "", 
+  true
+)
+.add_prototype("stats")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "Statistics as input");
+static PyObject* PyBobLearnEMJFAMachine_Forward(PyBobLearnEMJFAMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  char** kwlist = forward.kwlist(0);
+
+  PyBobLearnEMGMMStatsObject* stats = 0;
+  
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMStats_Type, &stats))
+    Py_RETURN_NONE;
+
+  //protects acquired resources through this scope
+  double score = self->cxx->forward(*stats->cxx);
+
+  return Py_BuildValue("d", score);
+  BOB_CATCH_MEMBER("cannot forward", 0)
+
+}
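+
+// Illustrative sketch (comment only): forward is not exported in the method
+// table below, but it backs tp_call in the module section, so a machine can
+// be scored by calling it directly on the statistics:
+//
+//   score = machine(probe_stats)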
+
+
+static PyMethodDef PyBobLearnEMJFAMachine_methods[] = {
+  {
+    save.name(),
+    (PyCFunction)PyBobLearnEMJFAMachine_Save,
+    METH_VARARGS|METH_KEYWORDS,
+    save.doc()
+  },
+  {
+    load.name(),
+    (PyCFunction)PyBobLearnEMJFAMachine_Load,
+    METH_VARARGS|METH_KEYWORDS,
+    load.doc()
+  },
+  {
+    is_similar_to.name(),
+    (PyCFunction)PyBobLearnEMJFAMachine_IsSimilarTo,
+    METH_VARARGS|METH_KEYWORDS,
+    is_similar_to.doc()
+  },
+  
+  {
+    estimate_x.name(),
+    (PyCFunction)PyBobLearnEMJFAMachine_estimateX,
+    METH_VARARGS|METH_KEYWORDS,
+    estimate_x.doc()
+  },
+  
+  {
+    estimate_ux.name(),
+    (PyCFunction)PyBobLearnEMJFAMachine_estimateUx,
+    METH_VARARGS|METH_KEYWORDS,
+    estimate_ux.doc()
+  },
+
+  {
+    forward_ux.name(),
+    (PyCFunction)PyBobLearnEMJFAMachine_ForwardUx,
+    METH_VARARGS|METH_KEYWORDS,
+    forward_ux.doc()
+  },
+/*
+  {
+    forward.name(),
+    (PyCFunction)PyBobLearnEMJFAMachine_Forward,
+    METH_VARARGS|METH_KEYWORDS,
+    forward.doc()
+  },*/
+
+
+  {0} /* Sentinel */
+};
+
+
+/******************************************************************/
+/************ Module Section **************************************/
+/******************************************************************/
+
+// Define the JFA type struct; will be initialized later
+PyTypeObject PyBobLearnEMJFAMachine_Type = {
+  PyVarObject_HEAD_INIT(0,0)
+  0
+};
+
+bool init_BobLearnEMJFAMachine(PyObject* module)
+{
+  // initialize the type struct
+  PyBobLearnEMJFAMachine_Type.tp_name      = JFAMachine_doc.name();
+  PyBobLearnEMJFAMachine_Type.tp_basicsize = sizeof(PyBobLearnEMJFAMachineObject);
+  PyBobLearnEMJFAMachine_Type.tp_flags     = Py_TPFLAGS_DEFAULT;
+  PyBobLearnEMJFAMachine_Type.tp_doc       = JFAMachine_doc.doc();
+
+  // set the functions
+  PyBobLearnEMJFAMachine_Type.tp_new         = PyType_GenericNew;
+  PyBobLearnEMJFAMachine_Type.tp_init        = reinterpret_cast<initproc>(PyBobLearnEMJFAMachine_init);
+  PyBobLearnEMJFAMachine_Type.tp_dealloc     = reinterpret_cast<destructor>(PyBobLearnEMJFAMachine_delete);
+  PyBobLearnEMJFAMachine_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnEMJFAMachine_RichCompare);
+  PyBobLearnEMJFAMachine_Type.tp_methods     = PyBobLearnEMJFAMachine_methods;
+  PyBobLearnEMJFAMachine_Type.tp_getset      = PyBobLearnEMJFAMachine_getseters;
+  PyBobLearnEMJFAMachine_Type.tp_call        = reinterpret_cast<ternaryfunc>(PyBobLearnEMJFAMachine_Forward);
+
+
+  // check that everything is fine
+  if (PyType_Ready(&PyBobLearnEMJFAMachine_Type) < 0) return false;
+
+  // add the type to the module
+  Py_INCREF(&PyBobLearnEMJFAMachine_Type);
+  return PyModule_AddObject(module, "JFAMachine", (PyObject*)&PyBobLearnEMJFAMachine_Type) >= 0;
+}
+
diff --git a/bob/learn/em/jfa_trainer.cpp b/bob/learn/em/jfa_trainer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ffe4147908341613d1ff52bb19de64233e96fdd7
--- /dev/null
+++ b/bob/learn/em/jfa_trainer.cpp
@@ -0,0 +1,1013 @@
+/**
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ * @date Sun 01 Fev 09:40:00 2015
+ *
+ * @brief Python API for bob::learn::em
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "main.h"
+#include <boost/make_shared.hpp>
+
+/******************************************************************/
+/************ Constructor Section *********************************/
+/******************************************************************/
+
+static int extract_GMMStats_1d(PyObject *list,
+                             std::vector<boost::shared_ptr<bob::learn::em::GMMStats> >& training_data)
+{
+  for (int i=0; i<PyList_GET_SIZE(list); i++){
+  
+    PyBobLearnEMGMMStatsObject* stats;
+    if (!PyArg_Parse(PyList_GetItem(list, i), "O!", &PyBobLearnEMGMMStats_Type, &stats)){
+      PyErr_Format(PyExc_RuntimeError, "Expected GMMStats objects");
+      return -1;
+    }
+    training_data.push_back(stats->cxx);
+  }
+  return 0;
+}
+
+static int extract_GMMStats_2d(PyObject *list,
+                             std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > >& training_data)
+{
+  for (int i=0; i<PyList_GET_SIZE(list); i++)
+  {
+    PyObject* another_list;
+    if (!PyArg_Parse(PyList_GetItem(list, i), "O!", &PyList_Type, &another_list)){
+      PyErr_Format(PyExc_RuntimeError, "Expected a list of lists of GMMStats objects");
+      return -1;
+    }
+
+    std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > another_training_data;
+    for (int j=0; j<PyList_GET_SIZE(another_list); j++){
+
+      PyBobLearnEMGMMStatsObject* stats;
+      if (!PyArg_Parse(PyList_GetItem(another_list, j), "O!", &PyBobLearnEMGMMStats_Type, &stats)){
+        PyErr_Format(PyExc_RuntimeError, "Expected GMMStats objects");
+        return -1;
+      }
+      another_training_data.push_back(stats->cxx);
+    }
+    training_data.push_back(another_training_data);
+  }
+  return 0;
+}
+
+template <int N>
+static PyObject* vector_as_list(const std::vector<blitz::Array<double,N> >& vec)
+{
+  PyObject* list = PyList_New(vec.size());
+  for(size_t i=0; i<vec.size(); i++){
+    blitz::Array<double,N> numpy_array = vec[i];
+    PyObject* numpy_py_object = PyBlitzArrayCxx_AsNumpy(numpy_array);
+    PyList_SET_ITEM(list, i, numpy_py_object);
+  }
+  return list;
+}
+
+template <int N>
+int list_as_vector(PyObject* list, std::vector<blitz::Array<double,N> >& vec)
+{
+  for (int i=0; i<PyList_GET_SIZE(list); i++)
+  {
+    PyBlitzArrayObject* blitz_object; 
+    if (!PyArg_Parse(PyList_GetItem(list, i), "O&", &PyBlitzArray_Converter, &blitz_object)){
+      PyErr_Format(PyExc_RuntimeError, "Expected numpy array object");
+      return -1;
+    }
+    auto blitz_object_ = make_safe(blitz_object);
+    vec.push_back(*PyBlitzArrayCxx_AsBlitz<double,N>(blitz_object));
+  }
+  return 0;
+}
+
+
+
+static auto JFATrainer_doc = bob::extension::ClassDoc(
+  BOB_EXT_MODULE_PREFIX ".JFATrainer",
+  "JFATrainer"
+  "References: [Vogt2008,McCool2013]",
+  ""
+).add_constructor(
+  bob::extension::FunctionDoc(
+    "__init__",
+    "Constructor. Builds a new JFATrainer",
+    "",
+    true
+  )
+  .add_prototype("other","")
+  .add_prototype("","")
+  .add_parameter("other", ":py:class:`bob.learn.em.JFATrainer`", "A JFATrainer object to be copied.")
+);
+
+
+static int PyBobLearnEMJFATrainer_init_copy(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = JFATrainer_doc.kwlist(0);
+  PyBobLearnEMJFATrainerObject* o;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMJFATrainer_Type, &o)){
+    JFATrainer_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::JFATrainer(*o->cxx));
+  return 0;
+}
+
+
+static int PyBobLearnEMJFATrainer_init(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // get the number of command line arguments
+  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
+
+  switch(nargs){
+    case 0:{
+      self->cxx.reset(new bob::learn::em::JFATrainer());
+      return 0;
+    }
+    case 1:{
+      // If the constructor input is JFATrainer object
+      return PyBobLearnEMJFATrainer_init_copy(self, args, kwargs);
+    }
+    default:{
+      PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires only 0 and 1 argument, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
+      JFATrainer_doc.print_usage();
+      return -1;
+    }
+  }
+  BOB_CATCH_MEMBER("cannot create JFATrainer", 0)
+  return 0;
+}
+
+
+static void PyBobLearnEMJFATrainer_delete(PyBobLearnEMJFATrainerObject* self) {
+  self->cxx.reset();
+  Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+
+int PyBobLearnEMJFATrainer_Check(PyObject* o) {
+  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnEMJFATrainer_Type));
+}
+
+
+static PyObject* PyBobLearnEMJFATrainer_RichCompare(PyBobLearnEMJFATrainerObject* self, PyObject* other, int op) {
+  BOB_TRY
+
+  if (!PyBobLearnEMJFATrainer_Check(other)) {
+    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
+    return 0;
+  }
+  auto other_ = reinterpret_cast<PyBobLearnEMJFATrainerObject*>(other);
+  switch (op) {
+    case Py_EQ:
+      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+    case Py_NE:
+      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
+    default:
+      Py_INCREF(Py_NotImplemented);
+      return Py_NotImplemented;
+  }
+  BOB_CATCH_MEMBER("cannot compare JFATrainer objects", 0)
+}
+
+
+/******************************************************************/
+/************ Variables Section ***********************************/
+/******************************************************************/
+
+static auto acc_v_a1 = bob::extension::VariableDoc(
+  "acc_v_a1",
+  "array_like <float, 3D>",
+  "Accumulator updated during the E-step",
+  ""
+);
+PyObject* PyBobLearnEMJFATrainer_get_acc_v_a1(PyBobLearnEMJFATrainerObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccVA1());
+  BOB_CATCH_MEMBER("acc_v_a1 could not be read", 0)
+}
+int PyBobLearnEMJFATrainer_set_acc_v_a1(PyBobLearnEMJFATrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 3D array of floats", Py_TYPE(self)->tp_name, acc_v_a1.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,3>(o, "acc_v_a1");
+  if (!b) return -1;
+  self->cxx->setAccVA1(*b);
+  return 0;
+  BOB_CATCH_MEMBER("acc_v_a1 could not be set", -1)
+}
+
+
+static auto acc_v_a2 = bob::extension::VariableDoc(
+  "acc_v_a2",
+  "array_like <float, 2D>",
+  "Accumulator updated during the E-step",
+  ""
+);
+PyObject* PyBobLearnEMJFATrainer_get_acc_v_a2(PyBobLearnEMJFATrainerObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccVA2());
+  BOB_CATCH_MEMBER("acc_v_a2 could not be read", 0)
+}
+int PyBobLearnEMJFATrainer_set_acc_v_a2(PyBobLearnEMJFATrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, acc_v_a2.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "acc_v_a2");
+  if (!b) return -1;
+  self->cxx->setAccVA2(*b);
+  return 0;
+  BOB_CATCH_MEMBER("acc_v_a2 could not be set", -1)
+}
+
+
+static auto acc_u_a1 = bob::extension::VariableDoc(
+  "acc_u_a1",
+  "array_like <float, 3D>",
+  "Accumulator updated during the E-step",
+  ""
+);
+PyObject* PyBobLearnEMJFATrainer_get_acc_u_a1(PyBobLearnEMJFATrainerObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccUA1());
+  BOB_CATCH_MEMBER("acc_u_a1 could not be read", 0)
+}
+int PyBobLearnEMJFATrainer_set_acc_u_a1(PyBobLearnEMJFATrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 3D array of floats", Py_TYPE(self)->tp_name, acc_u_a1.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,3>(o, "acc_u_a1");
+  if (!b) return -1;
+  self->cxx->setAccUA1(*b);
+  return 0;
+  BOB_CATCH_MEMBER("acc_u_a1 could not be set", -1)
+}
+
+
+static auto acc_u_a2 = bob::extension::VariableDoc(
+  "acc_u_a2",
+  "array_like <float, 2D>",
+  "Accumulator updated during the E-step",
+  ""
+);
+PyObject* PyBobLearnEMJFATrainer_get_acc_u_a2(PyBobLearnEMJFATrainerObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccUA2());
+  BOB_CATCH_MEMBER("acc_u_a2 could not be read", 0)
+}
+int PyBobLearnEMJFATrainer_set_acc_u_a2(PyBobLearnEMJFATrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, acc_u_a2.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "acc_u_a2");
+  if (!b) return -1;
+  self->cxx->setAccUA2(*b);
+  return 0;
+  BOB_CATCH_MEMBER("acc_u_a2 could not be set", -1)
+}
+
+
+static auto acc_d_a1 = bob::extension::VariableDoc(
+  "acc_d_a1",
+  "array_like <float, 1D>",
+  "Accumulator updated during the E-step",
+  ""
+);
+PyObject* PyBobLearnEMJFATrainer_get_acc_d_a1(PyBobLearnEMJFATrainerObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccDA1());
+  BOB_CATCH_MEMBER("acc_d_a1 could not be read", 0)
+}
+int PyBobLearnEMJFATrainer_set_acc_d_a1(PyBobLearnEMJFATrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, acc_d_a1.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "acc_d_a1");
+  if (!b) return -1;
+  self->cxx->setAccDA1(*b);
+  return 0;
+  BOB_CATCH_MEMBER("acc_d_a1 could not be set", -1)
+}
+
+
+static auto acc_d_a2 = bob::extension::VariableDoc(
+  "acc_d_a2",
+  "array_like <float, 1D>",
+  "Accumulator updated during the E-step",
+  ""
+);
+PyObject* PyBobLearnEMJFATrainer_get_acc_d_a2(PyBobLearnEMJFATrainerObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccDA2());
+  BOB_CATCH_MEMBER("acc_d_a2 could not be read", 0)
+}
+int PyBobLearnEMJFATrainer_set_acc_d_a2(PyBobLearnEMJFATrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, acc_d_a2.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "acc_d_a2");
+  if (!b) return -1;
+  self->cxx->setAccDA2(*b);
+  return 0;
+  BOB_CATCH_MEMBER("acc_d_a2 could not be set", -1)
+}
+
+
+static auto __X__ = bob::extension::VariableDoc(
+  "__X__",
+  "list",
+  "",
+  ""
+);
+PyObject* PyBobLearnEMJFATrainer_get_X(PyBobLearnEMJFATrainerObject* self, void*){
+  BOB_TRY
+  return vector_as_list(self->cxx->getX());
+  BOB_CATCH_MEMBER("__X__ could not be read", 0)
+}
+int PyBobLearnEMJFATrainer_set_X(PyBobLearnEMJFATrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+
+  // Parses input arguments in a single shot
+  if (!PyList_Check(value)){
+    PyErr_Format(PyExc_TypeError, "Expected a list in `%s'", __X__.name());
+    return -1;
+  }
+    
+  std::vector<blitz::Array<double,2> > data;
+  if(list_as_vector(value ,data)==0){
+    self->cxx->setX(data);
+  }
+    
+  return 0;
+  BOB_CATCH_MEMBER("__X__ could not be written", 0)
+}
+
+
+
+static auto __Y__ = bob::extension::VariableDoc(
+  "__Y__",
+  "list",
+  "",
+  ""
+);
+PyObject* PyBobLearnEMJFATrainer_get_Y(PyBobLearnEMJFATrainerObject* self, void*){
+  BOB_TRY
+  return vector_as_list(self->cxx->getY());
+  BOB_CATCH_MEMBER("__Y__ could not be read", 0)
+}
+int PyBobLearnEMJFATrainer_set_Y(PyBobLearnEMJFATrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+
+  // Parses input arguments in a single shot
+  if (!PyList_Check(value)){
+    PyErr_Format(PyExc_TypeError, "Expected a list in `%s'", __Y__.name());
+    return -1;
+  }
+    
+  std::vector<blitz::Array<double,1> > data;
+  if(list_as_vector(value ,data)==0){
+    self->cxx->setY(data);
+  }
+    
+  return 0;
+  BOB_CATCH_MEMBER("__Y__ could not be written", 0)
+}
+
+
+
+static auto __Z__ = bob::extension::VariableDoc(
+  "__Z__",
+  "list",
+  "",
+  ""
+);
+PyObject* PyBobLearnEMJFATrainer_get_Z(PyBobLearnEMJFATrainerObject* self, void*){
+  BOB_TRY
+  return vector_as_list(self->cxx->getZ());
+  BOB_CATCH_MEMBER("__Z__ could not be read", 0)
+}
+int PyBobLearnEMJFATrainer_set_Z(PyBobLearnEMJFATrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+
+  // Parses input arguments in a single shot
+  if (!PyList_Check(value)){
+    PyErr_Format(PyExc_TypeError, "Expected a list in `%s'", __Z__.name());
+    return -1;
+  }
+    
+  std::vector<blitz::Array<double,1> > data;
+  if(list_as_vector(value ,data)==0){
+    self->cxx->setZ(data);
+  }
+    
+  return 0;
+  BOB_CATCH_MEMBER("__Z__ could not be written", 0)
+}
+
+
+
+/***** rng *****/
+static auto rng = bob::extension::VariableDoc(
+  "rng",
+  "str",
+  "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.",
+  ""
+);
+PyObject* PyBobLearnEMJFATrainer_getRng(PyBobLearnEMJFATrainerObject* self, void*) {
+  BOB_TRY
+  //Allocating the corresponding python object
+  
+  PyBoostMt19937Object* retval =
+    (PyBoostMt19937Object*)PyBoostMt19937_Type.tp_alloc(&PyBoostMt19937_Type, 0);
+
+  retval->rng = self->cxx->getRng().get();
+  return Py_BuildValue("O", retval);
+  BOB_CATCH_MEMBER("Rng method could not be read", 0)
+}
+int PyBobLearnEMJFATrainer_setRng(PyBobLearnEMJFATrainerObject* self, PyObject* value, void*) {
+  BOB_TRY
+
+  if (!PyBoostMt19937_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects an PyBoostMt19937_Check", Py_TYPE(self)->tp_name, rng.name());
+    return -1;
+  }
+
+  PyBoostMt19937Object* boostObject = 0;
+  PyBoostMt19937_Converter(value, &boostObject);
+  self->cxx->setRng((boost::shared_ptr<boost::mt19937>)boostObject->rng);
+
+  return 0;
+  BOB_CATCH_MEMBER("Rng could not be set", 0)
+}
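+
+// Illustrative sketch (comment only), assuming bob.core.random exposes the
+// mt19937 binding wrapped above: fixing the generator seed makes the random
+// initialization of the subspaces reproducible across runs:
+//
+//   import bob.core.random
+//   trainer.rng = bob.core.random.mt19937(5489)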
+
+static PyGetSetDef PyBobLearnEMJFATrainer_getseters[] = { 
+  {
+   acc_v_a1.name(),
+   (getter)PyBobLearnEMJFATrainer_get_acc_v_a1,
+   (setter)PyBobLearnEMJFATrainer_set_acc_v_a1,
+   acc_v_a1.doc(),
+   0
+  },
+  {
+   acc_v_a2.name(),
+   (getter)PyBobLearnEMJFATrainer_get_acc_v_a2,
+   (setter)PyBobLearnEMJFATrainer_set_acc_v_a2,
+   acc_v_a2.doc(),
+   0
+  },
+  {
+   acc_u_a1.name(),
+   (getter)PyBobLearnEMJFATrainer_get_acc_u_a1,
+   (setter)PyBobLearnEMJFATrainer_set_acc_u_a1,
+   acc_u_a1.doc(),
+   0
+  },
+  {
+   acc_u_a2.name(),
+   (getter)PyBobLearnEMJFATrainer_get_acc_u_a2,
+   (setter)PyBobLearnEMJFATrainer_set_acc_u_a2,
+   acc_u_a2.doc(),
+   0
+  },
+  {
+   acc_d_a1.name(),
+   (getter)PyBobLearnEMJFATrainer_get_acc_d_a1,
+   (setter)PyBobLearnEMJFATrainer_set_acc_d_a1,
+   acc_d_a1.doc(),
+   0
+  },
+  {
+   acc_d_a2.name(),
+   (getter)PyBobLearnEMJFATrainer_get_acc_d_a2,
+   (setter)PyBobLearnEMJFATrainer_set_acc_d_a2,
+   acc_d_a2.doc(),
+   0
+  },
+  {
+   rng.name(),
+   (getter)PyBobLearnEMJFATrainer_getRng,
+   (setter)PyBobLearnEMJFATrainer_setRng,
+   rng.doc(),
+   0
+  },
+  {
+   __X__.name(),
+   (getter)PyBobLearnEMJFATrainer_get_X,
+   (setter)PyBobLearnEMJFATrainer_set_X,
+   __X__.doc(),
+   0
+  },
+  {
+   __Y__.name(),
+   (getter)PyBobLearnEMJFATrainer_get_Y,
+   (setter)PyBobLearnEMJFATrainer_set_Y,
+   __Y__.doc(),
+   0
+  },
+  {
+   __Z__.name(),
+   (getter)PyBobLearnEMJFATrainer_get_Z,
+   (setter)PyBobLearnEMJFATrainer_set_Z,
+   __Z__.doc(),
+   0
+  },
+  
+  
+
+  {0}  // Sentinel
+};
+
+
+/******************************************************************/
+/************ Functions Section ***********************************/
+/******************************************************************/
+
+/*** initialize ***/
+static auto initialize = bob::extension::FunctionDoc(
+  "initialize",
+  "Initialization before the EM steps",
+  "",
+  true
+)
+.add_prototype("jfa_base,stats")
+.add_parameter("jfa_base", ":py:class:`bob.learn.em.JFABase`", "JFABase Object")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object");
+static PyObject* PyBobLearnEMJFATrainer_initialize(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = initialize.kwlist(0);
+
+  PyBobLearnEMJFABaseObject* jfa_base = 0;
+  PyObject* stats = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnEMJFABase_Type, &jfa_base,
+                                                                 &PyList_Type, &stats)) Py_RETURN_NONE;
+
+  std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > > training_data;
+  if(extract_GMMStats_2d(stats ,training_data)==0)
+    self->cxx->initialize(*jfa_base->cxx, training_data);
+
+  BOB_CATCH_MEMBER("cannot perform the initialize method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** e_step1 ***/
+static auto e_step1 = bob::extension::FunctionDoc(
+  "e_step1",
+  "Call the 1st e-step procedure (for the V subspace).",
+  "",
+  true
+)
+.add_prototype("jfa_base,stats")
+.add_parameter("jfa_base", ":py:class:`bob.learn.em.JFABase`", "JFABase Object")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object");
+static PyObject* PyBobLearnEMJFATrainer_e_step1(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  //Parses input arguments in a single shot
+  char** kwlist = e_step1.kwlist(0);
+
+  PyBobLearnEMJFABaseObject* jfa_base = 0;
+  PyObject* stats = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnEMJFABase_Type, &jfa_base,
+                                                                 &PyList_Type, &stats)) Py_RETURN_NONE;
+
+  std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > > training_data;
+  if(extract_GMMStats_2d(stats ,training_data)==0)
+    self->cxx->eStep1(*jfa_base->cxx, training_data);
+
+
+  BOB_CATCH_MEMBER("cannot perform the e_step1 method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** m_step1 ***/
+static auto m_step1 = bob::extension::FunctionDoc(
+  "m_step1",
+  "Call the 1st m-step procedure (for the V subspace).",
+  "",
+  true
+)
+.add_prototype("jfa_base,stats")
+.add_parameter("jfa_base", ":py:class:`bob.learn.em.JFABase`", "JFABase Object")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object");
+static PyObject* PyBobLearnEMJFATrainer_m_step1(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // Parses input arguments in a single shot
+  char** kwlist = m_step1.kwlist(0);
+
+  PyBobLearnEMJFABaseObject* jfa_base = 0;
+  PyObject* stats = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnEMJFABase_Type, &jfa_base,
+                                                                 &PyList_Type, &stats)) Py_RETURN_NONE;
+
+  std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > > training_data;
+  if(extract_GMMStats_2d(stats ,training_data)==0)
+    self->cxx->mStep1(*jfa_base->cxx, training_data);
+
+  BOB_CATCH_MEMBER("cannot perform the m_step1 method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** finalize1 ***/
+static auto finalize1 = bob::extension::FunctionDoc(
+  "finalize1",
+  "Call the 1st finalize procedure (for the V subspace).",
+  "",
+  true
+)
+.add_prototype("jfa_base,stats")
+.add_parameter("jfa_base", ":py:class:`bob.learn.em.JFABase`", "JFABase Object")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object");
+static PyObject* PyBobLearnEMJFATrainer_finalize1(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  //Parses input arguments in a single shot
+  char** kwlist = finalize1.kwlist(0);
+
+  PyBobLearnEMJFABaseObject* jfa_base = 0;
+  PyObject* stats = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnEMJFABase_Type, &jfa_base,
+                                                                 &PyList_Type, &stats)) Py_RETURN_NONE;
+
+  std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > > training_data;
+  if(extract_GMMStats_2d(stats ,training_data)==0)
+    self->cxx->finalize1(*jfa_base->cxx, training_data);
+
+  BOB_CATCH_MEMBER("cannot perform the finalize1 method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** e_step2 ***/
+static auto e_step2 = bob::extension::FunctionDoc(
+  "e_step2",
+  "Call the 2nd e-step procedure (for the U subspace).",
+  "",
+  true
+)
+.add_prototype("jfa_base,stats")
+.add_parameter("jfa_base", ":py:class:`bob.learn.em.JFABase`", "JFABase Object")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object");
+static PyObject* PyBobLearnEMJFATrainer_e_step2(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // Parses input arguments in a single shot
+  char** kwlist = e_step2.kwlist(0);
+
+  PyBobLearnEMJFABaseObject* jfa_base = 0;
+  PyObject* stats = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnEMJFABase_Type, &jfa_base,
+                                                                 &PyList_Type, &stats)) Py_RETURN_NONE;
+
+  std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > > training_data;
+  if(extract_GMMStats_2d(stats ,training_data)==0)
+    self->cxx->eStep2(*jfa_base->cxx, training_data);
+
+  BOB_CATCH_MEMBER("cannot perform the e_step2 method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** m_step2 ***/
+static auto m_step2 = bob::extension::FunctionDoc(
+  "m_step2",
+  "Call the 2nd m-step procedure (for the U subspace).",
+  "",
+  true
+)
+.add_prototype("jfa_base,stats")
+.add_parameter("jfa_base", ":py:class:`bob.learn.em.JFABase`", "JFABase Object")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object");
+static PyObject* PyBobLearnEMJFATrainer_m_step2(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // Parses input arguments in a single shot 
+  char** kwlist = m_step2.kwlist(0);
+
+  PyBobLearnEMJFABaseObject* jfa_base = 0;
+  PyObject* stats = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnEMJFABase_Type, &jfa_base,
+                                                                 &PyList_Type, &stats)) Py_RETURN_NONE;
+
+  std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > > training_data;
+  if(extract_GMMStats_2d(stats ,training_data)==0)
+    self->cxx->mStep2(*jfa_base->cxx, training_data);
+
+  BOB_CATCH_MEMBER("cannot perform the m_step2 method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** finalize2 ***/
+static auto finalize2 = bob::extension::FunctionDoc(
+  "finalize2",
+  "Call the 2nd finalize procedure (for the U subspace).",
+  "",
+  true
+)
+.add_prototype("jfa_base,stats")
+.add_parameter("jfa_base", ":py:class:`bob.learn.em.JFABase`", "JFABase Object")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object");
+static PyObject* PyBobLearnEMJFATrainer_finalize2(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // Parses input arguments in a single shot
+  char** kwlist = finalize2.kwlist(0);
+
+  PyBobLearnEMJFABaseObject* jfa_base = 0;
+  PyObject* stats = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnEMJFABase_Type, &jfa_base,
+                                                                 &PyList_Type, &stats)) Py_RETURN_NONE;
+
+  std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > > training_data;
+  if(extract_GMMStats_2d(stats ,training_data)==0)
+    self->cxx->finalize2(*jfa_base->cxx, training_data);
+
+  BOB_CATCH_MEMBER("cannot perform the finalize2 method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** e_step3 ***/
+static auto e_step3 = bob::extension::FunctionDoc(
+  "e_step3",
+  "Call the 3rd e-step procedure (for the d subspace).",
+  "",
+  true
+)
+.add_prototype("jfa_base,stats")
+.add_parameter("jfa_base", ":py:class:`bob.learn.em.JFABase`", "JFABase Object")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object");
+static PyObject* PyBobLearnEMJFATrainer_e_step3(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // Parses input arguments in a single shot
+  char** kwlist = e_step3.kwlist(0);
+
+  PyBobLearnEMJFABaseObject* jfa_base = 0;
+  PyObject* stats = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnEMJFABase_Type, &jfa_base,
+                                                                 &PyList_Type, &stats)) Py_RETURN_NONE;
+
+  std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > > training_data;
+  if(extract_GMMStats_2d(stats ,training_data)==0)
+    self->cxx->eStep3(*jfa_base->cxx, training_data);
+
+  BOB_CATCH_MEMBER("cannot perform the e_step3 method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** m_step3 ***/
+static auto m_step3 = bob::extension::FunctionDoc(
+  "m_step3",
+  "Call the 3rd m-step procedure (for the d subspace).",
+  "",
+  true
+)
+.add_prototype("jfa_base,stats")
+.add_parameter("jfa_base", ":py:class:`bob.learn.em.JFABase`", "JFABase Object")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object");
+static PyObject* PyBobLearnEMJFATrainer_m_step3(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // Parses input arguments in a single shot
+  char** kwlist = m_step3.kwlist(0);
+
+  PyBobLearnEMJFABaseObject* jfa_base = 0;
+  PyObject* stats = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnEMJFABase_Type, &jfa_base,
+                                                                 &PyList_Type, &stats)) Py_RETURN_NONE;
+
+  std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > > training_data;
+  if(extract_GMMStats_2d(stats ,training_data)==0)
+    self->cxx->mStep3(*jfa_base->cxx, training_data);
+
+  BOB_CATCH_MEMBER("cannot perform the m_step3 method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** finalize3 ***/
+static auto finalize3 = bob::extension::FunctionDoc(
+  "finalize3",
+  "Call the 3rd finalize procedure (for the d subspace).",
+  "",
+  true
+)
+.add_prototype("jfa_base,stats")
+.add_parameter("jfa_base", ":py:class:`bob.learn.em.JFABase`", "JFABase Object")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object");
+static PyObject* PyBobLearnEMJFATrainer_finalize3(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // Parses input arguments in a single shot
+  char** kwlist = finalize3.kwlist(0);
+
+  PyBobLearnEMJFABaseObject* jfa_base = 0;
+  PyObject* stats = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnEMJFABase_Type, &jfa_base,
+                                                                 &PyList_Type, &stats)) Py_RETURN_NONE;
+
+  std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > > training_data;
+  if(extract_GMMStats_2d(stats ,training_data)==0)
+    self->cxx->finalize3(*jfa_base->cxx, training_data);
+
+  BOB_CATCH_MEMBER("cannot perform the finalize3 method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** enrol ***/
+static auto enrol = bob::extension::FunctionDoc(
+  "enrol",
+  "",
+  "",
+  true
+)
+.add_prototype("jfa_machine,features,n_iter","")
+.add_parameter("jfa_machine", ":py:class:`bob.learn.em.JFAMachine`", "JFAMachine Object")
+.add_parameter("features", "list(:py:class:`bob.learn.em.GMMStats`)`", "")
+.add_parameter("n_iter", "int", "Number of iterations");
+static PyObject* PyBobLearnEMJFATrainer_enrol(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // Parses input arguments in a single shot
+  char** kwlist = enrol.kwlist(0);
+
+  PyBobLearnEMJFAMachineObject* jfa_machine = 0;
+  PyObject* stats = 0;
+  int n_iter = 1;
+
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!i", kwlist, &PyBobLearnEMJFAMachine_Type, &jfa_machine,
+                                                                  &PyList_Type, &stats, &n_iter)) Py_RETURN_NONE;
+
+  std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > training_data;
+  if(extract_GMMStats_1d(stats ,training_data)==0)
+    self->cxx->enrol(*jfa_machine->cxx, training_data, n_iter);
+
+  BOB_CATCH_MEMBER("cannot perform the enrol method", 0)
+
+  Py_RETURN_NONE;
+}
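+
+// Training-loop sketch (hypothetical Python usage; assumes the wrapper class
+// is exposed as bob.learn.em.JFATrainer, `jfa_base` is a bob.learn.em.JFABase
+// and `stats` is a list of lists of bob.learn.em.GMMStats, one inner list per
+// client). The three (e_step, m_step, finalize) triplets bound above train
+// the V, U and d subspaces in sequence:
+//
+//   trainer.initialize(jfa_base, stats)
+//   for step in (1, 2, 3):
+//       e_step   = getattr(trainer, 'e_step%d' % step)
+//       m_step   = getattr(trainer, 'm_step%d' % step)
+//       finalize = getattr(trainer, 'finalize%d' % step)
+//       for it in range(n_iterations):   # n_iterations chosen by the caller
+//           e_step(jfa_base, stats)
+//           m_step(jfa_base, stats)
+//       finalize(jfa_base, stats)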
+
+
+
+static PyMethodDef PyBobLearnEMJFATrainer_methods[] = {
+  {
+    initialize.name(),
+    (PyCFunction)PyBobLearnEMJFATrainer_initialize,
+    METH_VARARGS|METH_KEYWORDS,
+    initialize.doc()
+  },
+  {
+    e_step1.name(),
+    (PyCFunction)PyBobLearnEMJFATrainer_e_step1,
+    METH_VARARGS|METH_KEYWORDS,
+    e_step1.doc()
+  },
+  {
+    e_step2.name(),
+    (PyCFunction)PyBobLearnEMJFATrainer_e_step2,
+    METH_VARARGS|METH_KEYWORDS,
+    e_step2.doc()
+  },
+  {
+    e_step3.name(),
+    (PyCFunction)PyBobLearnEMJFATrainer_e_step3,
+    METH_VARARGS|METH_KEYWORDS,
+    e_step3.doc()
+  },
+  {
+    m_step1.name(),
+    (PyCFunction)PyBobLearnEMJFATrainer_m_step1,
+    METH_VARARGS|METH_KEYWORDS,
+    m_step1.doc()
+  },
+  {
+    m_step2.name(),
+    (PyCFunction)PyBobLearnEMJFATrainer_m_step2,
+    METH_VARARGS|METH_KEYWORDS,
+    m_step2.doc()
+  },
+  {
+    m_step3.name(),
+    (PyCFunction)PyBobLearnEMJFATrainer_m_step3,
+    METH_VARARGS|METH_KEYWORDS,
+    m_step3.doc()
+  },
+  {
+    finalize1.name(),
+    (PyCFunction)PyBobLearnEMJFATrainer_finalize1,
+    METH_VARARGS|METH_KEYWORDS,
+    finalize1.doc()
+  },
+  {
+    finalize2.name(),
+    (PyCFunction)PyBobLearnEMJFATrainer_finalize2,
+    METH_VARARGS|METH_KEYWORDS,
+    finalize2.doc()
+  },
+  {
+    finalize3.name(),
+    (PyCFunction)PyBobLearnEMJFATrainer_finalize3,
+    METH_VARARGS|METH_KEYWORDS,
+    finalize3.doc()
+  },
+  {
+    enrol.name(),
+    (PyCFunction)PyBobLearnEMJFATrainer_enrol,
+    METH_VARARGS|METH_KEYWORDS,
+    enrol.doc()
+  },
+  {0} /* Sentinel */
+};
+
+
+/******************************************************************/
+/************ Module Section **************************************/
+/******************************************************************/
+
+// Define the JFATrainer type struct; will be initialized later
+PyTypeObject PyBobLearnEMJFATrainer_Type = {
+  PyVarObject_HEAD_INIT(0,0)
+  0
+};
+
+bool init_BobLearnEMJFATrainer(PyObject* module)
+{
+  // initialize the type JFATrainer
+  PyBobLearnEMJFATrainer_Type.tp_name      = JFATrainer_doc.name();
+  PyBobLearnEMJFATrainer_Type.tp_basicsize = sizeof(PyBobLearnEMJFATrainerObject);
+  PyBobLearnEMJFATrainer_Type.tp_flags     = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;//Enable the class inheritance;
+  PyBobLearnEMJFATrainer_Type.tp_doc       = JFATrainer_doc.doc();
+
+  // set the functions
+  PyBobLearnEMJFATrainer_Type.tp_new          = PyType_GenericNew;
+  PyBobLearnEMJFATrainer_Type.tp_init         = reinterpret_cast<initproc>(PyBobLearnEMJFATrainer_init);
+  PyBobLearnEMJFATrainer_Type.tp_dealloc      = reinterpret_cast<destructor>(PyBobLearnEMJFATrainer_delete);
+  PyBobLearnEMJFATrainer_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnEMJFATrainer_RichCompare);
+  PyBobLearnEMJFATrainer_Type.tp_methods      = PyBobLearnEMJFATrainer_methods;
+  PyBobLearnEMJFATrainer_Type.tp_getset       = PyBobLearnEMJFATrainer_getseters;
+  //PyBobLearnEMJFATrainer_Type.tp_call         = reinterpret_cast<ternaryfunc>(PyBobLearnEMJFATrainer_compute_likelihood);
+
+
+  // check that everything is fine
+  if (PyType_Ready(&PyBobLearnEMJFATrainer_Type) < 0) return false;
+
+  // add the type to the module
+  Py_INCREF(&PyBobLearnEMJFATrainer_Type);
+  return PyModule_AddObject(module, "_JFATrainer", (PyObject*)&PyBobLearnEMJFATrainer_Type) >= 0;
+}
+
diff --git a/bob/learn/em/kmeans_machine.cpp b/bob/learn/em/kmeans_machine.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0fc4de7cb3ab4e64fd73bbf52d2ffcf9f5bad4b3
--- /dev/null
+++ b/bob/learn/em/kmeans_machine.cpp
@@ -0,0 +1,768 @@
+/**
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ * @date Fri 26 Dec 16:18:00 2014
+ *
+ * @brief Python API for bob::learn::em
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "main.h"
+
+/******************************************************************/
+/************ Constructor Section *********************************/
+/******************************************************************/
+
+static auto KMeansMachine_doc = bob::extension::ClassDoc(
+  BOB_EXT_MODULE_PREFIX ".KMeansMachine",
+  "This class implements a k-means classifier.\n"
+  "See Section 9.1 of Bishop, \"Pattern recognition and machine learning\", 2006"
+).add_constructor(
+  bob::extension::FunctionDoc(
+    "__init__",
+    "Creates a KMeansMachine",
+    "",
+    true
+  )
+  .add_prototype("n_means,n_inputs","")
+  .add_prototype("other","")
+  .add_prototype("hdf5","")
+  .add_prototype("","")
+
+  .add_parameter("n_means", "int", "Number of means")
+  .add_parameter("n_inputs", "int", "Dimension of the feature vector")
+  .add_parameter("other", ":py:class:`bob.learn.em.KMeansMachine`", "A KMeansMachine object to be copied.")
+  .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
+
+);
+
+
+static int PyBobLearnEMKMeansMachine_init_number(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = KMeansMachine_doc.kwlist(0);
+  int n_inputs    = 1;
+  int n_means = 1;
+  //Parsing the input arguments
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "ii", kwlist, &n_means, &n_inputs))
+    return -1;
+
+  if(n_means < 0){
+    PyErr_Format(PyExc_TypeError, "means argument must be greater than or equal to zero");
+    KMeansMachine_doc.print_usage();
+    return -1;
+  }
+
+  if(n_inputs < 0){
+    PyErr_Format(PyExc_TypeError, "input argument must be greater than or equal to zero");
+    KMeansMachine_doc.print_usage();
+    return -1;
+   }
+
+  self->cxx.reset(new bob::learn::em::KMeansMachine(n_means, n_inputs));
+  return 0;
+}
+
+
+static int PyBobLearnEMKMeansMachine_init_copy(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = KMeansMachine_doc.kwlist(1);
+  PyBobLearnEMKMeansMachineObject* tt;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMKMeansMachine_Type, &tt)){
+    KMeansMachine_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::KMeansMachine(*tt->cxx));
+  return 0;
+}
+
+
+static int PyBobLearnEMKMeansMachine_init_hdf5(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = KMeansMachine_doc.kwlist(2);
+
+  PyBobIoHDF5FileObject* config = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBobIoHDF5File_Converter, &config)){
+    KMeansMachine_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::KMeansMachine(*(config->f)));
+
+  return 0;
+}
+
+
+static int PyBobLearnEMKMeansMachine_init(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // get the number of command line arguments
+  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
+  
+  switch (nargs) {
+
+    case 0: //default initializer ()
+      self->cxx.reset(new bob::learn::em::KMeansMachine());
+      return 0;
+
+    case 1:{
+      //Reading the input argument
+      PyObject* arg = 0;
+      if (PyTuple_Size(args))
+        arg = PyTuple_GET_ITEM(args, 0);
+      else {
+        PyObject* tmp = PyDict_Values(kwargs);
+        auto tmp_ = make_safe(tmp);
+        arg = PyList_GET_ITEM(tmp, 0);
+      }
+
+      // If the constructor input is a KMeansMachine object
+     if (PyBobLearnEMKMeansMachine_Check(arg))
+       return PyBobLearnEMKMeansMachine_init_copy(self, args, kwargs);
+      // If the constructor input is an HDF5 file
+     else if (PyBobIoHDF5File_Check(arg))
+       return PyBobLearnEMKMeansMachine_init_hdf5(self, args, kwargs);
+    }
+    case 2:
+      return PyBobLearnEMKMeansMachine_init_number(self, args, kwargs);
+    default:
+      PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires 0, 1 or 2 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
+      KMeansMachine_doc.print_usage();
+      return -1;
+  }
+  BOB_CATCH_MEMBER("cannot create KMeansMachine", 0)
+  return 0;
+}
+
+
+
+static void PyBobLearnEMKMeansMachine_delete(PyBobLearnEMKMeansMachineObject* self) {
+  self->cxx.reset();
+  Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+static PyObject* PyBobLearnEMKMeansMachine_RichCompare(PyBobLearnEMKMeansMachineObject* self, PyObject* other, int op) {
+  BOB_TRY
+
+  if (!PyBobLearnEMKMeansMachine_Check(other)) {
+    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
+    return 0;
+  }
+  auto other_ = reinterpret_cast<PyBobLearnEMKMeansMachineObject*>(other);
+  switch (op) {
+    case Py_EQ:
+      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+    case Py_NE:
+      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
+    default:
+      Py_INCREF(Py_NotImplemented);
+      return Py_NotImplemented;
+  }
+  BOB_CATCH_MEMBER("cannot compare KMeansMachine objects", 0)
+}
+
+int PyBobLearnEMKMeansMachine_Check(PyObject* o) {
+  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnEMKMeansMachine_Type));
+}
+
+
+/******************************************************************/
+/************ Variables Section ***********************************/
+/******************************************************************/
+
+/***** shape *****/
+static auto shape = bob::extension::VariableDoc(
+  "shape",
+  "(int,int)",
+  "A tuple that represents the number of means and dimensionality of the feature vector``(n_means, dim)``.",
+  ""
+);
+PyObject* PyBobLearnEMKMeansMachine_getShape(PyBobLearnEMKMeansMachineObject* self, void*) {
+  BOB_TRY
+  return Py_BuildValue("(i,i)", self->cxx->getNMeans(), self->cxx->getNInputs());
+  BOB_CATCH_MEMBER("shape could not be read", 0)
+}
+
+/***** MEAN *****/
+
+static auto means = bob::extension::VariableDoc(
+  "means",
+  "array_like <float, 2D>",
+  "The means",
+  ""
+);
+PyObject* PyBobLearnEMKMeansMachine_getMeans(PyBobLearnEMKMeansMachineObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getMeans());
+  BOB_CATCH_MEMBER("means could not be read", 0)
+}
+int PyBobLearnEMKMeansMachine_setMeans(PyBobLearnEMKMeansMachineObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, means.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "means");
+  if (!b) return -1;
+  self->cxx->setMeans(*b);
+  return 0;
+  BOB_CATCH_MEMBER("means could not be set", -1)
+}
+
+
+static PyGetSetDef PyBobLearnEMKMeansMachine_getseters[] = { 
+  {
+   shape.name(),
+   (getter)PyBobLearnEMKMeansMachine_getShape,
+   0,
+   shape.doc(),
+   0
+  },
+  {
+   means.name(),
+   (getter)PyBobLearnEMKMeansMachine_getMeans,
+   (setter)PyBobLearnEMKMeansMachine_setMeans,
+   means.doc(),
+   0
+  },
+  {0}  // Sentinel
+};
+
+
+/******************************************************************/
+/************ Functions Section ***********************************/
+/******************************************************************/
+
+
+/*** save ***/
+static auto save = bob::extension::FunctionDoc(
+  "save",
+  "Save the configuration of the KMeansMachine to a given HDF5 file"
+)
+.add_prototype("hdf5")
+.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing");
+static PyObject* PyBobLearnEMKMeansMachine_Save(PyBobLearnEMKMeansMachineObject* self,  PyObject* args, PyObject* kwargs) {
+
+  BOB_TRY
+  
+  // get list of arguments
+  char** kwlist = save.kwlist(0);  
+  PyBobIoHDF5FileObject* hdf5;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
+
+  auto hdf5_ = make_safe(hdf5);
+  self->cxx->save(*hdf5->f);
+
+  BOB_CATCH_MEMBER("cannot save the data", 0)
+  Py_RETURN_NONE;
+}
+
+/*** load ***/
+static auto load = bob::extension::FunctionDoc(
+  "load",
+  "Load the configuration of the KMeansMachine to a given HDF5 file"
+)
+.add_prototype("hdf5")
+.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
+static PyObject* PyBobLearnEMKMeansMachine_Load(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = load.kwlist(0);  
+  PyBobIoHDF5FileObject* hdf5;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
+  
+  auto hdf5_ = make_safe(hdf5);  
+  self->cxx->load(*hdf5->f);
+
+  BOB_CATCH_MEMBER("cannot load the data", 0)
+  Py_RETURN_NONE;
+}
+
+
+/*** is_similar_to ***/
+static auto is_similar_to = bob::extension::FunctionDoc(
+  "is_similar_to",
+  
+  "Compares this KMeansMachine with the ``other`` one to be approximately the same.",
+  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
+  "relative and absolute precision for the ``weights``, ``biases`` "
+  "and any other values internal to this machine."
+)
+.add_prototype("other, [r_epsilon], [a_epsilon]","output")
+.add_parameter("other", ":py:class:`bob.learn.em.KMeansMachine`", "A KMeansMachine object to be compared.")
+.add_parameter("r_epsilon", "float", "Relative precision.")
+.add_parameter("a_epsilon", "float", "Absolute precision.")
+.add_return("output","bool","True if it is similar, otherwise false.");
+static PyObject* PyBobLearnEMKMeansMachine_IsSimilarTo(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwds) {
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = is_similar_to.kwlist(0);
+
+  //PyObject* other = 0;
+  PyBobLearnEMKMeansMachineObject* other = 0;
+  double r_epsilon = 1.e-5;
+  double a_epsilon = 1.e-8;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
+        &PyBobLearnEMKMeansMachine_Type, &other,
+        &r_epsilon, &a_epsilon)){
+
+        is_similar_to.print_usage(); 
+        return 0;        
+  }
+
+  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
+    Py_RETURN_TRUE;
+  else
+    Py_RETURN_FALSE;
+}
+
+
+/*** resize ***/
+static auto resize = bob::extension::FunctionDoc(
+  "resize",
+  "Allocates space for the statistics and resets to zero.",
+  0,
+  true
+)
+.add_prototype("n_means,n_inputs")
+.add_parameter("n_means", "int", "Number of means")
+.add_parameter("n_inputs", "int", "Dimensionality of the feature vector");
+static PyObject* PyBobLearnEMKMeansMachine_resize(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = resize.kwlist(0);
+
+  int n_means = 0;
+  int n_inputs = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "ii", kwlist, &n_means, &n_inputs)) Py_RETURN_NONE;
+
+  if (n_means <= 0){
+    PyErr_Format(PyExc_TypeError, "n_means must be greater than zero");
+    resize.print_usage();
+    return 0;
+  }
+  if (n_inputs <= 0){
+    PyErr_Format(PyExc_TypeError, "n_inputs must be greater than zero");
+    resize.print_usage();
+    return 0;
+  }
+
+  self->cxx->resize(n_means, n_inputs);
+
+  BOB_CATCH_MEMBER("cannot perform the resize method", 0)
+
+  Py_RETURN_NONE;
+}
+
+/*** get_mean ***/
+static auto get_mean = bob::extension::FunctionDoc(
+  "get_mean",
+  "Get the i'th mean.",
+  ".. note:: An exception is thrown if i is out of range.", 
+  true
+)
+.add_prototype("i")
+.add_parameter("i", "int", "Index of the mean")
+.add_return("mean","array_like <float, 1D>","Mean array");
+static PyObject* PyBobLearnEMKMeansMachine_get_mean(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = get_mean.kwlist(0);
+
+  int i = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) Py_RETURN_NONE;
+ 
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getMean(i));
+
+  BOB_CATCH_MEMBER("cannot get the mean", 0)
+}
+
+
+/*** set_mean ***/
+static auto set_mean = bob::extension::FunctionDoc(
+  "set_mean",
+  "Set the i'th mean.",
+  ".. note:: An exception is thrown if i is out of range.", 
+  true
+)
+.add_prototype("i,mean")
+.add_parameter("i", "int", "Index of the mean")
+.add_parameter("mean", "array_like <float, 1D>", "Mean array");
+static PyObject* PyBobLearnEMKMeansMachine_set_mean(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = set_mean.kwlist(0);
+
+  int i = 0;
+  PyBlitzArrayObject* mean = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "iO&", kwlist, &i, &PyBlitzArray_Converter, &mean)) Py_RETURN_NONE;
+  
+  //protects acquired resources through this scope
+  auto mean_ = make_safe(mean);
+
+  //setting the mean
+  self->cxx->setMean(i, *PyBlitzArrayCxx_AsBlitz<double,1>(mean));
+
+  BOB_CATCH_MEMBER("cannot set the mean", 0)
+  
+  Py_RETURN_NONE;
+}
+
+
+
+/*** get_distance_from_mean ***/
+static auto get_distance_from_mean = bob::extension::FunctionDoc(
+  "get_distance_from_mean",
+  "Return the power of two of the square Euclidean distance of the sample, x, to the i'th mean.",
+  ".. note:: An exception is thrown if i is out of range.", 
+  true
+)
+.add_prototype("input,i","output")
+.add_parameter("input", "array_like <float, 1D>", "The data sample (feature vector)")
+.add_parameter("i", "int", "The index of the mean")
+.add_return("output","float","Square Euclidean distance of the sample, x, to the i'th mean");
+static PyObject* PyBobLearnEMKMeansMachine_get_distance_from_mean(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = get_distance_from_mean.kwlist(0);
+
+  PyBlitzArrayObject* input = 0;
+  int i = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&i", kwlist, &PyBlitzArray_Converter, &input, &i)){ 
+    Py_RETURN_NONE;
+  }
+
+  //protects acquired resources through this scope
+  auto input_ = make_safe(input);
+
+  double output = self->cxx->getDistanceFromMean(*PyBlitzArrayCxx_AsBlitz<double,1>(input),i);
+  return Py_BuildValue("d", output);
+
+  BOB_CATCH_MEMBER("cannot compute the likelihood", 0)
+}
+
+
+/*** get_closest_mean ***/
+static auto get_closest_mean = bob::extension::FunctionDoc(
+  "get_closest_mean",
+  "Calculate the index of the mean that is closest (in terms of square Euclidean distance) to the data sample, x.",
+  "",
+  true
+)
+.add_prototype("input","output")
+.add_parameter("input", "array_like <float, 1D>", "The data sample (feature vector)")
+.add_return("output", "(int, int)", "Tuple containing the closest mean and the minimum distance from the input");
+static PyObject* PyBobLearnEMKMeansMachine_get_closest_mean(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = get_closest_mean.kwlist(0);
+
+  PyBlitzArrayObject* input = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBlitzArray_Converter, &input)) Py_RETURN_NONE;
+
+  //protects acquired resources through this scope
+  auto input_ = make_safe(input);
+
+  size_t closest_mean = 0;
+  double min_distance = -1;   
+  self->cxx->getClosestMean(*PyBlitzArrayCxx_AsBlitz<double,1>(input), closest_mean, min_distance);
+    
+  return Py_BuildValue("(i,d)", closest_mean, min_distance);
+
+  BOB_CATCH_MEMBER("cannot compute the closest mean", 0)
+}
+
+
+/*** get_min_distance ***/
+static auto get_min_distance = bob::extension::FunctionDoc(
+  "get_min_distance",
+  "Output the minimum (Square Euclidean) distance between the input and the closest mean ",
+  "",
+  true
+)
+.add_prototype("input","output")
+.add_parameter("input", "array_like <float, 1D>", "The data sample (feature vector)")
+.add_return("output", "double", "The minimum distance");
+static PyObject* PyBobLearnEMKMeansMachine_get_min_distance(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = get_min_distance.kwlist(0);
+
+  PyBlitzArrayObject* input = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBlitzArray_Converter, &input)) Py_RETURN_NONE;
+
+  //protects acquired resources through this scope
+  auto input_ = make_safe(input);
+
+  double min_distance = 0;   
+  min_distance = self->cxx->getMinDistance(*PyBlitzArrayCxx_AsBlitz<double,1>(input));
+
+  return Py_BuildValue("d", min_distance);
+
+  BOB_CATCH_MEMBER("cannot compute the min distance", 0)
+}
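+
+// Query sketch (hypothetical Python usage; `machine` is assumed to be a
+// bob.learn.em.KMeansMachine and `x` a 1D float64 numpy array of matching
+// dimensionality). These calls mirror the three distance methods bound above:
+//
+//   d_i = machine.get_distance_from_mean(x, i)   # squared distance to mean i
+//   idx, d_min = machine.get_closest_mean(x)     # argmin over all means
+//   d_min2 = machine.get_min_distance(x)         # same value, distance only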
+
+/**** get_variances_and_weights_for_each_cluster ***/
+static auto get_variances_and_weights_for_each_cluster = bob::extension::FunctionDoc(
+  "get_variances_and_weights_for_each_cluster",
+  "For each mean, find the subset of the samples that is closest to that mean, and calculate"
+  " 1) the variance of that subset (the cluster variance)" 
+  " 2) the proportion of the samples represented by that subset (the cluster weight)",
+  "",
+  true
+)
+.add_prototype("input","output")
+.add_parameter("input", "array_like <float, 2D>", "The data sample (feature vector)")
+.add_return("output", "(array_like <float, 2D>, array_like <float, 1D>)", "A tuple with the variances and the weights respectively");
+static PyObject* PyBobLearnEMKMeansMachine_get_variances_and_weights_for_each_cluster(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist =  get_variances_and_weights_for_each_cluster.kwlist(0);
+
+  PyBlitzArrayObject* input = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBlitzArray_Converter, &input)) Py_RETURN_NONE;
+
+  //protects acquired resources through this scope
+  auto input_ = make_safe(input);
+
+  blitz::Array<double,2> variances(self->cxx->getNMeans(),self->cxx->getNInputs());
+  blitz::Array<double,1> weights(self->cxx->getNMeans());
+  
+  self->cxx->getVariancesAndWeightsForEachCluster(*PyBlitzArrayCxx_AsBlitz<double,2>(input),variances,weights);
+
+  return Py_BuildValue("(O,O)",PyBlitzArrayCxx_AsConstNumpy(variances), PyBlitzArrayCxx_AsConstNumpy(weights));
+
+  BOB_CATCH_MEMBER("cannot compute the variances and weights for each cluster", 0)
+}
+
+
+/**** __get_variances_and_weights_for_each_cluster_init__ ***/
+static auto __get_variances_and_weights_for_each_cluster_init__ = bob::extension::FunctionDoc(
+  "__get_variances_and_weights_for_each_cluster_init__",
+  "Methods consecutively called by getVariancesAndWeightsForEachCluster()"
+  "This should help for the parallelization on several nodes by splitting the data and calling"
+  "getVariancesAndWeightsForEachClusterAcc() for each split. In this case, there is a need to sum"
+  "with the m_cache_means, variances, and weights variables before performing the merge on one"
+  "node using getVariancesAndWeightsForEachClusterFin().",
+  "",
+  true
+)
+.add_prototype("variances,weights","")
+.add_parameter("variances", "array_like <float, 2D>", "Variance array")
+.add_parameter("weights", "array_like <float, 1D>", "Weight array");
+static PyObject* PyBobLearnEMKMeansMachine_get_variances_and_weights_for_each_cluster_init(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist =  __get_variances_and_weights_for_each_cluster_init__.kwlist(0);
+
+  PyBlitzArrayObject* variances = 0;
+  PyBlitzArrayObject* weights   = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&", kwlist, &PyBlitzArray_Converter, &variances,  &PyBlitzArray_Converter, &weights)) Py_RETURN_NONE;
+
+  //protects acquired resources through this scope
+  auto weights_   = make_safe(weights);
+  auto variances_ = make_safe(variances);
+
+  self->cxx->getVariancesAndWeightsForEachClusterInit(*PyBlitzArrayCxx_AsBlitz<double,2>(variances), *PyBlitzArrayCxx_AsBlitz<double,1>(weights));
+  Py_RETURN_NONE;
+
+  BOB_CATCH_MEMBER("cannot compute the variances and weights for each cluster", 0)
+}
+
+
+/**** __get_variances_and_weights_for_each_cluster_acc__ ***/
+static auto __get_variances_and_weights_for_each_cluster_acc__ = bob::extension::FunctionDoc(
+  "__get_variances_and_weights_for_each_cluster_acc__",
+  "Methods consecutively called by getVariancesAndWeightsForEachCluster()"
+  "This should help for the parallelization on several nodes by splitting the data and calling"
+  "getVariancesAndWeightsForEachClusterAcc() for each split. In this case, there is a need to sum"
+  "with the m_cache_means, variances, and weights variables before performing the merge on one"
+  "node using getVariancesAndWeightsForEachClusterFin().",
+  "",
+  true
+)
+.add_prototype("data,variances,weights","")
+.add_parameter("data", "array_like <float, 2D>", "data array")
+.add_parameter("variances", "array_like <float, 2D>", "Variance array")
+.add_parameter("weights", "array_like <float, 1D>", "Weight array");
+static PyObject* PyBobLearnEMKMeansMachine_get_variances_and_weights_for_each_cluster_acc(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist =  __get_variances_and_weights_for_each_cluster_acc__.kwlist(0);
+
+  PyBlitzArrayObject* data      = 0;
+  PyBlitzArrayObject* variances = 0;
+  PyBlitzArrayObject* weights   = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&O&", kwlist, &PyBlitzArray_Converter, &data, &PyBlitzArray_Converter, &variances, &PyBlitzArray_Converter, &weights)) Py_RETURN_NONE;
+
+  //protects acquired resources through this scope
+  auto data_      = make_safe(data);
+  auto weights_   = make_safe(weights);
+  auto variances_ = make_safe(variances);
+
+  self->cxx->getVariancesAndWeightsForEachClusterAcc(*PyBlitzArrayCxx_AsBlitz<double,2>(data), *PyBlitzArrayCxx_AsBlitz<double,2>(variances), *PyBlitzArrayCxx_AsBlitz<double,1>(weights));
+  Py_RETURN_NONE;
+
+  BOB_CATCH_MEMBER("cannot compute the variances and weights for each cluster", 0)
+}
+
+
+/**** __get_variances_and_weights_for_each_cluster_fin__ ***/
+static auto __get_variances_and_weights_for_each_cluster_fin__ = bob::extension::FunctionDoc(
+  "__get_variances_and_weights_for_each_cluster_fin__",
+  "Methods consecutively called by getVariancesAndWeightsForEachCluster()"
+  "This should help for the parallelization on several nodes by splitting the data and calling"
+  "getVariancesAndWeightsForEachClusterAcc() for each split. In this case, there is a need to sum"
+  "with the m_cache_means, variances, and weights variables before performing the merge on one"
+  "node using getVariancesAndWeightsForEachClusterFin().",
+  "",
+  true
+)
+.add_prototype("variances,weights","")
+.add_parameter("variances", "array_like <float, 2D>", "Variance array")
+.add_parameter("weights", "array_like <float, 1D>", "Weight array");
+static PyObject* PyBobLearnEMKMeansMachine_get_variances_and_weights_for_each_cluster_fin(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist =  __get_variances_and_weights_for_each_cluster_fin__.kwlist(0);
+
+  PyBlitzArrayObject* variances = 0;
+  PyBlitzArrayObject* weights   = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&", kwlist, &PyBlitzArray_Converter, &variances,  &PyBlitzArray_Converter, &weights)) Py_RETURN_NONE;
+
+  //protects acquired resources through this scope
+  auto weights_   = make_safe(weights);
+  auto variances_ = make_safe(variances);
+
+  self->cxx->getVariancesAndWeightsForEachClusterFin(*PyBlitzArrayCxx_AsBlitz<double,2>(variances), *PyBlitzArrayCxx_AsBlitz<double,1>(weights));
+  Py_RETURN_NONE;
+
+  BOB_CATCH_MEMBER("cannot compute the variances and weights for each cluster", 0)
+}
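+
+// Parallelization sketch (hypothetical Python usage, following the protocol
+// described in the docstrings above: initialize once, accumulate per data
+// split, sum the partial buffers across nodes, then finalize on one node).
+// `machine` is assumed to be a bob.learn.em.KMeansMachine:
+//
+//   import numpy
+//   variances = numpy.zeros(machine.shape)     # (n_means, dim), float64
+//   weights   = numpy.zeros(machine.shape[0])
+//   machine.__get_variances_and_weights_for_each_cluster_init__(variances, weights)
+//   for split in data_splits:                  # each split: 2D float64 array
+//       machine.__get_variances_and_weights_for_each_cluster_acc__(split, variances, weights)
+//   # with several nodes, sum the per-node `variances` and `weights` here
+//   machine.__get_variances_and_weights_for_each_cluster_fin__(variances, weights)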
+
+
+static PyMethodDef PyBobLearnEMKMeansMachine_methods[] = {
+  {
+    save.name(),
+    (PyCFunction)PyBobLearnEMKMeansMachine_Save,
+    METH_VARARGS|METH_KEYWORDS,
+    save.doc()
+  },
+  {
+    load.name(),
+    (PyCFunction)PyBobLearnEMKMeansMachine_Load,
+    METH_VARARGS|METH_KEYWORDS,
+    load.doc()
+  },
+  {
+    is_similar_to.name(),
+    (PyCFunction)PyBobLearnEMKMeansMachine_IsSimilarTo,
+    METH_VARARGS|METH_KEYWORDS,
+    is_similar_to.doc()
+  },
+  {
+    resize.name(),
+    (PyCFunction)PyBobLearnEMKMeansMachine_resize,
+    METH_VARARGS|METH_KEYWORDS,
+    resize.doc()
+  },  
+  {
+    get_mean.name(),
+    (PyCFunction)PyBobLearnEMKMeansMachine_get_mean,
+    METH_VARARGS|METH_KEYWORDS,
+    get_mean.doc()
+  },  
+  {
+    set_mean.name(),
+    (PyCFunction)PyBobLearnEMKMeansMachine_set_mean,
+    METH_VARARGS|METH_KEYWORDS,
+    set_mean.doc()
+  },  
+  {
+    get_distance_from_mean.name(),
+    (PyCFunction)PyBobLearnEMKMeansMachine_get_distance_from_mean,
+    METH_VARARGS|METH_KEYWORDS,
+    get_distance_from_mean.doc()
+  },  
+  {
+    get_closest_mean.name(),
+    (PyCFunction)PyBobLearnEMKMeansMachine_get_closest_mean,
+    METH_VARARGS|METH_KEYWORDS,
+    get_closest_mean.doc()
+  },  
+  {
+    get_min_distance.name(),
+    (PyCFunction)PyBobLearnEMKMeansMachine_get_min_distance,
+    METH_VARARGS|METH_KEYWORDS,
+    get_min_distance.doc()
+  },  
+  {
+    get_variances_and_weights_for_each_cluster.name(),
+    (PyCFunction)PyBobLearnEMKMeansMachine_get_variances_and_weights_for_each_cluster,
+    METH_VARARGS|METH_KEYWORDS,
+    get_variances_and_weights_for_each_cluster.doc()
+  },  
+  {
+    __get_variances_and_weights_for_each_cluster_init__.name(),
+    (PyCFunction)PyBobLearnEMKMeansMachine_get_variances_and_weights_for_each_cluster_init,
+    METH_VARARGS|METH_KEYWORDS,
+    __get_variances_and_weights_for_each_cluster_init__.doc()
+  },  
+  {
+    __get_variances_and_weights_for_each_cluster_acc__.name(),
+    (PyCFunction)PyBobLearnEMKMeansMachine_get_variances_and_weights_for_each_cluster_acc,
+    METH_VARARGS|METH_KEYWORDS,
+    __get_variances_and_weights_for_each_cluster_acc__.doc()
+  },  
+  {
+    __get_variances_and_weights_for_each_cluster_fin__.name(),
+    (PyCFunction)PyBobLearnEMKMeansMachine_get_variances_and_weights_for_each_cluster_fin,
+    METH_VARARGS|METH_KEYWORDS,
+    __get_variances_and_weights_for_each_cluster_fin__.doc()
+  },  
+
+  {0} /* Sentinel */
+};
+
+
+/******************************************************************/
+/************ Module Section **************************************/
+/******************************************************************/
+
+// Define the KMeansMachine type struct; will be initialized later
+PyTypeObject PyBobLearnEMKMeansMachine_Type = {
+  PyVarObject_HEAD_INIT(0,0)
+  0
+};
+
+bool init_BobLearnEMKMeansMachine(PyObject* module)
+{
+  // initialize the type struct
+  PyBobLearnEMKMeansMachine_Type.tp_name = KMeansMachine_doc.name();
+  PyBobLearnEMKMeansMachine_Type.tp_basicsize = sizeof(PyBobLearnEMKMeansMachineObject);
+  PyBobLearnEMKMeansMachine_Type.tp_flags = Py_TPFLAGS_DEFAULT;
+  PyBobLearnEMKMeansMachine_Type.tp_doc = KMeansMachine_doc.doc();
+
+  // set the functions
+  PyBobLearnEMKMeansMachine_Type.tp_new = PyType_GenericNew;
+  PyBobLearnEMKMeansMachine_Type.tp_init = reinterpret_cast<initproc>(PyBobLearnEMKMeansMachine_init);
+  PyBobLearnEMKMeansMachine_Type.tp_dealloc = reinterpret_cast<destructor>(PyBobLearnEMKMeansMachine_delete);
+  PyBobLearnEMKMeansMachine_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnEMKMeansMachine_RichCompare);
+  PyBobLearnEMKMeansMachine_Type.tp_methods = PyBobLearnEMKMeansMachine_methods;
+  PyBobLearnEMKMeansMachine_Type.tp_getset = PyBobLearnEMKMeansMachine_getseters;
+  //PyBobLearnEMGMMMachine_Type.tp_call = reinterpret_cast<ternaryfunc>(PyBobLearnEMGMMMachine_loglikelihood);
+
+
+  // check that everything is fine
+  if (PyType_Ready(&PyBobLearnEMKMeansMachine_Type) < 0) return false;
+
+  // add the type to the module
+  Py_INCREF(&PyBobLearnEMKMeansMachine_Type);
+  return PyModule_AddObject(module, "KMeansMachine", (PyObject*)&PyBobLearnEMKMeansMachine_Type) >= 0;
+}
+
diff --git a/bob/learn/em/kmeans_trainer.cpp b/bob/learn/em/kmeans_trainer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..a65637f59e0d442b79dbcae6d36af2395078b03f
--- /dev/null
+++ b/bob/learn/em/kmeans_trainer.cpp
@@ -0,0 +1,553 @@
+/**
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ * @date Tue 13 Jan 16:50:00 2015
+ *
+ * @brief Python API for bob::learn::em
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "main.h"
+
+/******************************************************************/
+/************ Constructor Section *********************************/
+/******************************************************************/
+
+// InitializationMethod type conversion
+static const std::map<std::string, bob::learn::em::KMeansTrainer::InitializationMethod> IM = {{"RANDOM",  bob::learn::em::KMeansTrainer::InitializationMethod::RANDOM}, {"RANDOM_NO_DUPLICATE", bob::learn::em::KMeansTrainer::InitializationMethod::RANDOM_NO_DUPLICATE}, {"KMEANS_PLUS_PLUS", bob::learn::em::KMeansTrainer::InitializationMethod::KMEANS_PLUS_PLUS}};
+static inline bob::learn::em::KMeansTrainer::InitializationMethod string2IM(const std::string& o){            /* converts string to InitializationMethod type */
+  auto it = IM.find(o);
+  if (it == IM.end()) throw std::runtime_error("The given InitializationMethod '" + o + "' is not known; choose one of ('RANDOM', 'RANDOM_NO_DUPLICATE', 'KMEANS_PLUS_PLUS')");
+  else return it->second;
+}
+static inline const std::string& IM2string(bob::learn::em::KMeansTrainer::InitializationMethod o){            /* converts InitializationMethod type to string */
+  for (auto it = IM.begin(); it != IM.end(); ++it) if (it->second == o) return it->first;
+  throw std::runtime_error("The given InitializationMethod type is not known");
+}
+
+
+static auto KMeansTrainer_doc = bob::extension::ClassDoc(
+  BOB_EXT_MODULE_PREFIX "._KMeansTrainer",
+  "Trains a KMeans machine."
+  "This class implements the expectation-maximization algorithm for a k-means machine."
+  "See Section 9.1 of Bishop, \"Pattern recognition and machine learning\", 2006"
+  "It uses a random initialization of the means followed by the expectation-maximization algorithm"
+).add_constructor(
+  bob::extension::FunctionDoc(
+    "__init__",
+    "Creates a KMeansTrainer",
+    "",
+    true
+  )
+  .add_prototype("initialization_method","")
+  .add_prototype("other","")
+  .add_prototype("","")
+
+  .add_parameter("initialization_method", "str", "The initialization method of the means")
+  .add_parameter("other", ":py:class:`bob.learn.em.KMeansTrainer`", "A KMeansTrainer object to be copied.")
+
+);
+
+
+static int PyBobLearnEMKMeansTrainer_init_copy(PyBobLearnEMKMeansTrainerObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = KMeansTrainer_doc.kwlist(1);
+  PyBobLearnEMKMeansTrainerObject* tt;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMKMeansTrainer_Type, &tt)){
+    KMeansTrainer_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::KMeansTrainer(*tt->cxx));
+  return 0;
+}
+
+static int PyBobLearnEMKMeansTrainer_init_str(PyBobLearnEMKMeansTrainerObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = KMeansTrainer_doc.kwlist(0);
+  char* value;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "s", kwlist, &value)){
+    KMeansTrainer_doc.print_usage();
+    return -1;
+  }
+  self->cxx.reset(new bob::learn::em::KMeansTrainer(string2IM(std::string(value))));
+  return 0;
+}
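+
+// Construction sketch (hypothetical Python usage; assumes the wrapper is
+// exposed as bob.learn.em.KMeansTrainer). The string is mapped to the
+// InitializationMethod enum by string2IM() above:
+//
+//   trainer = bob.learn.em.KMeansTrainer("KMEANS_PLUS_PLUS")
+//   trainer.initialization_method = "RANDOM_NO_DUPLICATE"  # settable afterwards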
+
+
+static int PyBobLearnEMKMeansTrainer_init(PyBobLearnEMKMeansTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
+
+  switch (nargs) {
+
+    case 0:{ //default initializer ()
+      self->cxx.reset(new bob::learn::em::KMeansTrainer());
+      return 0;
+    }
+    case 1:{
+      //Reading the input argument
+      PyObject* arg = 0;
+      if (PyTuple_Size(args))
+        arg = PyTuple_GET_ITEM(args, 0);
+      else {
+        PyObject* tmp = PyDict_Values(kwargs);
+        auto tmp_ = make_safe(tmp);
+        arg = PyList_GET_ITEM(tmp, 0);
+      }
+
+      // If the constructor input is KMeansTrainer object
+      if (PyBobLearnEMKMeansTrainer_Check(arg))
+        return PyBobLearnEMKMeansTrainer_init_copy(self, args, kwargs);
+      else if(PyString_Check(arg))
+        return PyBobLearnEMKMeansTrainer_init_str(self, args, kwargs);
+        //return PyBobLearnEMKMeansTrainer_init_str(self, arg);
+    }
+    default:{
+      PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires 0 or 1 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
+      KMeansTrainer_doc.print_usage();
+      return -1;
+    }
+  }
+  BOB_CATCH_MEMBER("cannot create KMeansTrainer", 0)
+  return 0;
+}
+
+
+static void PyBobLearnEMKMeansTrainer_delete(PyBobLearnEMKMeansTrainerObject* self) {
+  self->cxx.reset();
+  Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+
+int PyBobLearnEMKMeansTrainer_Check(PyObject* o) {
+  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnEMKMeansTrainer_Type));
+}
+
+
+static PyObject* PyBobLearnEMKMeansTrainer_RichCompare(PyBobLearnEMKMeansTrainerObject* self, PyObject* other, int op) {
+  BOB_TRY
+
+  if (!PyBobLearnEMKMeansTrainer_Check(other)) {
+    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
+    return 0;
+  }
+  auto other_ = reinterpret_cast<PyBobLearnEMKMeansTrainerObject*>(other);
+  switch (op) {
+    case Py_EQ:
+      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+    case Py_NE:
+      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
+    default:
+      Py_INCREF(Py_NotImplemented);
+      return Py_NotImplemented;
+  }
+  BOB_CATCH_MEMBER("cannot compare KMeansTrainer objects", 0)
+}
+
+
+/******************************************************************/
+/************ Variables Section ***********************************/
+/******************************************************************/
+
+/***** initialization_method *****/
+static auto initialization_method = bob::extension::VariableDoc(
+  "initialization_method",
+  "str",
+  "Initialization method.",
+  ""
+);
+PyObject* PyBobLearnEMKMeansTrainer_getInitializationMethod(PyBobLearnEMKMeansTrainerObject* self, void*) {
+  BOB_TRY
+  return Py_BuildValue("s", IM2string(self->cxx->getInitializationMethod()).c_str());
+  BOB_CATCH_MEMBER("initialization method could not be read", 0)
+}
+int PyBobLearnEMKMeansTrainer_setInitializationMethod(PyBobLearnEMKMeansTrainerObject* self, PyObject* value, void*) {
+  BOB_TRY
+
+  if (!PyString_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects an str", Py_TYPE(self)->tp_name, initialization_method.name());
+    return -1;
+  }
+  self->cxx->setInitializationMethod(string2IM(PyString_AS_STRING(value)));
+
+  return 0;
+  BOB_CATCH_MEMBER("initialization method could not be set", 0)
+}
+
+
+/***** zeroeth_order_statistics *****/
+static auto zeroeth_order_statistics = bob::extension::VariableDoc(
+  "zeroeth_order_statistics",
+  "array_like <float, 1D>",
+  "Returns the internal statistics. Useful to parallelize the E-step",
+  ""
+);
+PyObject* PyBobLearnEMKMeansTrainer_getZeroethOrderStatistics(PyBobLearnEMKMeansTrainerObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getZeroethOrderStats());
+  BOB_CATCH_MEMBER("zeroeth_order_statistics could not be read", 0)
+}
+int PyBobLearnEMKMeansTrainer_setZeroethOrderStatistics(PyBobLearnEMKMeansTrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, zeroeth_order_statistics.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "zeroeth_order_statistics");
+  if (!b) return -1;
+  self->cxx->setZeroethOrderStats(*b);
+  return 0;
+  BOB_CATCH_MEMBER("zeroeth_order_statistics could not be set", -1)
+}
+
+
+/***** first_order_statistics *****/
+static auto first_order_statistics = bob::extension::VariableDoc(
+  "first_order_statistics",
+  "array_like <float, 2D>",
+  "Returns the internal statistics. Useful to parallelize the E-step",
+  ""
+);
+PyObject* PyBobLearnEMKMeansTrainer_getFirstOrderStatistics(PyBobLearnEMKMeansTrainerObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getFirstOrderStats());
+  BOB_CATCH_MEMBER("first_order_statistics could not be read", 0)
+}
+int PyBobLearnEMKMeansTrainer_setFirstOrderStatistics(PyBobLearnEMKMeansTrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, first_order_statistics.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "first_order_statistics");
+  if (!b) return -1;
+  self->cxx->setFirstOrderStats(*b);
+  return 0;
+  BOB_CATCH_MEMBER("first_order_statistics could not be set", -1)
+}
+
+
+/***** average_min_distance *****/
+static auto average_min_distance = bob::extension::VariableDoc(
+  "average_min_distance",
+  "str",
+  "Average min (square Euclidean) distance. Useful to parallelize the E-step.",
+  ""
+);
+PyObject* PyBobLearnEMKMeansTrainer_getAverageMinDistance(PyBobLearnEMKMeansTrainerObject* self, void*) {
+  BOB_TRY
+  return Py_BuildValue("d", self->cxx->getAverageMinDistance());
+  BOB_CATCH_MEMBER("Average Min Distance method could not be read", 0)
+}
+int PyBobLearnEMKMeansTrainer_setAverageMinDistance(PyBobLearnEMKMeansTrainerObject* self, PyObject* value, void*) {
+  BOB_TRY
+
+  if (!PyNumber_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a float", Py_TYPE(self)->tp_name, average_min_distance.name());
+    return -1;
+  }
+  self->cxx->setAverageMinDistance(PyFloat_AsDouble(value));
+
+  return 0;
+  BOB_CATCH_MEMBER("Average Min Distance could not be set", 0)
+}
+
+
+
+/***** rng *****/
+static auto rng = bob::extension::VariableDoc(
+  "rng",
+  "str",
+  "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.",
+  ""
+);
+PyObject* PyBobLearnEMKMeansTrainer_getRng(PyBobLearnEMKMeansTrainerObject* self, void*) {
+  BOB_TRY
+  //Allocating the correspondent python object
+  
+  PyBoostMt19937Object* retval =
+    (PyBoostMt19937Object*)PyBoostMt19937_Type.tp_alloc(&PyBoostMt19937_Type, 0);
+
+  retval->rng = self->cxx->getRng().get();
+  return Py_BuildValue("O", retval);
+  BOB_CATCH_MEMBER("Rng method could not be read", 0)
+}
+int PyBobLearnEMKMeansTrainer_setRng(PyBobLearnEMKMeansTrainerObject* self, PyObject* value, void*) {
+  BOB_TRY
+
+  if (!PyBoostMt19937_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects an PyBoostMt19937_Check", Py_TYPE(self)->tp_name, rng.name());
+    return -1;
+  }
+
+  PyBoostMt19937Object* boostObject = 0;
+  PyBoostMt19937_Converter(value, &boostObject);
+  self->cxx->setRng((boost::shared_ptr<boost::mt19937>)boostObject->rng);
+
+  return 0;
+  BOB_CATCH_MEMBER("Rng could not be set", 0)
+}
+
+
+
+static PyGetSetDef PyBobLearnEMKMeansTrainer_getseters[] = { 
+  {
+   initialization_method.name(),
+   (getter)PyBobLearnEMKMeansTrainer_getInitializationMethod,
+   (setter)PyBobLearnEMKMeansTrainer_setInitializationMethod,
+   initialization_method.doc(),
+   0
+  },
+  {
+   zeroeth_order_statistics.name(),
+   (getter)PyBobLearnEMKMeansTrainer_getZeroethOrderStatistics,
+   (setter)PyBobLearnEMKMeansTrainer_setZeroethOrderStatistics,
+   zeroeth_order_statistics.doc(),
+   0
+  },
+  {
+   first_order_statistics.name(),
+   (getter)PyBobLearnEMKMeansTrainer_getFirstOrderStatistics,
+   (setter)PyBobLearnEMKMeansTrainer_setFirstOrderStatistics,
+   first_order_statistics.doc(),
+   0
+  },
+  {
+   average_min_distance.name(),
+   (getter)PyBobLearnEMKMeansTrainer_getAverageMinDistance,
+   (setter)PyBobLearnEMKMeansTrainer_setAverageMinDistance,
+   average_min_distance.doc(),
+   0
+  },
+  {
+   rng.name(),
+   (getter)PyBobLearnEMKMeansTrainer_getRng,
+   (setter)PyBobLearnEMKMeansTrainer_setRng,
+   rng.doc(),
+   0
+  },
+  {0}  // Sentinel
+};
+
+
+/******************************************************************/
+/************ Functions Section ***********************************/
+/******************************************************************/
+
+/*** initialize ***/
+static auto initialize = bob::extension::FunctionDoc(
+  "initialize",
+  "Initialise the means randomly",
+  "Data is split into as many chunks as there are means, then each mean is set to a random example within each chunk.",
+  true
+)
+.add_prototype("kmeans_machine,data")
+.add_parameter("kmeans_machine", ":py:class:`bob.learn.em.KMeansMachine`", "KMeansMachine Object")
+.add_parameter("data", "array_like <float, 2D>", "Input data");
+static PyObject* PyBobLearnEMKMeansTrainer_initialize(PyBobLearnEMKMeansTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = initialize.kwlist(0);
+
+  PyBobLearnEMKMeansMachineObject* kmeans_machine = 0;
+  PyBlitzArrayObject* data                          = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMKMeansMachine_Type, &kmeans_machine,
+                                                                 &PyBlitzArray_Converter, &data)) return 0;
+  auto data_ = make_safe(data);
+
+  self->cxx->initialize(*kmeans_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
+
+  BOB_CATCH_MEMBER("cannot perform the initialize method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** eStep ***/
+static auto eStep = bob::extension::FunctionDoc(
+  "eStep",
+  "Compute the eStep, which is basically the distances ",
+  "Accumulate across the dataset:"
+  " -zeroeth and first order statistics"
+  " -average (Square Euclidean) distance from the closest mean",
+  true
+)
+.add_prototype("kmeans_machine,data")
+.add_parameter("kmeans_machine", ":py:class:`bob.learn.em.KMeansMachine`", "KMeansMachine Object")
+.add_parameter("data", "array_like <float, 2D>", "Input data");
+static PyObject* PyBobLearnEMKMeansTrainer_eStep(PyBobLearnEMKMeansTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = eStep.kwlist(0);
+
+  PyBobLearnEMKMeansMachineObject* kmeans_machine;
+  PyBlitzArrayObject* data = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMKMeansMachine_Type, &kmeans_machine,
+                                                                 &PyBlitzArray_Converter, &data)) return 0;
+  auto data_ = make_safe(data);
+
+  self->cxx->eStep(*kmeans_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
+
+
+  BOB_CATCH_MEMBER("cannot perform the eStep method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** mStep ***/
+static auto mStep = bob::extension::FunctionDoc(
+  "mStep",
+  "Updates the mean based on the statistics from the E-step",
+  0,
+  true
+)
+.add_prototype("kmeans_machine")
+.add_parameter("kmeans_machine", ":py:class:`bob.learn.em.KMeansMachine`", "KMeansMachine Object");
+static PyObject* PyBobLearnEMKMeansTrainer_mStep(PyBobLearnEMKMeansTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = mStep.kwlist(0);
+
+  PyBobLearnEMKMeansMachineObject* kmeans_machine;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMKMeansMachine_Type, &kmeans_machine)) return 0;
+
+  self->cxx->mStep(*kmeans_machine->cxx);
+
+  BOB_CATCH_MEMBER("cannot perform the mStep method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** computeLikelihood ***/
+static auto compute_likelihood = bob::extension::FunctionDoc(
+  "compute_likelihood",
+  "This functions returns the average min (Square Euclidean) distance (average distance to the closest mean)",
+  0,
+  true
+)
+.add_prototype("kmeans_machine")
+.add_parameter("kmeans_machine", ":py:class:`bob.learn.em.KMeansMachine`", "KMeansMachine Object");
+static PyObject* PyBobLearnEMKMeansTrainer_compute_likelihood(PyBobLearnEMKMeansTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = compute_likelihood.kwlist(0);
+
+  PyBobLearnEMKMeansMachineObject* kmeans_machine;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMKMeansMachine_Type, &kmeans_machine)) return 0;
+
+  double value = self->cxx->computeLikelihood(*kmeans_machine->cxx);
+  return Py_BuildValue("d", value);
+
+  BOB_CATCH_MEMBER("cannot perform the computeLikelihood method", 0)
+}
+
+
+/*** reset_accumulators ***/
+static auto reset_accumulators = bob::extension::FunctionDoc(
+  "reset_accumulators",
+  "Reset the statistics accumulators to the correct size and a value of zero.",
+  0,
+  true
+)
+.add_prototype("kmeans_machine")
+.add_parameter("kmeans_machine", ":py:class:`bob.learn.em.KMeansMachine`", "KMeansMachine Object");
+static PyObject* PyBobLearnEMKMeansTrainer_reset_accumulators(PyBobLearnEMKMeansTrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = reset_accumulators.kwlist(0);
+
+  PyBobLearnEMKMeansMachineObject* kmeans_machine;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMKMeansMachine_Type, &kmeans_machine)) return 0;
+
+  bool value = self->cxx->resetAccumulators(*kmeans_machine->cxx);
+  return Py_BuildValue("b", value);
+
+  BOB_CATCH_MEMBER("cannot perform the reset_accumulators method", 0)
+}
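+
+/* Taken together, the methods bound above implement one k-means EM loop. A
+ * minimal sketch of the expected call order from Python (the convergence
+ * threshold and loop structure are the caller's choice, not fixed by this
+ * binding):
+ *
+ *   trainer.initialize(machine, data)            # random means, one per chunk
+ *   trainer.eStep(machine, data)                 # accumulate statistics
+ *   dist = trainer.compute_likelihood(machine)   # average min distance
+ *   while True:
+ *     trainer.mStep(machine)                     # update the means
+ *     trainer.eStep(machine, data)
+ *     new_dist = trainer.compute_likelihood(machine)
+ *     if abs(dist - new_dist) < 1e-5: break
+ *     dist = new_dist
+ */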
+
+
+static PyMethodDef PyBobLearnEMKMeansTrainer_methods[] = {
+  {
+    initialize.name(),
+    (PyCFunction)PyBobLearnEMKMeansTrainer_initialize,
+    METH_VARARGS|METH_KEYWORDS,
+    initialize.doc()
+  },
+  {
+    eStep.name(),
+    (PyCFunction)PyBobLearnEMKMeansTrainer_eStep,
+    METH_VARARGS|METH_KEYWORDS,
+    eStep.doc()
+  },
+  {
+    mStep.name(),
+    (PyCFunction)PyBobLearnEMKMeansTrainer_mStep,
+    METH_VARARGS|METH_KEYWORDS,
+    mStep.doc()
+  },
+  {
+    compute_likelihood.name(),
+    (PyCFunction)PyBobLearnEMKMeansTrainer_compute_likelihood,
+    METH_VARARGS|METH_KEYWORDS,
+    compute_likelihood.doc()
+  },
+  {
+    reset_accumulators.name(),
+    (PyCFunction)PyBobLearnEMKMeansTrainer_reset_accumulators,
+    METH_VARARGS|METH_KEYWORDS,
+    reset_accumulators.doc()
+  },
+  {0} /* Sentinel */
+};
+
+
+/******************************************************************/
+/************ Module Section **************************************/
+/******************************************************************/
+
+// Define the KMeansTrainer type struct; will be initialized later
+PyTypeObject PyBobLearnEMKMeansTrainer_Type = {
+  PyVarObject_HEAD_INIT(0,0)
+  0
+};
+
+bool init_BobLearnEMKMeansTrainer(PyObject* module)
+{
+  // initialize the type struct
+  PyBobLearnEMKMeansTrainer_Type.tp_name = KMeansTrainer_doc.name();
+  PyBobLearnEMKMeansTrainer_Type.tp_basicsize = sizeof(PyBobLearnEMKMeansTrainerObject);
+  PyBobLearnEMKMeansTrainer_Type.tp_flags = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;//Enable the class inheritance
+  PyBobLearnEMKMeansTrainer_Type.tp_doc = KMeansTrainer_doc.doc();
+
+  // set the functions
+  PyBobLearnEMKMeansTrainer_Type.tp_new = PyType_GenericNew;
+  PyBobLearnEMKMeansTrainer_Type.tp_init = reinterpret_cast<initproc>(PyBobLearnEMKMeansTrainer_init);
+  PyBobLearnEMKMeansTrainer_Type.tp_dealloc = reinterpret_cast<destructor>(PyBobLearnEMKMeansTrainer_delete);
+  PyBobLearnEMKMeansTrainer_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnEMKMeansTrainer_RichCompare);
+  PyBobLearnEMKMeansTrainer_Type.tp_methods = PyBobLearnEMKMeansTrainer_methods;
+  PyBobLearnEMKMeansTrainer_Type.tp_getset = PyBobLearnEMKMeansTrainer_getseters;
+  PyBobLearnEMKMeansTrainer_Type.tp_call = reinterpret_cast<ternaryfunc>(PyBobLearnEMKMeansTrainer_compute_likelihood);
+
+
+  // check that everything is fine
+  if (PyType_Ready(&PyBobLearnEMKMeansTrainer_Type) < 0) return false;
+
+  // add the type to the module
+  Py_INCREF(&PyBobLearnEMKMeansTrainer_Type);
+  return PyModule_AddObject(module, "_KMeansTrainer", (PyObject*)&PyBobLearnEMKMeansTrainer_Type) >= 0;
+}
+
diff --git a/bob/learn/em/linear_scoring.cpp b/bob/learn/em/linear_scoring.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ab5edd6252f1470ddfe26cec4e578528ff0748c4
--- /dev/null
+++ b/bob/learn/em/linear_scoring.cpp
@@ -0,0 +1,266 @@
+/**
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ * @date Wed 05 Feb 16:10:48 2015
+ *
+ * @brief Python API for bob::learn::em
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "main.h"
+
+/*Converts a PyObject to a list of GMMStats*/
+//template<class R, class P1, class P2>
+static int extract_gmmstats_list(PyObject *list,
+                             std::vector<boost::shared_ptr<const bob::learn::em::GMMStats> >& training_data)
+{
+  for (int i=0; i<PyList_GET_SIZE(list); i++){
+  
+    PyBobLearnEMGMMStatsObject* stats;
+    if (!PyArg_Parse(PyList_GetItem(list, i), "O!", &PyBobLearnEMGMMStats_Type, &stats)){
+      PyErr_Format(PyExc_RuntimeError, "Expected GMMStats objects");
+      return -1;
+    }
+    training_data.push_back(stats->cxx);
+  }
+  return 0;
+}
+
+static int extract_gmmmachine_list(PyObject *list,
+                             std::vector<boost::shared_ptr<const bob::learn::em::GMMMachine> >& training_data)
+{
+  for (int i=0; i<PyList_GET_SIZE(list); i++){
+  
+    PyBobLearnEMGMMMachineObject* machine;
+    if (!PyArg_Parse(PyList_GetItem(list, i), "O!", &PyBobLearnEMGMMMachine_Type, &machine)){
+      PyErr_Format(PyExc_RuntimeError, "Expected GMMMachine objects");
+      return -1;
+    }
+    training_data.push_back(machine->cxx);
+  }
+  return 0;
+}
+
+
+
+/*Converts a PyObject to a list of blitz Arrays*/
+template <int N>
+int extract_array_list(PyObject* list, std::vector<blitz::Array<double,N> >& vec)
+{
+
+  if(list==0)
+    return 0;
+
+  for (int i=0; i<PyList_GET_SIZE(list); i++)
+  {
+    PyBlitzArrayObject* blitz_object; 
+    if (!PyArg_Parse(PyList_GetItem(list, i), "O&", &PyBlitzArray_Converter, &blitz_object)){
+      PyErr_Format(PyExc_RuntimeError, "Expected numpy array object");
+      return -1;
+    }
+    auto blitz_object_ = make_safe(blitz_object);
+    vec.push_back(*PyBlitzArrayCxx_AsBlitz<double,N>(blitz_object));
+  }
+  return 0;
+}
+
+/* converts PyObject to bool and returns false if object is NULL */
+static inline bool f(PyObject* o){return o != 0 && PyObject_IsTrue(o) > 0;}
+
+
+/*** linear_scoring ***/
+static auto linear_scoring1 = bob::extension::FunctionDoc(
+  "linear_scoring",
+  "",
+  0,
+  true
+)
+.add_prototype("models, ubm, test_stats, test_channelOffset, frame_length_normalisation", "output")
+.add_parameter("models", "list(:py:class:`bob.learn.em.GMMMachine`)", "")
+.add_parameter("ubm", ":py:class:`bob.learn.em.GMMMachine`", "")
+.add_parameter("test_stats", "list(:py:class:`bob.learn.em.GMMStats`)", "")
+.add_parameter("test_channelOffset", "list(array_like<float,1>)", "")
+.add_parameter("frame_length_normalisation", "bool", "")
+.add_return("output","array_like<float,1>","Score");
+
+
+static auto linear_scoring2 = bob::extension::FunctionDoc(
+  "linear_scoring",
+  "",
+  0,
+  true
+)
+.add_prototype("models, ubm_mean, ubm_variance, test_stats, test_channelOffset, frame_length_normalisation", "output")
+.add_parameter("models", "list(array_like<float,1>)", "")
+.add_parameter("ubm_mean", "list(array_like<float,1>)", "")
+.add_parameter("ubm_variance", "list(array_like<float,1>)", "")
+.add_parameter("test_stats", "list(:py:class:`bob.learn.em.GMMStats`)", "")
+.add_parameter("test_channelOffset", "list(array_like<float,1>)", "")
+.add_parameter("frame_length_normalisation", "bool", "")
+.add_return("output","array_like<float,1>","Score");
+
+
+
+static auto linear_scoring3 = bob::extension::FunctionDoc(
+  "linear_scoring",
+  "",
+  0,
+  true
+)
+.add_prototype("model, ubm_mean, ubm_variance, test_stats, test_channelOffset, frame_length_normalisation", "output")
+.add_parameter("model", "array_like<float,1>", "")
+.add_parameter("ubm_mean", "array_like<float,1>", "")
+.add_parameter("ubm_variance", "array_like<float,1>", "")
+.add_parameter("test_stats", ":py:class:`bob.learn.em.GMMStats`", "")
+.add_parameter("test_channelOffset", "array_like<float,1>", "")
+.add_parameter("frame_length_normalisation", "bool", "")
+.add_return("output","array_like<float,1>","Score");
+
+static PyObject* PyBobLearnEM_linear_scoring(PyObject*, PyObject* args, PyObject* kwargs) {
+    
+  //Checking the number of arguments
+  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
+    
+  //Reading the first input argument
+  PyObject* arg = 0;
+  if (PyTuple_Size(args))
+    arg = PyTuple_GET_ITEM(args, 0);
+  else {
+    PyObject* tmp = PyDict_Values(kwargs);
+    auto tmp_ = make_safe(tmp);
+    arg = PyList_GET_ITEM(tmp, 0);
+  }
+  
+  //Checking the signature of the method (list of GMMMachine as input)
+  if ((PyList_Check(arg)) && PyBobLearnEMGMMMachine_Check(PyList_GetItem(arg, 0)) && (nargs >= 3) && (nargs<=5) ){
+  
+    char** kwlist = linear_scoring1.kwlist(0);
+
+    PyObject* gmm_list_o                 = 0;
+    PyBobLearnEMGMMMachineObject* ubm  = 0;
+    PyObject* stats_list_o               = 0;
+    PyObject* channel_offset_list_o      = 0;
+    PyObject* frame_length_normalisation = Py_False;
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!O!|O!O!", kwlist, &PyList_Type, &gmm_list_o,
+                                                                       &PyBobLearnEMGMMMachine_Type, &ubm,
+                                                                       &PyList_Type, &stats_list_o,
+                                                                       &PyList_Type, &channel_offset_list_o,
+                                                                       &PyBool_Type, &frame_length_normalisation)){
+      linear_scoring1.print_usage();
+      return 0;
+    }
+
+    std::vector<boost::shared_ptr<const bob::learn::em::GMMStats> > stats_list;
+    if(extract_gmmstats_list(stats_list_o ,stats_list)!=0)
+      return 0;
+
+    std::vector<boost::shared_ptr<const bob::learn::em::GMMMachine> > gmm_list;
+    if(extract_gmmmachine_list(gmm_list_o ,gmm_list)!=0)
+      return 0;
+
+    std::vector<blitz::Array<double,1> > channel_offset_list;
+    if(extract_array_list(channel_offset_list_o ,channel_offset_list)!=0)
+      return 0;
+
+    blitz::Array<double, 2> scores = blitz::Array<double, 2>(gmm_list.size(), stats_list.size());
+    if(channel_offset_list.size()==0)
+      bob::learn::em::linearScoring(gmm_list, *ubm->cxx, stats_list, f(frame_length_normalisation),scores);
+    else
+      bob::learn::em::linearScoring(gmm_list, *ubm->cxx, stats_list, channel_offset_list, f(frame_length_normalisation),scores);
+
+    return PyBlitzArrayCxx_AsConstNumpy(scores);
+  }
+
+  //Checking the signature of the method (list of arrays as input)
+  else if ((PyList_Check(arg)) && PyArray_Check(PyList_GetItem(arg, 0)) && (nargs >= 4) && (nargs<=6) ){
+  
+    char** kwlist = linear_scoring2.kwlist(0);
+
+    PyObject* model_supervector_list_o        = 0;
+    PyBlitzArrayObject* ubm_means             = 0;
+    PyBlitzArrayObject* ubm_variances         = 0;
+    PyObject* stats_list_o                    = 0;
+    PyObject* channel_offset_list_o           = 0;
+    PyObject* frame_length_normalisation      = Py_False;
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&O&O!|O!O!", kwlist, &PyList_Type, &model_supervector_list_o,
+                                                                       &PyBlitzArray_Converter, &ubm_means,
+                                                                       &PyBlitzArray_Converter, &ubm_variances,
+                                                                       &PyList_Type, &stats_list_o,
+                                                                       &PyList_Type, &channel_offset_list_o,
+                                                                       &PyBool_Type, &frame_length_normalisation)){
+      linear_scoring2.print_usage(); 
+      return 0;
+    }
+    
+    //protects acquired resources through this scope
+    auto ubm_means_ = make_safe(ubm_means);
+    auto ubm_variances_ = make_safe(ubm_variances);    
+
+    std::vector<blitz::Array<double,1> > model_supervector_list;
+    if(extract_array_list(model_supervector_list_o ,model_supervector_list)!=0)
+      return 0;
+
+    std::vector<boost::shared_ptr<const bob::learn::em::GMMStats> > stats_list;
+    if(extract_gmmstats_list(stats_list_o ,stats_list)!=0)
+      return 0;
+
+    std::vector<blitz::Array<double,1> > channel_offset_list;
+    if(extract_array_list(channel_offset_list_o ,channel_offset_list)!=0)
+      return 0;
+
+    blitz::Array<double, 2> scores = blitz::Array<double, 2>(model_supervector_list.size(), stats_list.size());
+    if(channel_offset_list.size()==0)
+      bob::learn::em::linearScoring(model_supervector_list, *PyBlitzArrayCxx_AsBlitz<double,1>(ubm_means),*PyBlitzArrayCxx_AsBlitz<double,1>(ubm_variances), stats_list, f(frame_length_normalisation),scores);
+    else
+      bob::learn::em::linearScoring(model_supervector_list, *PyBlitzArrayCxx_AsBlitz<double,1>(ubm_means),*PyBlitzArrayCxx_AsBlitz<double,1>(ubm_variances), stats_list, channel_offset_list, f(frame_length_normalisation),scores);
+
+    return PyBlitzArrayCxx_AsConstNumpy(scores);
+  
+  }
+  
+  //Checking the signature of the method (single array as input)
+  else if (PyArray_Check(arg) && (nargs >= 5) && (nargs<=6) ){
+  
+    char** kwlist = linear_scoring3.kwlist(0);
+
+    PyBlitzArrayObject* model                 = 0;
+    PyBlitzArrayObject* ubm_means             = 0;
+    PyBlitzArrayObject* ubm_variances         = 0;
+    PyBobLearnEMGMMStatsObject* stats       = 0;
+    PyBlitzArrayObject* channel_offset        = 0;
+    PyObject* frame_length_normalisation      = Py_False;
+
+    if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&O&O!O&|O!", kwlist, &PyBlitzArray_Converter, &model,
+                                                                       &PyBlitzArray_Converter, &ubm_means,
+                                                                       &PyBlitzArray_Converter, &ubm_variances,
+                                                                       &PyBobLearnEMGMMStats_Type, &stats,
+                                                                       &PyBlitzArray_Converter, &channel_offset,
+                                                                       &PyBool_Type, &frame_length_normalisation)){
+      linear_scoring3.print_usage(); 
+      return 0;
+    }
+    
+    //protects acquired resources through this scope
+    auto model_ = make_safe(model);
+    auto ubm_means_ = make_safe(ubm_means);
+    auto ubm_variances_ = make_safe(ubm_variances);
+    auto channel_offset_ = make_safe(channel_offset);
+
+    double score = bob::learn::em::linearScoring(*PyBlitzArrayCxx_AsBlitz<double,1>(model), *PyBlitzArrayCxx_AsBlitz<double,1>(ubm_means),*PyBlitzArrayCxx_AsBlitz<double,1>(ubm_variances), *stats->cxx, *PyBlitzArrayCxx_AsBlitz<double,1>(channel_offset), f(frame_length_normalisation));
+
+    return Py_BuildValue("d",score);
+  }
+
+  
+  else{
+    PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - linear_scoring requires 3 to 6 arguments, but you provided %d (see help)", nargs);
+    linear_scoring1.print_usage();
+    linear_scoring2.print_usage();
+    linear_scoring3.print_usage();
+    return 0;
+  }
+
+}
+
diff --git a/bob/learn/em/main.cpp b/bob/learn/em/main.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c166cffa58657a600cbd83dc5b2519fb9f473443
--- /dev/null
+++ b/bob/learn/em/main.cpp
@@ -0,0 +1,145 @@
+/**
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ * @date Fri Nov 21 12:39:21 CET 2014
+ *
+ * @brief Bindings to bob::learn::em routines
+ */
+
+#ifdef NO_IMPORT_ARRAY
+#undef NO_IMPORT_ARRAY
+#endif
+#include "main.h"
+#include "ztnorm.cpp"
+#include "linear_scoring.cpp"
+
+
+static PyMethodDef module_methods[] = {
+  {
+    zt_norm.name(),
+    (PyCFunction)PyBobLearnEM_ztNorm,
+    METH_VARARGS|METH_KEYWORDS,
+    zt_norm.doc()
+  },
+  {
+    t_norm.name(),
+    (PyCFunction)PyBobLearnEM_tNorm,
+    METH_VARARGS|METH_KEYWORDS,
+    t_norm.doc()
+  },
+  {
+    z_norm.name(),
+    (PyCFunction)PyBobLearnEM_zNorm,
+    METH_VARARGS|METH_KEYWORDS,
+    z_norm.doc()
+  },
+  {
+    linear_scoring1.name(),
+    (PyCFunction)PyBobLearnEM_linear_scoring,
+    METH_VARARGS|METH_KEYWORDS,
+    linear_scoring1.doc()
+  },
+
+  {0}//Sentinel
+};
+
+
+PyDoc_STRVAR(module_docstr, "Bob EM based Machine Learning Routines");
+
+int PyBobLearnEM_APIVersion = BOB_LEARN_EM_API_VERSION;
+
+
+#if PY_VERSION_HEX >= 0x03000000
+static PyModuleDef module_definition = {
+  PyModuleDef_HEAD_INIT,
+  BOB_EXT_MODULE_NAME,
+  module_docstr,
+  -1,
+  module_methods,
+  0, 0, 0, 0
+};
+#endif
+
+static PyObject* create_module (void) {
+
+# if PY_VERSION_HEX >= 0x03000000
+  PyObject* module = PyModule_Create(&module_definition);
+# else
+  PyObject* module = Py_InitModule3(BOB_EXT_MODULE_NAME, module_methods, module_docstr);
+# endif
+  if (!module) return 0;
+  auto module_ = make_safe(module); ///< protects against early returns
+
+  if (PyModule_AddStringConstant(module, "__version__", BOB_EXT_MODULE_VERSION) < 0) return 0;
+  if (!init_BobLearnEMGaussian(module)) return 0;
+  if (!init_BobLearnEMGMMStats(module)) return 0;
+  if (!init_BobLearnEMGMMMachine(module)) return 0;
+  if (!init_BobLearnEMKMeansMachine(module)) return 0;
+  if (!init_BobLearnEMKMeansTrainer(module)) return 0;
+  //if (!init_BobLearnEMGMMBaseTrainer(module)) return 0;
+  if (!init_BobLearnEMMLGMMTrainer(module)) return 0;  
+  if (!init_BobLearnEMMAPGMMTrainer(module)) return 0;
+
+  if (!init_BobLearnEMJFABase(module)) return 0;
+  if (!init_BobLearnEMJFAMachine(module)) return 0;
+  if (!init_BobLearnEMJFATrainer(module)) return 0;
+
+  if (!init_BobLearnEMISVBase(module)) return 0;
+  if (!init_BobLearnEMISVMachine(module)) return 0;
+  if (!init_BobLearnEMISVTrainer(module)) return 0;
+
+  if (!init_BobLearnEMIVectorMachine(module)) return 0;
+  if (!init_BobLearnEMIVectorTrainer(module)) return 0;
+    
+  if (!init_BobLearnEMPLDABase(module)) return 0;
+  if (!init_BobLearnEMPLDAMachine(module)) return 0;
+  if (!init_BobLearnEMPLDATrainer(module)) return 0; 
+
+  if (!init_BobLearnEMEMPCATrainer(module)) return 0;  
+
+
+  static void* PyBobLearnEM_API[PyBobLearnEM_API_pointers];
+
+  /* exhaustive list of C APIs */
+
+  /**************
+   * Versioning *
+   **************/
+
+  PyBobLearnEM_API[PyBobLearnEM_APIVersion_NUM] = (void *)&PyBobLearnEM_APIVersion;
+
+
+#if PY_VERSION_HEX >= 0x02070000
+
+  /* defines the PyCapsule */
+
+  PyObject* c_api_object = PyCapsule_New((void *)PyBobLearnEM_API,
+      BOB_EXT_MODULE_PREFIX "." BOB_EXT_MODULE_NAME "._C_API", 0);
+
+#else
+
+  PyObject* c_api_object = PyCObject_FromVoidPtr((void *)PyBobLearnEM_API, 0);
+
+#endif
+
+  if (!c_api_object) return 0;
+
+  if (PyModule_AddObject(module, "_C_API", c_api_object) < 0) return 0;
+
+
+  /* imports bob.learn.em's C-API dependencies */
+  if (import_bob_blitz() < 0) return 0;
+  if (import_bob_core_random() < 0) return 0;
+  if (import_bob_io_base() < 0) return 0;
+  //if (import_bob_learn_linear() < 0) return 0;
+
+  Py_INCREF(module);
+  return module;
+
+}
+
+PyMODINIT_FUNC BOB_EXT_ENTRY_NAME (void) {
+# if PY_VERSION_HEX >= 0x03000000
+  return
+# endif
+    create_module();
+}
diff --git a/bob/learn/em/main.h b/bob/learn/em/main.h
new file mode 100644
index 0000000000000000000000000000000000000000..4afa17d3625ff099f4fd8eed079b5427f851f5e1
--- /dev/null
+++ b/bob/learn/em/main.h
@@ -0,0 +1,312 @@
+/**
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ * @date Fri Nov 21 10:31:25 CET 2014
+ *
+ * @brief Header file for bindings to bob::learn::em
+ */
+
+#ifndef BOB_LEARN_EM_MAIN_H
+#define BOB_LEARN_EM_MAIN_H
+
+#include <bob.blitz/cppapi.h>
+#include <bob.blitz/cleanup.h>
+#include <bob.core/random_api.h>
+#include <bob.io.base/api.h>
+
+#include <bob.learn.linear/api.h>
+
+#include <bob.extension/documentation.h>
+
+#define BOB_LEARN_EM_MODULE
+#include <bob.learn.em/api.h>
+
+#include <bob.learn.em/Gaussian.h>
+#include <bob.learn.em/GMMStats.h>
+#include <bob.learn.em/GMMMachine.h>
+#include <bob.learn.em/KMeansMachine.h>
+
+#include <bob.learn.em/KMeansTrainer.h>
+//#include <bob.learn.em/GMMBaseTrainer.h>
+#include <bob.learn.em/ML_GMMTrainer.h>
+#include <bob.learn.em/MAP_GMMTrainer.h>
+
+#include <bob.learn.em/JFABase.h>
+#include <bob.learn.em/JFAMachine.h>
+#include <bob.learn.em/JFATrainer.h>
+
+#include <bob.learn.em/ISVBase.h>
+#include <bob.learn.em/ISVMachine.h>
+#include <bob.learn.em/ISVTrainer.h>
+
+
+#include <bob.learn.em/IVectorMachine.h>
+#include <bob.learn.em/IVectorTrainer.h>
+
+#include <bob.learn.em/EMPCATrainer.h>
+
+#include <bob.learn.em/PLDAMachine.h>
+#include <bob.learn.em/PLDATrainer.h>
+
+#include <bob.learn.em/ZTNorm.h>
+
+
+
+#if PY_VERSION_HEX >= 0x03000000
+#define PyInt_Check PyLong_Check
+#define PyInt_AS_LONG PyLong_AS_LONG
+#define PyString_Check PyUnicode_Check
+#define PyString_AS_STRING(x) PyBytes_AS_STRING(make_safe(PyUnicode_AsUTF8String(x)).get())
+#endif
+
+#define TRY try{
+
+#define CATCH(message,ret) }\
+  catch (std::exception& e) {\
+    PyErr_SetString(PyExc_RuntimeError, e.what());\
+    return ret;\
+  } \
+  catch (...) {\
+    PyErr_Format(PyExc_RuntimeError, "%s " message ": unknown exception caught", Py_TYPE(self)->tp_name);\
+    return ret;\
+  }
+
+#define CATCH_(message, ret) }\
+  catch (std::exception& e) {\
+    PyErr_SetString(PyExc_RuntimeError, e.what());\
+    return ret;\
+  } \
+  catch (...) {\
+    PyErr_Format(PyExc_RuntimeError, message ": unknown exception caught");\
+    return ret;\
+  }
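+
+/* TRY/CATCH and CATCH_ mirror the BOB_TRY/BOB_CATCH_MEMBER macros used
+ * throughout these bindings: they wrap a function body so that any C++
+ * exception is translated into a Python RuntimeError instead of crossing the
+ * language boundary. A sketch of the intended use (CATCH expects a `self`
+ * variable in scope to build the error message; `do_work` is a placeholder):
+ *
+ *   static PyObject* some_method(PyBobLearnEMGaussianObject* self, PyObject*) {
+ *     TRY
+ *       return do_work(self);   // may throw std::exception
+ *     CATCH("some_method failed", 0)
+ *   }
+ */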
+
+static inline char* c(const char* o){return const_cast<char*>(o);}  /* converts const char* to char* */
+
+/// inserts the given key, value pair into the given dictionaries
+static inline int insert_item_string(PyObject* dict, PyObject* entries, const char* key, Py_ssize_t value){
+  auto v = make_safe(Py_BuildValue("n", value));
+  if (PyDict_SetItemString(dict, key, v.get()) < 0) return -1;
+  return PyDict_SetItemString(entries, key, v.get());
+}
+
+// Gaussian
+typedef struct {
+  PyObject_HEAD
+  boost::shared_ptr<bob::learn::em::Gaussian> cxx;
+} PyBobLearnEMGaussianObject;
+
+extern PyTypeObject PyBobLearnEMGaussian_Type;
+bool init_BobLearnEMGaussian(PyObject* module);
+int PyBobLearnEMGaussian_Check(PyObject* o);
+
+
+// GMMStats
+typedef struct {
+  PyObject_HEAD
+  boost::shared_ptr<bob::learn::em::GMMStats> cxx;
+} PyBobLearnEMGMMStatsObject;
+
+extern PyTypeObject PyBobLearnEMGMMStats_Type;
+bool init_BobLearnEMGMMStats(PyObject* module);
+int PyBobLearnEMGMMStats_Check(PyObject* o);
+
+
+// GMMMachine
+typedef struct {
+  PyObject_HEAD
+  boost::shared_ptr<bob::learn::em::GMMMachine> cxx;
+} PyBobLearnEMGMMMachineObject;
+
+extern PyTypeObject PyBobLearnEMGMMMachine_Type;
+bool init_BobLearnEMGMMMachine(PyObject* module);
+int PyBobLearnEMGMMMachine_Check(PyObject* o);
+
+
+// KMeansMachine
+typedef struct {
+  PyObject_HEAD
+  boost::shared_ptr<bob::learn::em::KMeansMachine> cxx;
+} PyBobLearnEMKMeansMachineObject;
+
+extern PyTypeObject PyBobLearnEMKMeansMachine_Type;
+bool init_BobLearnEMKMeansMachine(PyObject* module);
+int PyBobLearnEMKMeansMachine_Check(PyObject* o);
+
+
+// KMeansTrainer
+typedef struct {
+  PyObject_HEAD
+  boost::shared_ptr<bob::learn::em::KMeansTrainer> cxx;
+} PyBobLearnEMKMeansTrainerObject;
+
+extern PyTypeObject PyBobLearnEMKMeansTrainer_Type;
+bool init_BobLearnEMKMeansTrainer(PyObject* module);
+int PyBobLearnEMKMeansTrainer_Check(PyObject* o);
+
+
+// GMMBaseTrainer
+/*
+typedef struct {
+  PyObject_HEAD
+  boost::shared_ptr<bob::learn::em::GMMBaseTrainer> cxx;
+} PyBobLearnEMGMMBaseTrainerObject;
+
+extern PyTypeObject PyBobLearnEMGMMBaseTrainer_Type;
+bool init_BobLearnEMGMMBaseTrainer(PyObject* module);
+int PyBobLearnEMGMMBaseTrainer_Check(PyObject* o);
+*/
+
+// ML_GMMTrainer
+typedef struct {
+  PyObject_HEAD
+  boost::shared_ptr<bob::learn::em::ML_GMMTrainer> cxx;
+} PyBobLearnEMMLGMMTrainerObject;
+
+extern PyTypeObject PyBobLearnEMMLGMMTrainer_Type;
+bool init_BobLearnEMMLGMMTrainer(PyObject* module);
+int PyBobLearnEMMLGMMTrainer_Check(PyObject* o);
+
+
+// MAP_GMMTrainer
+typedef struct {
+  PyObject_HEAD
+  boost::shared_ptr<bob::learn::em::MAP_GMMTrainer> cxx;
+} PyBobLearnEMMAPGMMTrainerObject;
+
+extern PyTypeObject PyBobLearnEMMAPGMMTrainer_Type;
+bool init_BobLearnEMMAPGMMTrainer(PyObject* module);
+int PyBobLearnEMMAPGMMTrainer_Check(PyObject* o);
+
+
+// JFABase
+typedef struct {
+  PyObject_HEAD
+  boost::shared_ptr<bob::learn::em::JFABase> cxx;
+} PyBobLearnEMJFABaseObject;
+
+extern PyTypeObject PyBobLearnEMJFABase_Type;
+bool init_BobLearnEMJFABase(PyObject* module);
+int PyBobLearnEMJFABase_Check(PyObject* o);
+
+
+// ISVBase
+typedef struct {
+  PyObject_HEAD
+  boost::shared_ptr<bob::learn::em::ISVBase> cxx;
+} PyBobLearnEMISVBaseObject;
+
+extern PyTypeObject PyBobLearnEMISVBase_Type;
+bool init_BobLearnEMISVBase(PyObject* module);
+int PyBobLearnEMISVBase_Check(PyObject* o);
+
+
+// JFAMachine
+typedef struct {
+  PyObject_HEAD
+  boost::shared_ptr<bob::learn::em::JFAMachine> cxx;
+} PyBobLearnEMJFAMachineObject;
+
+extern PyTypeObject PyBobLearnEMJFAMachine_Type;
+bool init_BobLearnEMJFAMachine(PyObject* module);
+int PyBobLearnEMJFAMachine_Check(PyObject* o);
+
+// JFATrainer
+typedef struct {
+  PyObject_HEAD
+  boost::shared_ptr<bob::learn::em::JFATrainer> cxx;
+} PyBobLearnEMJFATrainerObject;
+
+
+extern PyTypeObject PyBobLearnEMJFATrainer_Type;
+bool init_BobLearnEMJFATrainer(PyObject* module);
+int PyBobLearnEMJFATrainer_Check(PyObject* o);
+
+// ISVMachine
+typedef struct {
+  PyObject_HEAD
+  boost::shared_ptr<bob::learn::em::ISVMachine> cxx;
+} PyBobLearnEMISVMachineObject;
+
+extern PyTypeObject PyBobLearnEMISVMachine_Type;
+bool init_BobLearnEMISVMachine(PyObject* module);
+int PyBobLearnEMISVMachine_Check(PyObject* o);
+
+// ISVTrainer
+typedef struct {
+  PyObject_HEAD
+  boost::shared_ptr<bob::learn::em::ISVTrainer> cxx;
+} PyBobLearnEMISVTrainerObject;
+
+extern PyTypeObject PyBobLearnEMISVTrainer_Type;
+bool init_BobLearnEMISVTrainer(PyObject* module);
+int PyBobLearnEMISVTrainer_Check(PyObject* o);
+
+// IVectorMachine
+typedef struct {
+  PyObject_HEAD
+  boost::shared_ptr<bob::learn::em::IVectorMachine> cxx;
+} PyBobLearnEMIVectorMachineObject;
+
+extern PyTypeObject PyBobLearnEMIVectorMachine_Type;
+bool init_BobLearnEMIVectorMachine(PyObject* module);
+int PyBobLearnEMIVectorMachine_Check(PyObject* o);
+
+
+// IVectorTrainer
+typedef struct {
+  PyObject_HEAD
+  boost::shared_ptr<bob::learn::em::IVectorTrainer> cxx;
+} PyBobLearnEMIVectorTrainerObject;
+
+extern PyTypeObject PyBobLearnEMIVectorTrainer_Type;
+bool init_BobLearnEMIVectorTrainer(PyObject* module);
+int PyBobLearnEMIVectorTrainer_Check(PyObject* o);
+
+
+// PLDABase
+typedef struct {
+  PyObject_HEAD
+  boost::shared_ptr<bob::learn::em::PLDABase> cxx;
+} PyBobLearnEMPLDABaseObject;
+
+extern PyTypeObject PyBobLearnEMPLDABase_Type;
+bool init_BobLearnEMPLDABase(PyObject* module);
+int PyBobLearnEMPLDABase_Check(PyObject* o);
+
+
+// PLDAMachine
+typedef struct {
+  PyObject_HEAD
+  boost::shared_ptr<bob::learn::em::PLDAMachine> cxx;
+} PyBobLearnEMPLDAMachineObject;
+
+extern PyTypeObject PyBobLearnEMPLDAMachine_Type;
+bool init_BobLearnEMPLDAMachine(PyObject* module);
+int PyBobLearnEMPLDAMachine_Check(PyObject* o);
+
+
+// PLDATrainer
+typedef struct {
+  PyObject_HEAD
+  boost::shared_ptr<bob::learn::em::PLDATrainer> cxx;
+} PyBobLearnEMPLDATrainerObject;
+
+extern PyTypeObject PyBobLearnEMPLDATrainer_Type;
+bool init_BobLearnEMPLDATrainer(PyObject* module);
+int PyBobLearnEMPLDATrainer_Check(PyObject* o);
+
+
+
+// EMPCATrainer
+typedef struct {
+  PyObject_HEAD
+  boost::shared_ptr<bob::learn::em::EMPCATrainer> cxx;
+} PyBobLearnEMEMPCATrainerObject;
+
+extern PyTypeObject PyBobLearnEMEMPCATrainer_Type;
+bool init_BobLearnEMEMPCATrainer(PyObject* module);
+int PyBobLearnEMEMPCATrainer_Check(PyObject* o);
+
+
+
+#endif // BOB_LEARN_EM_MAIN_H
diff --git a/bob/learn/em/plda_base.cpp b/bob/learn/em/plda_base.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c30cab6d93d4bc800db6f16b8d53fc8286cd64d8
--- /dev/null
+++ b/bob/learn/em/plda_base.cpp
@@ -0,0 +1,1097 @@
+/**
+ * @date Thu Jan 29 15:44:15 2015 +0200
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ *
+ * @brief Python API for bob::learn::em
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "main.h"
+
+/******************************************************************/
+/************ Constructor Section *********************************/
+/******************************************************************/
+
+static auto PLDABase_doc = bob::extension::ClassDoc(
+  BOB_EXT_MODULE_PREFIX ".PLDABase",
+
+  "This class is a container for the :math:`F` (between class variantion matrix), :math:`G` (within class variantion matrix) and :math:`\\Sigma` "
+  "matrices and the mean vector :math:`\\mu` of a PLDA model. This also"
+  "precomputes useful matrices to make the model scalable."
+  "References: [ElShafey2014,PrinceElder2007,LiFu2012]",
+  ""
+).add_constructor(
+  bob::extension::FunctionDoc(
+    "__init__",
+
+     "Constructor, builds a new PLDABase. :math:`F`, :math:`G` "
+     "and :math:`\\Sigma` are initialized to the 'eye' matrix (matrix with 1's "
+     "on the diagonal and 0 outside), and :math:`\\mu` is initialized to 0.",
+
+    "",
+    true
+  )
+  .add_prototype("dim_d,dim_f,dim_g,variance_threshold","")
+  .add_prototype("other","")
+  .add_prototype("hdf5","")
+
+  .add_parameter("dim_D", "int", "Dimensionality of the feature vector.")
+  .add_parameter("dim_F", "int", "Size of :math:`F`(between class variantion matrix).")
+  .add_parameter("dim_G", "int", "Size of :math:`G`(within class variantion matrix).")
+  .add_parameter("variance_threshold", "double", "The smallest possible value of the variance (Ignored if set to 0.)")
+  
+  .add_parameter("other", ":py:class:`bob.learn.em.PLDABase`", "A PLDABase object to be copied.")
+  .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
+
+);
+
+
+static int PyBobLearnEMPLDABase_init_copy(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = PLDABase_doc.kwlist(1);
+  PyBobLearnEMPLDABaseObject* o;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMPLDABase_Type, &o)){
+    PLDABase_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::PLDABase(*o->cxx));
+  return 0;
+}
+
+
+static int PyBobLearnEMPLDABase_init_hdf5(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = PLDABase_doc.kwlist(2);
+
+  PyBobIoHDF5FileObject* config = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBobIoHDF5File_Converter, &config)){
+    PLDABase_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::PLDABase(*(config->f)));
+
+  return 0;
+}
+
+
+static int PyBobLearnEMPLDABase_init_dim(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = PLDABase_doc.kwlist(0);
+  
+  int dim_D, dim_F, dim_G = 1;
+  double variance_threshold = 0.0;
+
+  // parse the three dimensionalities and the optional variance threshold
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "iii|d", kwlist, &dim_D, &dim_F, &dim_G, &variance_threshold)){
+    PLDABase_doc.print_usage();
+    return -1;
+  }
+  
+  if(dim_D <= 0){
+    PyErr_Format(PyExc_TypeError, "dim_D argument must be greater than or equal to one");
+    return -1;
+  }
+  
+  if(dim_F <= 0){
+    PyErr_Format(PyExc_TypeError, "dim_F argument must be greater than or equal to one");
+    return -1;
+  }
+
+  if(dim_G <= 0){
+    PyErr_Format(PyExc_TypeError, "dim_G argument must be greater than or equal to one");
+    return -1;
+  }
+
+  if(variance_threshold < 0){
+    PyErr_Format(PyExc_TypeError, "variance_threshold argument must be greater than or equal to zero");
+    return -1;
+  }
+
+  
+  self->cxx.reset(new bob::learn::em::PLDABase(dim_D, dim_F, dim_G, variance_threshold));
+  return 0;
+}
+
+static int PyBobLearnEMPLDABase_init(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // get the number of command line arguments
+  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
+
+  if(nargs==1){
+    //Reading the input argument
+    PyObject* arg = 0;
+    if (PyTuple_Size(args))
+      arg = PyTuple_GET_ITEM(args, 0);
+    else {
+      PyObject* tmp = PyDict_Values(kwargs);
+      auto tmp_ = make_safe(tmp);
+      arg = PyList_GET_ITEM(tmp, 0);
+    }
+
+    // If the constructor input is a PLDABase object
+    if (PyBobLearnEMPLDABase_Check(arg))
+      return PyBobLearnEMPLDABase_init_copy(self, args, kwargs);
+    // If the constructor input is a HDF5
+    else if (PyBobIoHDF5File_Check(arg))
+      return PyBobLearnEMPLDABase_init_hdf5(self, args, kwargs);
+  }
+  else if((nargs==3)||(nargs==4))
+    return PyBobLearnEMPLDABase_init_dim(self, args, kwargs);
+  else{
+    PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires 1, 3 or 4 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
+    PLDABase_doc.print_usage();
+    return -1;
+  }
+  BOB_CATCH_MEMBER("cannot create PLDABase", 0)
+  return 0;
+}
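+
+/* The dispatcher above selects between the three constructor prototypes
+ * documented in PLDABase_doc. A minimal sketch of the corresponding calls
+ * from Python, assuming the public name bob.learn.em.PLDABase and arbitrary
+ * example dimensions:
+ *
+ *   base = bob.learn.em.PLDABase(7, 2, 3)                       # D, F, G sizes
+ *   copy = bob.learn.em.PLDABase(base)                          # copy constructor
+ *   base.save(bob.io.base.HDF5File('plda.hdf5', 'w'))
+ *   loaded = bob.learn.em.PLDABase(bob.io.base.HDF5File('plda.hdf5'))
+ */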
+
+
+
+static void PyBobLearnEMPLDABase_delete(PyBobLearnEMPLDABaseObject* self) {
+  self->cxx.reset();
+  Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+static PyObject* PyBobLearnEMPLDABase_RichCompare(PyBobLearnEMPLDABaseObject* self, PyObject* other, int op) {
+  BOB_TRY
+
+  if (!PyBobLearnEMPLDABase_Check(other)) {
+    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
+    return 0;
+  }
+  auto other_ = reinterpret_cast<PyBobLearnEMPLDABaseObject*>(other);
+  switch (op) {
+    case Py_EQ:
+      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+    case Py_NE:
+      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
+    default:
+      Py_INCREF(Py_NotImplemented);
+      return Py_NotImplemented;
+  }
+  BOB_CATCH_MEMBER("cannot compare PLDABase objects", 0)
+}
+
+int PyBobLearnEMPLDABase_Check(PyObject* o) {
+  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnEMPLDABase_Type));
+}
+
+
+/******************************************************************/
+/************ Variables Section ***********************************/
+/******************************************************************/
+
+/***** shape *****/
+static auto shape = bob::extension::VariableDoc(
+  "shape",
+  "(int,int, int)",
+  "A tuple that represents the dimensionality of the feature vector :math:`dim_d`, the :math:`F` matrix and the :math:`G` matrix.",
+  ""
+);
+PyObject* PyBobLearnEMPLDABase_getShape(PyBobLearnEMPLDABaseObject* self, void*) {
+  BOB_TRY
+  return Py_BuildValue("(i,i,i)", self->cxx->getDimD(), self->cxx->getDimF(), self->cxx->getDimG());
+  BOB_CATCH_MEMBER("shape could not be read", 0)
+}
+
+
+/***** F *****/
+static auto F = bob::extension::VariableDoc(
+  "f",
+  "array_like <float, 2D>",
+  "Returns the :math:`F` matrix (between class variantion matrix)",
+  ""
+);
+PyObject* PyBobLearnEMPLDABase_getF(PyBobLearnEMPLDABaseObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getF());
+  BOB_CATCH_MEMBER("`f` could not be read", 0)
+}
+int PyBobLearnEMPLDABase_setF(PyBobLearnEMPLDABaseObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, F.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "f");
+  if (!b) return -1;
+  self->cxx->setF(*b);
+  return 0;
+  BOB_CATCH_MEMBER("`f` vector could not be set", -1)
+}
+
+/***** G *****/
+static auto G = bob::extension::VariableDoc(
+  "g",
+  "array_like <float, 2D>",
+  "Returns the :math:`G` matrix (between class variantion matrix)",
+  ""
+);
+PyObject* PyBobLearnEMPLDABase_getG(PyBobLearnEMPLDABaseObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getG());
+  BOB_CATCH_MEMBER("`g` could not be read", 0)
+}
+int PyBobLearnEMPLDABase_setG(PyBobLearnEMPLDABaseObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, G.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "g");
+  if (!b) return -1;
+  self->cxx->setG(*b);
+  return 0;
+  BOB_CATCH_MEMBER("`g` vector could not be set", -1)
+}
+
+
+/***** mu *****/
+static auto mu = bob::extension::VariableDoc(
+  "mu",
+  "array_like <float, 1D>",
+  "Gets the :math:`mu` mean vector of the PLDA model",
+  ""
+);
+PyObject* PyBobLearnEMPLDABase_getMu(PyBobLearnEMPLDABaseObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getMu());
+  BOB_CATCH_MEMBER("`mu` could not be read", 0)
+}
+int PyBobLearnEMPLDABase_setMu(PyBobLearnEMPLDABaseObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, mu.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "mu");
+  if (!b) return -1;
+  self->cxx->setMu(*b);
+  return 0;
+  BOB_CATCH_MEMBER("`mu` vector could not be set", -1)
+}
+
+
+/***** __isigma__ *****/
+static auto __isigma__ = bob::extension::VariableDoc(
+  "__isigma__",
+  "array_like <float, 1D>",
+  "Gets the inverse vector/diagonal matrix of :math:`\\Sigma^{-1}`",
+  ""
+);
+static PyObject* PyBobLearnEMPLDABase_getISigma(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getISigma());
+  BOB_CATCH_MEMBER("__isigma__ could not be read", 0)
+}
+
+
+/***** __alpha__ *****/
+static auto __alpha__ = bob::extension::VariableDoc(
+  "__alpha__",
+  "array_like <float, 2D>",
+  "Gets the \f$\alpha\f$ matrix."
+  ":math:`\\alpha = (Id + G^T \\Sigma^{-1} G)^{-1} = \\mathcal{G}`",
+  ""
+);
+static PyObject* PyBobLearnEMPLDABase_getAlpha(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAlpha());
+  BOB_CATCH_MEMBER("__alpha__ could not be read", 0)
+}
+
+
+/***** __beta__ *****/
+static auto __beta__ = bob::extension::VariableDoc(
+  "__beta__",
+  "array_like <float, 2D>",
+  "Gets the :math:`\\beta` matrix "
+  ":math:`\\beta = (\\Sigma + G G^T)^{-1} = \\mathcal{S} = \\Sigma^{-1} - \\Sigma^{-1} G \\mathcal{G} G^{T} \\Sigma^{-1}`",
+  ""
+);
+static PyObject* PyBobLearnEMPLDABase_getBeta(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getBeta());
+  BOB_CATCH_MEMBER("__beta__ could not be read", 0)
+}
+
+
+/***** __ft_beta__ *****/
+static auto __ft_beta__ = bob::extension::VariableDoc(
+  "__ft_beta__",
+  "array_like <float, 2D>",
+  "Gets the :math:`F^T \\beta' matrix",
+  ""
+);
+static PyObject* PyBobLearnEMPLDABase_getFtBeta(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getFtBeta());
+  BOB_CATCH_MEMBER("__ft_beta__ could not be read", 0)
+}
+
+
+/***** __gt_i_sigma__ *****/
+static auto __gt_i_sigma__ = bob::extension::VariableDoc(
+  "__gt_i_sigma__",
+  "array_like <float, 2D>",
+  "Gets the :math:`G^T \\Sigma^{-1}` matrix",
+  ""
+);
+static PyObject* PyBobLearnEMPLDABase_getGtISigma(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getGtISigma());
+  BOB_CATCH_MEMBER("__gt_i_sigma__ could not be read", 0)
+}
+
+
+/***** __logdet_alpha__ *****/
+static auto __logdet_alpha__ = bob::extension::VariableDoc(
+  "__logdet_alpha__",
+  "double",
+  "Gets :math:`\\log(\\det(\\alpha))`",
+  ""
+);
+static PyObject* PyBobLearnEMPLDABase_getLogDetAlpha(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  return Py_BuildValue("d",self->cxx->getLogDetAlpha());
+  BOB_CATCH_MEMBER("__logdet_alpha__ could not be read", 0)
+}
+
+/***** __logdet_sigma__ *****/
+static auto __logdet_sigma__ = bob::extension::VariableDoc(
+  "__logdet_sigma__",
+  "double",
+  "Gets :math:`\\log(\\det(\\Sigma))`",
+  ""
+);
+static PyObject* PyBobLearnEMPLDABase_getLogDetSigma(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  return Py_BuildValue("d",self->cxx->getLogDetSigma());
+  BOB_CATCH_MEMBER("__logdet_sigma__ could not be read", 0)
+}
+
+
+/***** variance_threshold *****/
+static auto variance_threshold = bob::extension::VariableDoc(
+  "variance_threshold",
+  "double",
+  "",
+  ""
+);
+static PyObject* PyBobLearnEMPLDABase_getVarianceThreshold(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  return Py_BuildValue("d",self->cxx->getVarianceThreshold());
+  BOB_CATCH_MEMBER("variance_threshold could not be read", 0)
+}
+int PyBobLearnEMPLDABase_setVarianceThreshold(PyBobLearnEMPLDABaseObject* self, PyObject* value, void*){
+  BOB_TRY
+
+  if (!PyNumber_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a float", Py_TYPE(self)->tp_name, variance_threshold.name());
+    return -1;
+  }
+
+  self->cxx->setVarianceThreshold(PyFloat_AsDouble(value));
+  BOB_CATCH_MEMBER("variance_threshold could not be set", -1)
+  return 0;
+}
+
+
+
+
+/***** sigma *****/
+static auto sigma = bob::extension::VariableDoc(
+  "sigma",
+  "array_like <float, 1D>",
+  "Gets the :math:`\\sigma` (diagonal) covariance matrix of the PLDA model",
+  ""
+);
+static PyObject* PyBobLearnEMPLDABase_getSigma(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getSigma());
+  BOB_CATCH_MEMBER("sigma could not be read", 0)
+}
+int PyBobLearnEMPLDABase_setSigma(PyBobLearnEMPLDABaseObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, sigma.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "sigma");
+  if (!b) return -1;
+  self->cxx->setSigma(*b);
+  return 0;
+  BOB_CATCH_MEMBER("`sigma` vector could not be set", -1)
+}
+
+
+static PyGetSetDef PyBobLearnEMPLDABase_getseters[] = { 
+  {
+   shape.name(),
+   (getter)PyBobLearnEMPLDABase_getShape,
+   0,
+   shape.doc(),
+   0
+  },  
+  {
+   F.name(),
+   (getter)PyBobLearnEMPLDABase_getF,
+   (setter)PyBobLearnEMPLDABase_setF,
+   F.doc(),
+   0
+  },
+  {
+   G.name(),
+   (getter)PyBobLearnEMPLDABase_getG,
+   (setter)PyBobLearnEMPLDABase_setG,
+   G.doc(),
+   0
+  },
+  {
+   mu.name(),
+   (getter)PyBobLearnEMPLDABase_getMu,
+   (setter)PyBobLearnEMPLDABase_setMu,
+   mu.doc(),
+   0
+  },
+  {
+   __isigma__.name(),
+   (getter)PyBobLearnEMPLDABase_getISigma,
+   0,
+   __isigma__.doc(),
+   0
+  },
+  {
+   __alpha__.name(),
+   (getter)PyBobLearnEMPLDABase_getAlpha,
+   0,
+   __alpha__.doc(),
+   0
+  },
+  {
+   __beta__.name(),
+   (getter)PyBobLearnEMPLDABase_getBeta,
+   0,
+   __beta__.doc(),
+   0
+  },
+  {
+  __ft_beta__.name(),
+   (getter)PyBobLearnEMPLDABase_getFtBeta,
+   0,
+   __ft_beta__.doc(),
+   0
+  },
+  {
+  __gt_i_sigma__.name(),
+   (getter)PyBobLearnEMPLDABase_getGtISigma,
+   0,
+   __gt_i_sigma__.doc(),
+   0
+  },
+  {
+  __logdet_alpha__.name(),
+   (getter)PyBobLearnEMPLDABase_getLogDetAlpha,
+   0,
+   __logdet_alpha__.doc(),
+   0
+  },
+  {
+  __logdet_sigma__.name(),
+   (getter)PyBobLearnEMPLDABase_getLogDetSigma,
+   0,
+   __logdet_sigma__.doc(),
+   0
+  },
+  {
+   sigma.name(),
+   (getter)PyBobLearnEMPLDABase_getSigma,
+   (setter)PyBobLearnEMPLDABase_setSigma,
+   sigma.doc(),
+   0
+  },
+  {
+   variance_threshold.name(),
+   (getter)PyBobLearnEMPLDABase_getVarianceThreshold,
+   (setter)PyBobLearnEMPLDABase_setVarianceThreshold,
+   variance_threshold.doc(),
+   0
+  },
+  {0}  // Sentinel
+};
+
+
+/******************************************************************/
+/************ Functions Section ***********************************/
+/******************************************************************/
+
+
+/*** save ***/
+static auto save = bob::extension::FunctionDoc(
+  "save",
+  "Save the configuration of the PLDABase to a given HDF5 file"
+)
+.add_prototype("hdf5")
+.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing");
+static PyObject* PyBobLearnEMPLDABase_Save(PyBobLearnEMPLDABaseObject* self,  PyObject* args, PyObject* kwargs) {
+
+  BOB_TRY
+  
+  // get list of arguments
+  char** kwlist = save.kwlist(0);  
+  PyBobIoHDF5FileObject* hdf5;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
+
+  auto hdf5_ = make_safe(hdf5);
+  self->cxx->save(*hdf5->f);
+
+  BOB_CATCH_MEMBER("cannot save the data", 0)
+  Py_RETURN_NONE;
+}
+
+/*** load ***/
+static auto load = bob::extension::FunctionDoc(
+  "load",
+  "Load the configuration of the PLDABase to a given HDF5 file"
+)
+.add_prototype("hdf5")
+.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
+static PyObject* PyBobLearnEMPLDABase_Load(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = load.kwlist(0);  
+  PyBobIoHDF5FileObject* hdf5;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
+  
+  auto hdf5_ = make_safe(hdf5);  
+  self->cxx->load(*hdf5->f);
+
+  BOB_CATCH_MEMBER("cannot load the data", 0)
+  Py_RETURN_NONE;
+}
+
+
+/*** is_similar_to ***/
+static auto is_similar_to = bob::extension::FunctionDoc(
+  "is_similar_to",
+  
+  "Compares this PLDABase with the ``other`` one to be approximately the same.",
+  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
+  "relative and absolute precision for the ``weights``, ``biases`` "
+  "and any other values internal to this machine."
+)
+.add_prototype("other, [r_epsilon], [a_epsilon]","output")
+.add_parameter("other", ":py:class:`bob.learn.em.PLDABase`", "A PLDABase object to be compared.")
+.add_parameter("r_epsilon", "float", "Relative precision.")
+.add_parameter("a_epsilon", "float", "Absolute precision.")
+.add_return("output","bool","True if it is similar, otherwise false.");
+static PyObject* PyBobLearnEMPLDABase_IsSimilarTo(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwds) {
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = is_similar_to.kwlist(0);
+
+  //PyObject* other = 0;
+  PyBobLearnEMPLDABaseObject* other = 0;
+  double r_epsilon = 1.e-5;
+  double a_epsilon = 1.e-8;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
+        &PyBobLearnEMPLDABase_Type, &other,
+        &r_epsilon, &a_epsilon)){
+
+        is_similar_to.print_usage(); 
+        return 0;        
+  }
+
+  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
+    Py_RETURN_TRUE;
+  else
+    Py_RETURN_FALSE;
+}
+
+
+/*** resize ***/
+static auto resize = bob::extension::FunctionDoc(
+  "resize",
+  "Resizes the dimensionality of the PLDA model. Paramaters :math:`\\mu`, :math:`\\F`, :math:`\\G` and :math:`\\Sigma` are reinitialized.",
+  0,
+  true
+)
+.add_prototype("dim_D,dim_F,dim_G")
+.add_parameter("dim_D", "int", "Dimensionality of the feature vector.")
+.add_parameter("dim_F", "int", "Size of :math:`F`(between class variantion matrix).")
+.add_parameter("dim_G", "int", "Size of :math:`F`(within class variantion matrix).");
+static PyObject* PyBobLearnEMPLDABase_resize(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = resize.kwlist(0);
+
+  int dim_D, dim_F, dim_G = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "iii", kwlist, &dim_D, &dim_F, &dim_G)) return 0;
+
+  if(dim_D <= 0){
+    PyErr_Format(PyExc_TypeError, "dim_D argument must be greater than or equal to one");
+    return 0;
+  }
+
+  if(dim_F <= 0){
+    PyErr_Format(PyExc_TypeError, "dim_F argument must be greater than or equal to one");
+    return 0;
+  }
+
+  if(dim_G <= 0){
+    PyErr_Format(PyExc_TypeError, "dim_G argument must be greater than or equal to one");
+    return 0;
+  }
+
+  self->cxx->resize(dim_D, dim_F, dim_G);
+
+  BOB_CATCH_MEMBER("cannot perform the resize method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/***** get_gamma *****/
+static auto get_gamma = bob::extension::FunctionDoc(
+  "get_gamma",
+  "Gets the :math:`\\gamma_a` matrix for a given :math:`a` (number of samples). "
+  ":math:`gamma_{a} = (Id + a F^T \beta F)^{-1} = \\mathcal{F}_{a}`",
+  0,
+  true
+)
+.add_prototype("a","output")
+.add_parameter("a", "int", "Index")
+.add_return("output","array_like <float, 2D>","Get the :math:`\\gamma` matrix");
+static PyObject* PyBobLearnEMPLDABase_getGamma(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = get_gamma.kwlist(0);
+
+  int i = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) return 0;
+
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getGamma(i));
+  BOB_CATCH_MEMBER("`get_gamma` could not be read", 0)
+}
+
+
+/***** has_gamma *****/
+static auto has_gamma = bob::extension::FunctionDoc(
+  "has_gamma",
+  "Tells if the :math:`gamma_a` matrix for a given a (number of samples) exists. "
+  ":math:`gamma_a=(Id + a F^T \\beta F)^{-1}`",
+  0,
+  true
+)
+.add_prototype("a","output")
+.add_parameter("a", "int", "Index")
+.add_return("output","bool","");
+static PyObject* PyBobLearnEMPLDABase_hasGamma(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = has_gamma.kwlist(0);
+  int i = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) return 0;
+
+  if(self->cxx->hasGamma(i))
+    Py_RETURN_TRUE;
+  else
+    Py_RETURN_FALSE;
+ BOB_CATCH_MEMBER("`has_gamma` could not be read", 0)    
+}
+
+
+/***** compute_gamma *****/
+static auto compute_gamma = bob::extension::FunctionDoc(
+  "compute_gamma",
+  "Tells if the :math:`gamma_a` matrix for a given a (number of samples) exists."
+  ":math:`gamma_a = (Id + a F^T \\beta F)^{-1}`",
+  0,
+  true
+)
+.add_prototype("a,res","")
+.add_parameter("a", "int", "Index")
+.add_parameter("res", "array_like <float, 2D>", "Input data");
+static PyObject* PyBobLearnEMPLDABase_computeGamma(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = compute_gamma.kwlist(0);
+  int i = 0;
+  PyBlitzArrayObject* res = 0;  
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "iO&", kwlist, &i, &PyBlitzArray_Converter, &res)) return 0;
+
+  auto res_ = make_safe(res);  
+
+  self->cxx->computeGamma(i,*PyBlitzArrayCxx_AsBlitz<double,2>(res));
+  Py_RETURN_NONE;
+  BOB_CATCH_MEMBER("cannot perform the compute_gamma method", 0)    
+}
+
+/***** get_add_gamma *****/
+static auto get_add_gamma = bob::extension::FunctionDoc(
+  "get_add_gamma",
+   "Gets the :math:`gamma_a` matrix for a given :math:`f_a` (number of samples)."
+   ":math:`gamma_a = (Id + a F^T \\beta F)^{-1} = \\mathcal{F}_{a}`."
+   "Tries to find it from the base machine and then from this machine.",
+  0,
+  true
+)
+.add_prototype("a","output")
+.add_parameter("a", "int", "Index")
+.add_return("output","array_like <float, 2D>","");
+static PyObject* PyBobLearnEMPLDABase_getAddGamma(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = get_add_gamma.kwlist(0);
+
+  int i = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) return 0;
+
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAddGamma(i));
+  BOB_CATCH_MEMBER("`get_add_gamma` could not be read", 0)
+}
+
+
+/***** has_log_like_const_term *****/
+static auto has_log_like_const_term = bob::extension::FunctionDoc(
+  "has_log_like_const_term",
+   "Tells if the log likelihood constant term for a given :math:`a` (number of samples) exists in this machine (does not check the base machine). "
+   ":math:`l_{a}=\\frac{a}{2} ( -D log(2\\pi) -log|\\Sigma| +log|\\alpha| +log|\\gamma_a|)`",
+  0,
+  true
+)
+.add_prototype("a","output")
+.add_parameter("a", "int", "Index")
+.add_return("output","bool","");
+static PyObject* PyBobLearnEMPLDABase_hasLogLikeConstTerm(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = has_log_like_const_term.kwlist(0);
+  int i = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) return 0;
+
+  if(self->cxx->hasLogLikeConstTerm(i))
+    Py_RETURN_TRUE;
+  else
+    Py_RETURN_FALSE;
+ BOB_CATCH_MEMBER("`has_log_like_const_term` could not be read", 0)    
+}
+
+
+/***** compute_log_like_const_term" *****/
+static auto compute_log_like_const_term = bob::extension::FunctionDoc(
+  "compute_log_like_const_term",
+  "Computes the log likelihood constant term for a given :math:`a` (number of samples), given the provided :math:`gamma_a` matrix. "
+  ":math:`l_{a} = \\frac{a}{2} ( -D log(2\\pi) -log|\\Sigma| +log|\\alpha| +log|\\gamma_a|)`",
+
+  0,
+  true
+)
+.add_prototype("a,res","")
+.add_parameter("a", "int", "Index")
+.add_parameter("res", "array_like <float, 2D>", "Input data");
+static PyObject* PyBobLearnEMPLDABase_computeLogLikeConstTerm(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = compute_log_like_const_term.kwlist(0);
+  int i = 0;
+  PyBlitzArrayObject* res = 0;  
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "iO&", kwlist, &i, &PyBlitzArray_Converter, &res)) return 0;
+
+  auto res_ = make_safe(res);  
+
+  self->cxx->computeLogLikeConstTerm(i,*PyBlitzArrayCxx_AsBlitz<double,2>(res));
+  Py_RETURN_NONE;
+  BOB_CATCH_MEMBER("cannot perform the compute_log_like_const_term method", 0)    
+}
+
+
+/***** get_add_log_like_const_term *****/
+static auto get_add_log_like_const_term = bob::extension::FunctionDoc(
+  "get_add_log_like_const_term",
+
+   "Gets the log likelihood constant term for a given :math:`a` (number of samples). "
+   ":math:`l_{a} = \\frac{a}{2} ( -D log(2\\pi) -log|\\Sigma| +log|\\alpha| +log|\\gamma_a|)`",
+  0,
+  true
+)
+.add_prototype("a","output")
+.add_parameter("a", "int", "Index")
+.add_return("output","double","");
+static PyObject* PyBobLearnEMPLDABase_getAddLogLikeConstTerm(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = get_add_log_like_const_term.kwlist(0);
+  int i = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) return 0;
+
+  return Py_BuildValue("d",self->cxx->getAddLogLikeConstTerm(i));
+
+  BOB_CATCH_MEMBER("`get_add_log_like_const_term` could not be read", 0)    
+}
+
+
+/***** get_log_like_const_term *****/
+static auto get_log_like_const_term = bob::extension::FunctionDoc(
+  "get_log_like_const_term",
+   "Gets the log likelihood constant term for a given :math:`a` (number of samples). "
+    ":math:`l_{a} = \\frac{a}{2} ( -D log(2\\pi) -log|\\Sigma| +log|\\alpha| +log|\\gamma_a|)",
+  0,
+  true
+)
+.add_prototype("a","output")
+.add_parameter("a", "int", "Index")
+.add_return("output","double","");
+static PyObject* PyBobLearnEMPLDABase_getLogLikeConstTerm(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = get_log_like_const_term.kwlist(0);
+  int i = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) return 0;
+
+  return Py_BuildValue("d",self->cxx->getLogLikeConstTerm(i));
+
+  BOB_CATCH_MEMBER("`get_log_like_const_term` could not be read", 0)    
+}
+
+/***** clear_maps *****/
+static auto clear_maps = bob::extension::FunctionDoc(
+  "clear_maps",
+  "Clears the maps (:math:`gamma_a` and loglike_constterm_a).",
+  0,
+  true
+);
+static PyObject* PyBobLearnEMPLDABase_clearMaps(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  self->cxx->clearMaps();
+  Py_RETURN_NONE;
+
+  BOB_CATCH_MEMBER("cannot perform the clear_maps method", 0)    
+}
+
+
+/***** compute_log_likelihood_point_estimate *****/
+static auto compute_log_likelihood_point_estimate = bob::extension::FunctionDoc(
+  "compute_log_likelihood_point_estimate",
+   "Gets the log-likelihood of an observation, given the current model and the latent variables (point estimate)."
+   "This will basically compute :math:`p(x_{ij} | h_{i}, w_{ij}, \\Theta)`, given by "
+   ":math:`\\mathcal{N}(x_{ij}|[\\mu + F h_{i} + G w_{ij} + \\epsilon_{ij}, \\Sigma])`, which is in logarithm, "
+   ":math:`\\frac{D}{2} log(2\\pi) -\\frac{1}{2} log(det(\\Sigma)) -\\frac{1}{2} {(x_{ij}-(\\mu+F h_{i}+G w_{ij}))^{T}\\Sigma^{-1}(x_{ij}-(\\mu+F h_{i}+G w_{ij}))}`",
+  0,
+  true
+)
+.add_prototype("xij,hi,wij","output")
+.add_parameter("xij", "array_like <float, 1D>", "")
+.add_parameter("hi", "array_like <float, 1D>", "")
+.add_parameter("wij", "array_like <float, 1D>", "")
+.add_return("output", "double", "");
+static PyObject* PyBobLearnEMPLDABase_computeLogLikelihoodPointEstimate(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = compute_log_likelihood_point_estimate.kwlist(0);
+  PyBlitzArrayObject* xij, *hi, *wij;  
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&O&", kwlist, &PyBlitzArray_Converter, &xij,
+                                                               &PyBlitzArray_Converter, &hi,
+                                                               &PyBlitzArray_Converter, &wij)) return 0;
+
+  auto xij_ = make_safe(xij);
+  auto hi_ = make_safe(hi);
+  auto wij_ = make_safe(wij);  
+
+  return Py_BuildValue("d", self->cxx->computeLogLikelihoodPointEstimate(*PyBlitzArrayCxx_AsBlitz<double,1>(xij), *PyBlitzArrayCxx_AsBlitz<double,1>(hi), *PyBlitzArrayCxx_AsBlitz<double,1>(wij)));
+  
+  BOB_CATCH_MEMBER("`compute_log_likelihood_point_estimate` could not be read", 0)    
+}
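+
+/* With the latent variables fixed, the call above reduces to evaluating a
+   Gaussian log-density. Illustrative Python call (vector shapes assumed):
+
+     # x: (dim_D,), h: (dim_F,), w: (dim_G,)
+     ll = base.compute_log_likelihood_point_estimate(x, h, w)
+*/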
+
+/***** __precompute__ *****/
+static auto __precompute__ = bob::extension::FunctionDoc(
+  "__precompute__",
+  "Precomputes useful values for the log likelihood "
+  ":math:`\\log(\\det(\\alpha))` and :math:`\\log(\\det(\\Sigma))`.",
+  0,
+  true
+);
+static PyObject* PyBobLearnEMPLDABase_precompute(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  self->cxx->precompute();
+  Py_RETURN_NONE;
+
+  BOB_CATCH_MEMBER("cannot perform the __precompute__ method", 0)    
+}
+
+
+/***** __precompute_log_like__ *****/
+static auto __precompute_log_like__ = bob::extension::FunctionDoc(
+  "__precompute_log_like__",
+
+  "Precomputes useful values for the log likelihood "
+  ":math:`\\log(\\det(\\alpha))` and :math:`\\log(\\det(\\Sigma))`.",
+
+  0,
+  true
+);
+static PyObject* PyBobLearnEMPLDABase_precomputeLogLike(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  self->cxx->precomputeLogLike();
+  Py_RETURN_NONE;
+
+  BOB_CATCH_MEMBER("cannot perform the __precompute_log_like__ method", 0)    
+}
+
+
+static PyMethodDef PyBobLearnEMPLDABase_methods[] = {
+  {
+    save.name(),
+    (PyCFunction)PyBobLearnEMPLDABase_Save,
+    METH_VARARGS|METH_KEYWORDS,
+    save.doc()
+  },
+  {
+    load.name(),
+    (PyCFunction)PyBobLearnEMPLDABase_Load,
+    METH_VARARGS|METH_KEYWORDS,
+    load.doc()
+  },
+  {
+    is_similar_to.name(),
+    (PyCFunction)PyBobLearnEMPLDABase_IsSimilarTo,
+    METH_VARARGS|METH_KEYWORDS,
+    is_similar_to.doc()
+  },
+  {
+    resize.name(),
+    (PyCFunction)PyBobLearnEMPLDABase_resize,
+    METH_VARARGS|METH_KEYWORDS,
+    resize.doc()
+  },
+  {
+    get_gamma.name(),
+    (PyCFunction)PyBobLearnEMPLDABase_getGamma,
+    METH_VARARGS|METH_KEYWORDS,
+    get_gamma.doc()
+  },
+  {
+    has_gamma.name(),
+    (PyCFunction)PyBobLearnEMPLDABase_hasGamma,
+    METH_VARARGS|METH_KEYWORDS,
+    has_gamma.doc()
+  },
+  {
+    compute_gamma.name(),
+    (PyCFunction)PyBobLearnEMPLDABase_computeGamma,
+    METH_VARARGS|METH_KEYWORDS,
+    compute_gamma.doc()
+  },
+  {
+    get_add_gamma.name(),
+    (PyCFunction)PyBobLearnEMPLDABase_getAddGamma,
+    METH_VARARGS|METH_KEYWORDS,
+    get_add_gamma.doc()
+  },
+  {
+    has_log_like_const_term.name(),
+    (PyCFunction)PyBobLearnEMPLDABase_hasLogLikeConstTerm,
+    METH_VARARGS|METH_KEYWORDS,
+    has_log_like_const_term.doc()
+  },  
+  {
+    compute_log_like_const_term.name(),
+    (PyCFunction)PyBobLearnEMPLDABase_computeLogLikeConstTerm,
+    METH_VARARGS|METH_KEYWORDS,
+    compute_log_like_const_term.doc()
+  },  
+  {
+    get_add_log_like_const_term.name(),
+    (PyCFunction)PyBobLearnEMPLDABase_getAddLogLikeConstTerm,
+    METH_VARARGS|METH_KEYWORDS,
+    get_add_log_like_const_term.doc()
+  },  
+  {
+    get_log_like_const_term.name(),
+    (PyCFunction)PyBobLearnEMPLDABase_getLogLikeConstTerm,
+    METH_VARARGS|METH_KEYWORDS,
+    get_log_like_const_term.doc()
+  },  
+  {
+    clear_maps.name(),
+    (PyCFunction)PyBobLearnEMPLDABase_clearMaps,
+    METH_NOARGS,
+    clear_maps.doc()
+  },
+  {
+    compute_log_likelihood_point_estimate.name(),
+    (PyCFunction)PyBobLearnEMPLDABase_computeLogLikelihoodPointEstimate,
+    METH_VARARGS|METH_KEYWORDS,
+    compute_log_likelihood_point_estimate.doc()
+  },
+  {
+    __precompute__.name(),
+    (PyCFunction)PyBobLearnEMPLDABase_precompute,
+    METH_NOARGS,
+    __precompute__.doc()
+  },   
+  {
+    __precompute_log_like__.name(),
+    (PyCFunction)PyBobLearnEMPLDABase_precomputeLogLike,
+    METH_NOARGS,
+    __precompute_log_like__.doc()
+  },     
+  {0} /* Sentinel */
+};
+
+
+/******************************************************************/
+/************ Module Section **************************************/
+/******************************************************************/
+
+// Define the PLDABase type struct; will be initialized later
+PyTypeObject PyBobLearnEMPLDABase_Type = {
+  PyVarObject_HEAD_INIT(0,0)
+  0
+};
+
+bool init_BobLearnEMPLDABase(PyObject* module)
+{
+  // initialize the type struct
+  PyBobLearnEMPLDABase_Type.tp_name      = PLDABase_doc.name();
+  PyBobLearnEMPLDABase_Type.tp_basicsize = sizeof(PyBobLearnEMPLDABaseObject);
+  PyBobLearnEMPLDABase_Type.tp_flags     = Py_TPFLAGS_DEFAULT;
+  PyBobLearnEMPLDABase_Type.tp_doc       = PLDABase_doc.doc();
+
+  // set the functions
+  PyBobLearnEMPLDABase_Type.tp_new         = PyType_GenericNew;
+  PyBobLearnEMPLDABase_Type.tp_init        = reinterpret_cast<initproc>(PyBobLearnEMPLDABase_init);
+  PyBobLearnEMPLDABase_Type.tp_dealloc     = reinterpret_cast<destructor>(PyBobLearnEMPLDABase_delete);
+  PyBobLearnEMPLDABase_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnEMPLDABase_RichCompare);
+  PyBobLearnEMPLDABase_Type.tp_methods     = PyBobLearnEMPLDABase_methods;
+  PyBobLearnEMPLDABase_Type.tp_getset      = PyBobLearnEMPLDABase_getseters;
+  //PyBobLearnEMPLDABase_Type.tp_call = reinterpret_cast<ternaryfunc>(PyBobLearnEMPLDABase_forward);
+
+
+  // check that everything is fine
+  if (PyType_Ready(&PyBobLearnEMPLDABase_Type) < 0) return false;
+
+  // add the type to the module
+  Py_INCREF(&PyBobLearnEMPLDABase_Type);
+  return PyModule_AddObject(module, "PLDABase", (PyObject*)&PyBobLearnEMPLDABase_Type) >= 0;
+}
+
diff --git a/bob/learn/em/plda_machine.cpp b/bob/learn/em/plda_machine.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..ec72f5685fab3a2a9cc831efcda407c35d094697
--- /dev/null
+++ b/bob/learn/em/plda_machine.cpp
@@ -0,0 +1,801 @@
+/**
+ * @date Thu Jan 30 11:10:15 2015 +0200
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ *
+ * @brief Python API for bob::learn::em
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "main.h"
+
+/******************************************************************/
+/************ Constructor Section *********************************/
+/******************************************************************/
+
+static inline bool f(PyObject* o){return o != 0 && PyObject_IsTrue(o) > 0;}  /* converts PyObject to bool and returns false if object is NULL */
+
+static auto PLDAMachine_doc = bob::extension::ClassDoc(
+  BOB_EXT_MODULE_PREFIX ".PLDAMachine",
+
+  "This class is a container for an enrolled identity/class. It contains information extracted from the enrollment samples. "
+  "It should be used in combination with a PLDABase instance."
+  "References: [ElShafey2014,PrinceElder2007,LiFu2012]",
+  ""
+).add_constructor(
+  bob::extension::FunctionDoc(
+    "__init__",
+
+     "Constructor, builds a new PLDAMachine.",
+
+    "",
+    true
+  )
+  .add_prototype("plda_base","")
+  .add_prototype("other","")
+  .add_prototype("hdf5,plda_base","")
+
+  .add_parameter("plda_base", "`bob.learn.em.PLDABase`", "")  
+  .add_parameter("other", ":py:class:`bob.learn.em.PLDAMachine`", "A PLDAMachine object to be copied.")
+  .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
+
+);
+
+
+static int PyBobLearnEMPLDAMachine_init_copy(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = PLDAMachine_doc.kwlist(1);
+  PyBobLearnEMPLDAMachineObject* o;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMPLDAMachine_Type, &o)){
+    PLDAMachine_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::PLDAMachine(*o->cxx));
+  return 0;
+}
+
+
+static int PyBobLearnEMPLDAMachine_init_hdf5(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = PLDAMachine_doc.kwlist(2);
+
+  PyBobIoHDF5FileObject* config = 0;
+  PyBobLearnEMPLDABaseObject* plda_base;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O!", kwlist, &PyBobIoHDF5File_Converter, &config,
+                                                                 &PyBobLearnEMPLDABase_Type, &plda_base)){
+    PLDAMachine_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::PLDAMachine(*(config->f),plda_base->cxx));
+
+  return 0;
+}
+
+
+static int PyBobLearnEMPLDAMachine_init_pldabase(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = PLDAMachine_doc.kwlist(0);  
+  PyBobLearnEMPLDABaseObject* plda_base;
+  
+  //Here we have to select which keyword argument to read  
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMPLDABase_Type, &plda_base)){
+    PLDAMachine_doc.print_usage();
+    return -1;
+  }
+  
+  self->cxx.reset(new bob::learn::em::PLDAMachine(plda_base->cxx));
+  return 0;
+}
+
+static int PyBobLearnEMPLDAMachine_init(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // get the number of command line arguments
+  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
+ 
+  if(nargs==1){
+    //Reading the input argument
+    PyObject* arg = 0;
+    if (PyTuple_Size(args))
+      arg = PyTuple_GET_ITEM(args, 0);
+    else {
+      PyObject* tmp = PyDict_Values(kwargs);
+      auto tmp_ = make_safe(tmp);
+      arg = PyList_GET_ITEM(tmp, 0);
+    }
+
+    // If the constructor input is a PLDAMachine object
+    if (PyBobLearnEMPLDAMachine_Check(arg))
+      return PyBobLearnEMPLDAMachine_init_copy(self, args, kwargs);
+    // If the constructor input is a PLDABase object
+    else if (PyBobLearnEMPLDABase_Check(arg))
+      return PyBobLearnEMPLDAMachine_init_pldabase(self, args, kwargs);
+  }
+  else if(nargs==2)
+    return PyBobLearnEMPLDAMachine_init_hdf5(self, args, kwargs);
+  else{
+    PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires 1 or 2 arguments, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
+    PLDAMachine_doc.print_usage();
+    return -1;
+  }
+  BOB_CATCH_MEMBER("cannot create PLDAMachine", 0)
+  return 0;
+}
+
+
+
+static void PyBobLearnEMPLDAMachine_delete(PyBobLearnEMPLDAMachineObject* self) {
+  self->cxx.reset();
+  Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+static PyObject* PyBobLearnEMPLDAMachine_RichCompare(PyBobLearnEMPLDAMachineObject* self, PyObject* other, int op) {
+  BOB_TRY
+
+  if (!PyBobLearnEMPLDAMachine_Check(other)) {
+    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
+    return 0;
+  }
+  auto other_ = reinterpret_cast<PyBobLearnEMPLDAMachineObject*>(other);
+  switch (op) {
+    case Py_EQ:
+      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+    case Py_NE:
+      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
+    default:
+      Py_INCREF(Py_NotImplemented);
+      return Py_NotImplemented;
+  }
+  BOB_CATCH_MEMBER("cannot compare PLDAMachine objects", 0)
+}
+
+int PyBobLearnEMPLDAMachine_Check(PyObject* o) {
+  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnEMPLDAMachine_Type));
+}
+
+
+/******************************************************************/
+/************ Variables Section ***********************************/
+/******************************************************************/
+
+/***** shape *****/
+static auto shape = bob::extension::VariableDoc(
+  "shape",
+  "(int,int, int)",
+  "A tuple that represents the dimensionality of the feature vector :math:`dim_d`, the :math:`F` matrix and the :math:`G` matrix.",
+  ""
+);
+PyObject* PyBobLearnEMPLDAMachine_getShape(PyBobLearnEMPLDAMachineObject* self, void*) {
+  BOB_TRY
+  return Py_BuildValue("(i,i,i)", self->cxx->getDimD(), self->cxx->getDimF(), self->cxx->getDimG());
+  BOB_CATCH_MEMBER("shape could not be read", 0)
+}
+
+
+/***** n_samples *****/
+static auto n_samples = bob::extension::VariableDoc(
+  "n_samples",
+  "int",
+  "Number of enrolled samples",
+  ""
+);
+static PyObject* PyBobLearnEMPLDAMachine_getNSamples(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  return Py_BuildValue("i",self->cxx->getNSamples());
+  BOB_CATCH_MEMBER("n_samples could not be read", 0)
+}
+int PyBobLearnEMPLDAMachine_setNSamples(PyBobLearnEMPLDAMachineObject* self, PyObject* value, void*){
+  BOB_TRY
+
+  if (!PyInt_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects an int", Py_TYPE(self)->tp_name, n_samples.name());
+    return -1;
+  }
+
+  if (PyInt_AS_LONG(value) < 0){
+    PyErr_Format(PyExc_TypeError, "n_samples must be greater than or equal to zero");
+    return -1;
+  }
+
+  self->cxx->setNSamples(PyInt_AS_LONG(value));
+  BOB_CATCH_MEMBER("n_samples could not be set", -1)
+  return 0;
+}
+
+
+/***** w_sum_xit_beta_xi *****/
+static auto w_sum_xit_beta_xi = bob::extension::VariableDoc(
+  "w_sum_xit_beta_xi",
+  "double",
+  "Gets the :math:`A = -0.5 \\sum_{i} x_{i}^T \\beta x_{i}` value",
+  ""
+);
+static PyObject* PyBobLearnEMPLDAMachine_getWSumXitBetaXi(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  return Py_BuildValue("d",self->cxx->getWSumXitBetaXi());
+  BOB_CATCH_MEMBER("w_sum_xit_beta_xi could not be read", 0)
+}
+int PyBobLearnEMPLDAMachine_setWSumXitBetaXi(PyBobLearnEMPLDAMachineObject* self, PyObject* value, void*){
+  BOB_TRY
+
+  if (!PyNumber_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a float", Py_TYPE(self)->tp_name, w_sum_xit_beta_xi.name());
+    return -1;
+  }
+
+  self->cxx->setWSumXitBetaXi(PyFloat_AS_DOUBLE(value));
+  BOB_CATCH_MEMBER("w_sum_xit_beta_xi could not be set", -1)
+  return 0;
+}
+
+
+/***** plda_base *****/
+static auto plda_base = bob::extension::VariableDoc(
+  "plda_base",
+  ":py:class:`bob.learn.em.PLDABase`",
+  "The PLDABase attached to this machine",
+  ""
+);
+PyObject* PyBobLearnEMPLDAMachine_getPLDABase(PyBobLearnEMPLDAMachineObject* self, void*){
+  BOB_TRY
+
+  boost::shared_ptr<bob::learn::em::PLDABase> plda_base_o = self->cxx->getPLDABase();
+
+  //Allocating the correspondent python object
+  PyBobLearnEMPLDABaseObject* retval =
+    (PyBobLearnEMPLDABaseObject*)PyBobLearnEMPLDABase_Type.tp_alloc(&PyBobLearnEMPLDABase_Type, 0);
+  retval->cxx = plda_base_o;
+
+  return Py_BuildValue("O",retval);
+  BOB_CATCH_MEMBER("plda_base could not be read", 0)
+}
+int PyBobLearnEMPLDAMachine_setPLDABase(PyBobLearnEMPLDAMachineObject* self, PyObject* value, void*){
+  BOB_TRY
+
+  if (!PyBobLearnEMPLDABase_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a :py:class:`bob.learn.em.PLDABase`", Py_TYPE(self)->tp_name, plda_base.name());
+    return -1;
+  }
+
+  PyBobLearnEMPLDABaseObject* plda_base_o = 0;
+  PyArg_Parse(value, "O!", &PyBobLearnEMPLDABase_Type,&plda_base_o);
+
+  self->cxx->setPLDABase(plda_base_o->cxx);
+
+  return 0;
+  BOB_CATCH_MEMBER("plda_base could not be set", -1)  
+}
+
+
+/***** weighted_sum *****/
+static auto weighted_sum = bob::extension::VariableDoc(
+  "weighted_sum",
+  "array_like <float, 1D>",
+  "Get/Set :math:``\\sum_{i} F^T \\beta x_{i}` value",
+  ""
+);
+static PyObject* PyBobLearnEMPLDAMachine_getWeightedSum(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getWeightedSum());
+  BOB_CATCH_MEMBER("weighted_sum could not be read", 0)
+}
+int PyBobLearnEMPLDAMachine_setWeightedSum(PyBobLearnEMPLDAMachineObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, weighted_sum.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "weighted_sum");
+  if (!b) return -1;
+  self->cxx->setWeightedSum(*b);
+  return 0;
+  BOB_CATCH_MEMBER("`weighted_sum` vector could not be set", -1)
+}
+
+
+/***** log_likelihood *****/
+static auto log_likelihood = bob::extension::VariableDoc(
+  "log_likelihood",
+  "double",
+  "",
+  ""
+);
+static PyObject* PyBobLearnEMPLDAMachine_getLogLikelihood(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  return Py_BuildValue("d",self->cxx->getLogLikelihood());
+  BOB_CATCH_MEMBER("log_likelihood could not be read", 0)
+}
+int PyBobLearnEMPLDAMachine_setLogLikelihood(PyBobLearnEMPLDAMachineObject* self, PyObject* value, void*){
+  BOB_TRY
+
+  if (!PyNumber_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a double", Py_TYPE(self)->tp_name, log_likelihood.name());
+    return -1;
+  }
+
+  self->cxx->setLogLikelihood(PyFloat_AS_DOUBLE(value));
+  BOB_CATCH_MEMBER("log_likelihood could not be set", -1)
+  return 0;
+}
+
+
+static PyGetSetDef PyBobLearnEMPLDAMachine_getseters[] = { 
+  {
+   shape.name(),
+   (getter)PyBobLearnEMPLDAMachine_getShape,
+   0,
+   shape.doc(),
+   0
+  },  
+  {
+   n_samples.name(),
+   (getter)PyBobLearnEMPLDAMachine_getNSamples,
+   (setter)PyBobLearnEMPLDAMachine_setNSamples,
+   n_samples.doc(),
+   0
+  },  
+  {
+   w_sum_xit_beta_xi.name(),
+   (getter)PyBobLearnEMPLDAMachine_getWSumXitBetaXi,
+   (setter)PyBobLearnEMPLDAMachine_setWSumXitBetaXi,
+   w_sum_xit_beta_xi.doc(),
+   0
+  },
+  {
+   plda_base.name(),
+   (getter)PyBobLearnEMPLDAMachine_getPLDABase,
+   (setter)PyBobLearnEMPLDAMachine_setPLDABase,
+   plda_base.doc(),
+   0
+  },
+  {
+   weighted_sum.name(),
+   (getter)PyBobLearnEMPLDAMachine_getWeightedSum,
+   (setter)PyBobLearnEMPLDAMachine_setWeightedSum,
+   weighted_sum.doc(),
+   0
+  },
+  {
+   log_likelihood.name(),
+   (getter)PyBobLearnEMPLDAMachine_getLogLikelihood,
+   (setter)PyBobLearnEMPLDAMachine_setLogLikelihood,
+   log_likelihood.doc(),
+   0
+  },
+  {0}  // Sentinel
+};
+
+
+
+/******************************************************************/
+/************ Functions Section ***********************************/
+/******************************************************************/
+
+
+/*** save ***/
+static auto save = bob::extension::FunctionDoc(
+  "save",
+  "Save the configuration of the PLDAMachine to a given HDF5 file"
+)
+.add_prototype("hdf5")
+.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing");
+static PyObject* PyBobLearnEMPLDAMachine_Save(PyBobLearnEMPLDAMachineObject* self,  PyObject* args, PyObject* kwargs) {
+
+  BOB_TRY
+  
+  // get list of arguments
+  char** kwlist = save.kwlist(0);  
+  PyBobIoHDF5FileObject* hdf5;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
+
+  auto hdf5_ = make_safe(hdf5);
+  self->cxx->save(*hdf5->f);
+
+  BOB_CATCH_MEMBER("cannot save the data", 0)
+  Py_RETURN_NONE;
+}
+
+/*** load ***/
+static auto load = bob::extension::FunctionDoc(
+  "load",
+  "Load the configuration of the PLDAMachine to a given HDF5 file"
+)
+.add_prototype("hdf5")
+.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
+static PyObject* PyBobLearnEMPLDAMachine_Load(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = load.kwlist(0);  
+  PyBobIoHDF5FileObject* hdf5;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
+  
+  auto hdf5_ = make_safe(hdf5);  
+  self->cxx->load(*hdf5->f);
+
+  BOB_CATCH_MEMBER("cannot load the data", 0)
+  Py_RETURN_NONE;
+}
+
+
+/*** is_similar_to ***/
+static auto is_similar_to = bob::extension::FunctionDoc(
+  "is_similar_to",
+  
+  "Compares this PLDAMachine with the ``other`` one to be approximately the same.",
+  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
+  "relative and absolute precision for the ``weights``, ``biases`` "
+  "and any other values internal to this machine."
+)
+.add_prototype("other, [r_epsilon], [a_epsilon]","output")
+.add_parameter("other", ":py:class:`bob.learn.em.PLDAMachine`", "A PLDAMachine object to be compared.")
+.add_parameter("r_epsilon", "float", "Relative precision.")
+.add_parameter("a_epsilon", "float", "Absolute precision.")
+.add_return("output","bool","True if it is similar, otherwise false.");
+static PyObject* PyBobLearnEMPLDAMachine_IsSimilarTo(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwds) {
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = is_similar_to.kwlist(0);
+
+  //PyObject* other = 0;
+  PyBobLearnEMPLDAMachineObject* other = 0;
+  double r_epsilon = 1.e-5;
+  double a_epsilon = 1.e-8;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
+        &PyBobLearnEMPLDAMachine_Type, &other,
+        &r_epsilon, &a_epsilon)){
+
+        is_similar_to.print_usage(); 
+        return 0;        
+  }
+
+  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
+    Py_RETURN_TRUE;
+  else
+    Py_RETURN_FALSE;
+}
+
+
+/***** get_gamma *****/
+static auto get_gamma = bob::extension::FunctionDoc(
+  "get_gamma",
+  "Gets the :math:`\\gamma_a` matrix for a given :math:`a` (number of samples). "
+  ":math:`gamma_{a} = (Id + a F^T \beta F)^{-1} = \\mathcal{F}_{a}`",
+  0,
+  true
+)
+.add_prototype("a","output")
+.add_parameter("a", "int", "Index")
+.add_return("output","array_like <float, 2D>","Get the :math:`\\gamma` matrix");
+static PyObject* PyBobLearnEMPLDAMachine_getGamma(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = get_gamma.kwlist(0);
+
+  int i = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) return 0;
+
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getGamma(i));
+  BOB_CATCH_MEMBER("`get_gamma` could not be read", 0)
+}
+
+
+/***** has_gamma *****/
+static auto has_gamma = bob::extension::FunctionDoc(
+  "has_gamma",
+  "Tells if the :math:`gamma_a` matrix for a given a (number of samples) exists. "
+  ":math:`gamma_a=(Id + a F^T \\beta F)^{-1}`",
+  0,
+  true
+)
+.add_prototype("a","output")
+.add_parameter("a", "int", "Index")
+.add_return("output","bool","");
+static PyObject* PyBobLearnEMPLDAMachine_hasGamma(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = has_gamma.kwlist(0);
+  int i = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) return 0;
+
+  if(self->cxx->hasGamma(i))
+    Py_RETURN_TRUE;
+  else
+    Py_RETURN_FALSE;
+ BOB_CATCH_MEMBER("`has_gamma` could not be read", 0)    
+}
+
+
+/***** get_add_gamma *****/
+static auto get_add_gamma = bob::extension::FunctionDoc(
+  "get_add_gamma",
+   "Gets the :math:`gamma_a` matrix for a given :math:`f_a` (number of samples)."
+   ":math:`gamma_a = (Id + a F^T \\beta F)^{-1} = \\mathcal{F}_{a}`."
+   "Tries to find it from the base machine and then from this machine.",
+  0,
+  true
+)
+.add_prototype("a","output")
+.add_parameter("a", "int", "Index")
+.add_return("output","array_like <float, 2D>","");
+static PyObject* PyBobLearnEMPLDAMachine_getAddGamma(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = get_add_gamma.kwlist(0);
+
+  int i = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) return 0;
+
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAddGamma(i));
+  BOB_CATCH_MEMBER("`get_add_gamma` could not be read", 0)
+}
+
+
+/***** has_log_like_const_term *****/
+static auto has_log_like_const_term = bob::extension::FunctionDoc(
+  "has_log_like_const_term",
+   "Tells if the log likelihood constant term for a given :math:`a` (number of samples) exists in this machine (does not check the base machine). "
+   ":math:`l_{a}=\\frac{a}{2} ( -D log(2\\pi) -log|\\Sigma| +log|\\alpha| +log|\\gamma_a|)`",
+  0,
+  true
+)
+.add_prototype("a","output")
+.add_parameter("a", "int", "Index")
+.add_return("output","bool","");
+static PyObject* PyBobLearnEMPLDAMachine_hasLogLikeConstTerm(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = has_log_like_const_term.kwlist(0);
+  int i = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) return 0;
+
+  if(self->cxx->hasLogLikeConstTerm(i))
+    Py_RETURN_TRUE;
+  else
+    Py_RETURN_FALSE;
+ BOB_CATCH_MEMBER("`has_log_like_const_term` could not be read", 0)    
+}
+
+
+/***** get_add_log_like_const_term *****/
+static auto get_add_log_like_const_term = bob::extension::FunctionDoc(
+  "get_add_log_like_const_term",
+
+   "Gets the log likelihood constant term for a given :math:`a` (number of samples). "
+   ":math:`l_{a} = \\frac{a}{2} ( -D log(2\\pi) -log|\\Sigma| +log|\\alpha| +log|\\gamma_a|)`",
+  0,
+  true
+)
+.add_prototype("a","output")
+.add_parameter("a", "int", "Index")
+.add_return("output","double","");
+static PyObject* PyBobLearnEMPLDAMachine_getAddLogLikeConstTerm(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = get_add_log_like_const_term.kwlist(0);
+  int i = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) return 0;
+
+  return Py_BuildValue("d",self->cxx->getAddLogLikeConstTerm(i));
+
+  BOB_CATCH_MEMBER("`get_add_log_like_const_term` could not be read", 0)    
+}
+
+
+/***** get_log_like_const_term *****/
+static auto get_log_like_const_term = bob::extension::FunctionDoc(
+  "get_log_like_const_term",
+   "Gets the log likelihood constant term for a given :math:`a` (number of samples). "
+    ":math:`l_{a} = \\frac{a}{2} ( -D log(2\\pi) -log|\\Sigma| +log|\\alpha| +log|\\gamma_a|)",
+  0,
+  true
+)
+.add_prototype("a","output")
+.add_parameter("a", "int", "Index")
+.add_return("output","double","");
+static PyObject* PyBobLearnEMPLDAMachine_getLogLikeConstTerm(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = get_log_like_const_term.kwlist(0);
+  int i = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) return 0;
+
+  return Py_BuildValue("d",self->cxx->getLogLikeConstTerm(i));
+
+  BOB_CATCH_MEMBER("`get_log_like_const_term` could not be read", 0)    
+}
+
+/***** clear_maps *****/
+static auto clear_maps = bob::extension::FunctionDoc(
+  "clear_maps",
+  "Clears the maps (:math:`gamma_a` and loglike_constterm_a).",
+  0,
+  true
+);
+static PyObject* PyBobLearnEMPLDAMachine_clearMaps(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  self->cxx->clearMaps();
+  Py_RETURN_NONE;
+
+  BOB_CATCH_MEMBER("cannot perform the clear_maps method", 0)    
+}
+
+
+/***** compute_log_likelihood *****/
+static auto compute_log_likelihood = bob::extension::FunctionDoc(
+  "compute_log_likelihood",
+  "Compute the log-likelihood of the given sample and (optionally) the enrolled samples",
+  0,
+  true
+)
+.add_prototype("sample,with_enrolled_samples","output")
+.add_parameter("sample", "array_like <float, 1D>", "Sample")
+.add_parameter("with_enrolled_samples", "bool", "")
+.add_return("output","double","The log-likelihood");
+static PyObject* PyBobLearnEMPLDAMachine_computeLogLikelihood(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = compute_log_likelihood.kwlist(0);
+
+  PyBlitzArrayObject* samples;
+  PyObject* with_enrolled_samples = Py_True;
+  
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&|O!", kwlist, &PyBlitzArray_Converter, &samples,
+                                                                 &PyBool_Type, &with_enrolled_samples)) return 0;
+  auto samples_ = make_safe(samples);
+
+  //There are two C++ overloads: one accepting <double,1>, the other <double,2>
+  if (samples->ndim == 1)
+    return Py_BuildValue("d",self->cxx->computeLogLikelihood(*PyBlitzArrayCxx_AsBlitz<double,1>(samples), f(with_enrolled_samples)));
+  else
+    return Py_BuildValue("d",self->cxx->computeLogLikelihood(*PyBlitzArrayCxx_AsBlitz<double,2>(samples), f(with_enrolled_samples)));
+
+  BOB_CATCH_MEMBER("`compute_log_likelihood` could not be read", 0)    
+}
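+
+/* Sketch of how the 1D/2D dispatch above is exercised from Python
+   (`machine` is a bob.learn.em.PLDAMachine; the shapes are assumptions):
+
+     import numpy
+     x = numpy.random.randn(dim_D)       # single sample -> <double,1> overload
+     X = numpy.random.randn(4, dim_D)    # four samples  -> <double,2> overload
+     ll_one  = machine.compute_log_likelihood(x)         # with_enrolled_samples defaults to True
+     ll_many = machine.compute_log_likelihood(X, False)  # ignore the enrolled samples
+*/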
+
+
+/***** forward *****/
+static auto forward = bob::extension::FunctionDoc(
+  "forward",
+  "Computes a log likelihood ratio from a 1D or 2D blitz::Array",
+  0,
+  true
+)
+.add_prototype("samples","output")
+.add_parameter("samples", "array_like <float, 1D>", "Sample")
+.add_return("output","double","The log-likelihood ratio");
+static PyObject* PyBobLearnEMPLDAMachine_forward(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+  
+  char** kwlist = forward.kwlist(0);
+
+  PyBlitzArrayObject* samples;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBlitzArray_Converter, &samples)) return 0;
+  auto samples_ = make_safe(samples);
+  //There are two C++ overloads: one accepting <double,1>, the other <double,2>
+  if (samples->ndim == 1)
+    return Py_BuildValue("d",self->cxx->forward(*PyBlitzArrayCxx_AsBlitz<double,1>(samples)));
+  else
+    return Py_BuildValue("d",self->cxx->forward(*PyBlitzArrayCxx_AsBlitz<double,2>(samples)));
+
+  BOB_CATCH_MEMBER("`forward` could not be read", 0)    
+}
+
+
+static PyMethodDef PyBobLearnEMPLDAMachine_methods[] = {
+  {
+    save.name(),
+    (PyCFunction)PyBobLearnEMPLDAMachine_Save,
+    METH_VARARGS|METH_KEYWORDS,
+    save.doc()
+  },
+  {
+    load.name(),
+    (PyCFunction)PyBobLearnEMPLDAMachine_Load,
+    METH_VARARGS|METH_KEYWORDS,
+    load.doc()
+  },
+  {
+    is_similar_to.name(),
+    (PyCFunction)PyBobLearnEMPLDAMachine_IsSimilarTo,
+    METH_VARARGS|METH_KEYWORDS,
+    is_similar_to.doc()
+  },
+  {
+    get_gamma.name(),
+    (PyCFunction)PyBobLearnEMPLDAMachine_getGamma,
+    METH_VARARGS|METH_KEYWORDS,
+    get_gamma.doc()
+  },
+  {
+    has_gamma.name(),
+    (PyCFunction)PyBobLearnEMPLDAMachine_hasGamma,
+    METH_VARARGS|METH_KEYWORDS,
+    has_gamma.doc()
+  },
+  {
+    get_add_gamma.name(),
+    (PyCFunction)PyBobLearnEMPLDAMachine_getAddGamma,
+    METH_VARARGS|METH_KEYWORDS,
+    get_add_gamma.doc()
+  },
+  {
+    has_log_like_const_term.name(),
+    (PyCFunction)PyBobLearnEMPLDAMachine_hasLogLikeConstTerm,
+    METH_VARARGS|METH_KEYWORDS,
+    has_log_like_const_term.doc()
+  },  
+  {
+    get_add_log_like_const_term.name(),
+    (PyCFunction)PyBobLearnEMPLDAMachine_getAddLogLikeConstTerm,
+    METH_VARARGS|METH_KEYWORDS,
+    get_add_log_like_const_term.doc()
+  },
+  {
+    get_log_like_const_term.name(),
+    (PyCFunction)PyBobLearnEMPLDAMachine_getLogLikeConstTerm,
+    METH_VARARGS|METH_KEYWORDS,
+    get_log_like_const_term.doc()
+  },  
+  {
+    clear_maps.name(),
+    (PyCFunction)PyBobLearnEMPLDAMachine_clearMaps,
+    METH_NOARGS,
+    clear_maps.doc()
+  },
+  {
+    compute_log_likelihood.name(),
+    (PyCFunction)PyBobLearnEMPLDAMachine_computeLogLikelihood,
+    METH_VARARGS|METH_KEYWORDS,
+    compute_log_likelihood.doc()
+  },
+  {0} /* Sentinel */
+};
+
+
+/******************************************************************/
+/************ Module Section **************************************/
+/******************************************************************/
+
+// Define the PLDAMachine type struct; will be initialized later
+PyTypeObject PyBobLearnEMPLDAMachine_Type = {
+  PyVarObject_HEAD_INIT(0,0)
+  0
+};
+
+bool init_BobLearnEMPLDAMachine(PyObject* module)
+{
+  // initialize the type struct
+  PyBobLearnEMPLDAMachine_Type.tp_name      = PLDAMachine_doc.name();
+  PyBobLearnEMPLDAMachine_Type.tp_basicsize = sizeof(PyBobLearnEMPLDAMachineObject);
+  PyBobLearnEMPLDAMachine_Type.tp_flags     = Py_TPFLAGS_DEFAULT;
+  PyBobLearnEMPLDAMachine_Type.tp_doc       = PLDAMachine_doc.doc();
+
+  // set the functions
+  PyBobLearnEMPLDAMachine_Type.tp_new         = PyType_GenericNew;
+  PyBobLearnEMPLDAMachine_Type.tp_init        = reinterpret_cast<initproc>(PyBobLearnEMPLDAMachine_init);
+  PyBobLearnEMPLDAMachine_Type.tp_dealloc     = reinterpret_cast<destructor>(PyBobLearnEMPLDAMachine_delete);
+  PyBobLearnEMPLDAMachine_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnEMPLDAMachine_RichCompare);
+  PyBobLearnEMPLDAMachine_Type.tp_methods     = PyBobLearnEMPLDAMachine_methods;
+  PyBobLearnEMPLDAMachine_Type.tp_getset      = PyBobLearnEMPLDAMachine_getseters;
+  PyBobLearnEMPLDAMachine_Type.tp_call = reinterpret_cast<ternaryfunc>(PyBobLearnEMPLDAMachine_forward);
+
+
+  // check that everything is fine
+  if (PyType_Ready(&PyBobLearnEMPLDAMachine_Type) < 0) return false;
+
+  // add the type to the module
+  Py_INCREF(&PyBobLearnEMPLDAMachine_Type);
+  return PyModule_AddObject(module, "PLDAMachine", (PyObject*)&PyBobLearnEMPLDAMachine_Type) >= 0;
+}
+
diff --git a/bob/learn/em/plda_trainer.cpp b/bob/learn/em/plda_trainer.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..3144fbccce27f979050e5c3347d0782d9873814a
--- /dev/null
+++ b/bob/learn/em/plda_trainer.cpp
@@ -0,0 +1,704 @@
+/**
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ * @date Wed 04 Feb 14:15:00 2015
+ *
+ * @brief Python API for bob::learn::em
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "main.h"
+#include <boost/make_shared.hpp>
+
+//Defining maps for each initialization method
+static const std::map<std::string, bob::learn::em::PLDATrainer::InitFMethod> FMethod = {{"RANDOM_F",  bob::learn::em::PLDATrainer::RANDOM_F}, {"BETWEEN_SCATTER", bob::learn::em::PLDATrainer::BETWEEN_SCATTER}};
+
+static const std::map<std::string, bob::learn::em::PLDATrainer::InitGMethod> GMethod = {{"RANDOM_G",  bob::learn::em::PLDATrainer::RANDOM_G}, {"WITHIN_SCATTER", bob::learn::em::PLDATrainer::WITHIN_SCATTER}};
+
+static const std::map<std::string, bob::learn::em::PLDATrainer::InitSigmaMethod> SigmaMethod = {{"RANDOM_SIGMA",  bob::learn::em::PLDATrainer::RANDOM_SIGMA}, {"VARIANCE_G", bob::learn::em::PLDATrainer::VARIANCE_G}, {"CONSTANT", bob::learn::em::PLDATrainer::CONSTANT}, {"VARIANCE_DATA", bob::learn::em::PLDATrainer::VARIANCE_DATA}};
+
+
+
+//String to type
+static inline bob::learn::em::PLDATrainer::InitFMethod string2FMethod(const std::string& o){
+  auto it = FMethod.find(o);
+  if (it == FMethod.end()) throw std::runtime_error("The given FMethod '" + o + "' is not known; choose one of ('RANDOM_F','BETWEEN_SCATTER')");
+  else return it->second;
+}
+
+static inline bob::learn::em::PLDATrainer::InitGMethod string2GMethod(const std::string& o){
+  auto it = GMethod.find(o);
+  if (it == GMethod.end()) throw std::runtime_error("The given GMethod '" + o + "' is not known; choose one of ('RANDOM_G','WITHIN_SCATTER')");
+  else return it->second;
+}
+
+static inline bob::learn::em::PLDATrainer::InitSigmaMethod string2SigmaMethod(const std::string& o){
+  auto it = SigmaMethod.find(o);
+  if (it == SigmaMethod.end()) throw std::runtime_error("The given SigmaMethod '" + o + "' is not known; choose one of ('RANDOM_SIGMA','VARIANCE_G', 'CONSTANT', 'VARIANCE_DATA')");
+  else return it->second;
+}
+
+//Type to string
+static inline const std::string& FMethod2string(bob::learn::em::PLDATrainer::InitFMethod o){
+  for (auto it = FMethod.begin(); it != FMethod.end(); ++it) if (it->second == o) return it->first;
+  throw std::runtime_error("The given FMethod type is not known");
+}
+
+static inline const std::string& GMethod2string(bob::learn::em::PLDATrainer::InitGMethod o){
+  for (auto it = GMethod.begin(); it != GMethod.end(); ++it) if (it->second == o) return it->first;
+  throw std::runtime_error("The given GMethod type is not known");
+}
+
+static inline const std::string& SigmaMethod2string(bob::learn::em::PLDATrainer::InitSigmaMethod o){
+  for (auto it = SigmaMethod.begin(); it != SigmaMethod.end(); ++it) if (it->second == o) return it->first;
+  throw std::runtime_error("The given SigmaMethod type is not known");
+}
+
+
+static inline bool f(PyObject* o){return o != 0 && PyObject_IsTrue(o) > 0;}  /* converts PyObject to bool and returns false if object is NULL */
+
+template <int N>
+int list_as_vector(PyObject* list, std::vector<blitz::Array<double,N> >& vec)
+{
+  for (int i=0; i<PyList_GET_SIZE(list); i++)
+  {
+    PyBlitzArrayObject* blitz_object; 
+    if (!PyArg_Parse(PyList_GetItem(list, i), "O&", &PyBlitzArray_Converter, &blitz_object)){
+      PyErr_Format(PyExc_RuntimeError, "Expected numpy array object");
+      return -1;
+    }
+    auto blitz_object_ = make_safe(blitz_object);
+    vec.push_back(*PyBlitzArrayCxx_AsBlitz<double,N>(blitz_object));
+  }
+  return 0;
+}
+
+
+template <int N>
+static PyObject* vector_as_list(const std::vector<blitz::Array<double,N> >& vec)
+{
+  PyObject* list = PyList_New(vec.size());
+  for(size_t i=0; i<vec.size(); i++){
+    blitz::Array<double,N> numpy_array = vec[i];
+    PyObject* numpy_py_object = PyBlitzArrayCxx_AsNumpy(numpy_array);
+    PyList_SET_ITEM(list, i, numpy_py_object);
+  }
+  return list;
+}
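+
+/* The two templates above convert between a Python list of numpy arrays and a
+   std::vector of blitz::Arrays. Minimal C++ sketch of the intended round trip
+   (error handling elided):
+
+     std::vector<blitz::Array<double,2> > vec;
+     if (list_as_vector(py_list, vec) == 0) {   // Python list -> C++ vector
+       // ... hand `vec` to the C++ trainer ...
+       PyObject* back = vector_as_list(vec);    // C++ vector -> new Python list
+     }
+*/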
+
+
+/******************************************************************/
+/************ Constructor Section *********************************/
+/******************************************************************/
+
+
+static auto PLDATrainer_doc = bob::extension::ClassDoc(
+  BOB_EXT_MODULE_PREFIX ".PLDATrainer",
+  "This class can be used to train the :math:`$F$`, :math:`$G$ and "
+  " :math:`$\\Sigma$` matrices and the mean vector :math:`$\\mu$` of a PLDA model."
+  "References: [ElShafey2014,PrinceElder2007,LiFu2012]",
+  ""
+).add_constructor(
+  bob::extension::FunctionDoc(
+    "__init__",
+    "Default constructor.\n Initializes a new PLDA trainer. The "
+    "training stage will place the resulting components in the "
+    "PLDABase.",
+    "",
+    true
+  )
+  .add_prototype("use_sum_second_order","")
+  .add_prototype("other","")
+  .add_prototype("","")
+
+  .add_parameter("other", ":py:class:`bob.learn.em.PLDATrainer`", "A PLDATrainer object to be copied.")
+  .add_parameter("use_sum_second_order", "bool", "")
+);
+
+static int PyBobLearnEMPLDATrainer_init_copy(PyBobLearnEMPLDATrainerObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = PLDATrainer_doc.kwlist(1);
+  PyBobLearnEMPLDATrainerObject* o;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMPLDATrainer_Type, &o)){
+    PLDATrainer_doc.print_usage();
+    return -1;
+  }
+
+  self->cxx.reset(new bob::learn::em::PLDATrainer(*o->cxx));
+  return 0;
+}
+
+
+static int PyBobLearnEMPLDATrainer_init_bool(PyBobLearnEMPLDATrainerObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = PLDATrainer_doc.kwlist(0);
+  PyObject* use_sum_second_order = Py_True;  //assumed default when the argument is omitted
+
+  //Parsing the input arguments
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O!", kwlist, &PyBool_Type, &use_sum_second_order))
+    return -1;
+
+  self->cxx.reset(new bob::learn::em::PLDATrainer(f(use_sum_second_order)));
+  return 0;
+}
+
+
+static int PyBobLearnEMPLDATrainer_init(PyBobLearnEMPLDATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // get the number of command line arguments
+  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
+
+  //Zero arguments: delegate to the bool variant, which accepts an omitted flag
+  if(nargs==0)
+    return PyBobLearnEMPLDATrainer_init_bool(self, args, kwargs);
+  else if(nargs==1){
+    //Reading the input argument
+    PyObject* arg = 0;
+    if (PyTuple_Size(args))
+      arg = PyTuple_GET_ITEM(args, 0);
+    else {
+      PyObject* tmp = PyDict_Values(kwargs);
+      auto tmp_ = make_safe(tmp);
+      arg = PyList_GET_ITEM(tmp, 0);
+    }
+      
+    if(PyBobLearnEMPLDATrainer_Check(arg))
+      // If the constructor input is PLDATrainer object
+      return PyBobLearnEMPLDATrainer_init_copy(self, args, kwargs);
+    else
+      return PyBobLearnEMPLDATrainer_init_bool(self, args, kwargs);
+  }
+  else{
+    PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires only 0 or 1 argument, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
+    PLDATrainer_doc.print_usage();
+    return -1;
+  }
+
+  BOB_CATCH_MEMBER("cannot create PLDATrainer", 0)
+  return 0;
+}
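+
+/* Constructor dispatch sketch (Python side), matching the three prototypes
+   documented above:
+
+     t1 = bob.learn.em.PLDATrainer()       # default (delegates to the bool variant)
+     t2 = bob.learn.em.PLDATrainer(True)   # use_sum_second_order
+     t3 = bob.learn.em.PLDATrainer(t2)     # copy
+*/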
+
+
+static void PyBobLearnEMPLDATrainer_delete(PyBobLearnEMPLDATrainerObject* self) {
+  self->cxx.reset();
+  Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+
+int PyBobLearnEMPLDATrainer_Check(PyObject* o) {
+  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnEMPLDATrainer_Type));
+}
+
+
+static PyObject* PyBobLearnEMPLDATrainer_RichCompare(PyBobLearnEMPLDATrainerObject* self, PyObject* other, int op) {
+  BOB_TRY
+
+  if (!PyBobLearnEMPLDATrainer_Check(other)) {
+    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
+    return 0;
+  }
+  auto other_ = reinterpret_cast<PyBobLearnEMPLDATrainerObject*>(other);
+  switch (op) {
+    case Py_EQ:
+      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+    case Py_NE:
+      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
+    default:
+      Py_INCREF(Py_NotImplemented);
+      return Py_NotImplemented;
+  }
+  BOB_CATCH_MEMBER("cannot compare PLDATrainer objects", 0)
+}
+
+
+/******************************************************************/
+/************ Variables Section ***********************************/
+/******************************************************************/
+
+static auto z_second_order = bob::extension::VariableDoc(
+  "z_second_order",
+  "array_like <float, 3D>",
+  "",
+  ""
+);
+PyObject* PyBobLearnEMPLDATrainer_get_z_second_order(PyBobLearnEMPLDATrainerObject* self, void*){
+  BOB_TRY
+  //return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getZSecondOrder());
+  return vector_as_list(self->cxx->getZSecondOrder());
+  BOB_CATCH_MEMBER("z_second_order could not be read", 0)
+}
+
+
+static auto z_second_order_sum = bob::extension::VariableDoc(
+  "z_second_order_sum",
+  "array_like <float, 2D>",
+  "",
+  ""
+);
+PyObject* PyBobLearnEMPLDATrainer_get_z_second_order_sum(PyBobLearnEMPLDATrainerObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getZSecondOrderSum());
+  BOB_CATCH_MEMBER("z_second_order_sum could not be read", 0)
+}
+
+
+static auto z_first_order = bob::extension::VariableDoc(
+  "z_first_order",
+  "array_like <float, 2D>",
+  "",
+  ""
+);
+PyObject* PyBobLearnEMPLDATrainer_get_z_first_order(PyBobLearnEMPLDATrainerObject* self, void*){
+  BOB_TRY
+  //return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getZFirstOrder());
+  return vector_as_list(self->cxx->getZFirstOrder());
+  BOB_CATCH_MEMBER("z_first_order could not be read", 0)
+}
+
+
+/***** rng *****/
+static auto rng = bob::extension::VariableDoc(
+  "rng",
+  "str",
+  "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.",
+  ""
+);
+PyObject* PyBobLearnEMPLDATrainer_getRng(PyBobLearnEMPLDATrainerObject* self, void*) {
+  BOB_TRY
+  //Allocating the correspondent python object
+  
+  PyBoostMt19937Object* retval =
+    (PyBoostMt19937Object*)PyBoostMt19937_Type.tp_alloc(&PyBoostMt19937_Type, 0);
+
+  retval->rng = self->cxx->getRng().get();
+  return Py_BuildValue("O", retval);
+  BOB_CATCH_MEMBER("Rng method could not be read", 0)
+}
+int PyBobLearnEMPLDATrainer_setRng(PyBobLearnEMPLDATrainerObject* self, PyObject* value, void*) {
+  BOB_TRY
+
+  if (!PyBoostMt19937_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a bob.core.random.mt19937 object", Py_TYPE(self)->tp_name, rng.name());
+    return -1;
+  }
+
+  PyBoostMt19937Object* rng_object = 0;
+  PyArg_Parse(value, "O!", &PyBoostMt19937_Type, &rng_object);
+  self->cxx->setRng((boost::shared_ptr<boost::mt19937>)rng_object->rng);
+
+  return 0;
+  BOB_CATCH_MEMBER("Rng could not be set", 0)
+}
+
+
+/***** init_f_method *****/
+static auto init_f_method = bob::extension::VariableDoc(
+  "init_f_method",
+  "str",
+  "The method used for the initialization of :math:`$F$`.",
+  ""
+);
+PyObject* PyBobLearnEMPLDATrainer_getFMethod(PyBobLearnEMPLDATrainerObject* self, void*) {
+  BOB_TRY
+  return Py_BuildValue("s", FMethod2string(self->cxx->getInitFMethod()).c_str());
+  BOB_CATCH_MEMBER("init_f_method method could not be read", 0)
+}
+int PyBobLearnEMPLDATrainer_setFMethod(PyBobLearnEMPLDATrainerObject* self, PyObject* value, void*) {
+  BOB_TRY
+
+  if (!PyString_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a str", Py_TYPE(self)->tp_name, init_f_method.name());
+    return -1;
+  }
+  self->cxx->setInitFMethod(string2FMethod(PyString_AS_STRING(value)));
+
+  return 0;
+  BOB_CATCH_MEMBER("init_f_method method could not be set", 0)
+}
+
+
+/***** init_g_method *****/
+static auto init_g_method = bob::extension::VariableDoc(
+  "init_g_method",
+  "str",
+  "The method used for the initialization of :math:`$G$`.",
+  ""
+);
+PyObject* PyBobLearnEMPLDATrainer_getGMethod(PyBobLearnEMPLDATrainerObject* self, void*) {
+  BOB_TRY
+  return Py_BuildValue("s", GMethod2string(self->cxx->getInitGMethod()).c_str());
+  BOB_CATCH_MEMBER("init_g_method method could not be read", 0)
+}
+int PyBobLearnEMPLDATrainer_setGMethod(PyBobLearnEMPLDATrainerObject* self, PyObject* value, void*) {
+  BOB_TRY
+
+  if (!PyString_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a str", Py_TYPE(self)->tp_name, init_g_method.name());
+    return -1;
+  }
+  self->cxx->setInitGMethod(string2GMethod(PyString_AS_STRING(value)));
+
+  return 0;
+  BOB_CATCH_MEMBER("init_g_method method could not be set", 0)
+}
+
+
+/***** init_sigma_method *****/
+static auto init_sigma_method = bob::extension::VariableDoc(
+  "init_sigma_method",
+  "str",
+  "The method used for the initialization of :math:`$\\Sigma$`.",
+  ""
+);
+PyObject* PyBobLearnEMPLDATrainer_getSigmaMethod(PyBobLearnEMPLDATrainerObject* self, void*) {
+  BOB_TRY
+  return Py_BuildValue("s", SigmaMethod2string(self->cxx->getInitSigmaMethod()).c_str());
+  BOB_CATCH_MEMBER("init_sigma_method method could not be read", 0)
+}
+int PyBobLearnEMPLDATrainer_setSigmaMethod(PyBobLearnEMPLDATrainerObject* self, PyObject* value, void*) {
+  BOB_TRY
+
+  if (!PyString_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a str", Py_TYPE(self)->tp_name, init_sigma_method.name());
+    return -1;
+  }
+  self->cxx->setInitSigmaMethod(string2SigmaMethod(PyString_AS_STRING(value)));
+
+  return 0;
+  BOB_CATCH_MEMBER("init_sigma_method method could not be set", 0)
+}
+
+
+static PyGetSetDef PyBobLearnEMPLDATrainer_getseters[] = { 
+  {
+   z_first_order.name(),
+   (getter)PyBobLearnEMPLDATrainer_get_z_first_order,
+   0,
+   z_first_order.doc(),
+   0
+  },
+  {
+   z_second_order_sum.name(),
+   (getter)PyBobLearnEMPLDATrainer_get_z_second_order_sum,
+   0,
+   z_second_order_sum.doc(),
+   0
+  },
+  {
+   z_second_order.name(),
+   (getter)PyBobLearnEMPLDATrainer_get_z_second_order,
+   0,
+   z_second_order.doc(),
+   0
+  },
+  {
+   rng.name(),
+   (getter)PyBobLearnEMPLDATrainer_getRng,
+   (setter)PyBobLearnEMPLDATrainer_setRng,
+   rng.doc(),
+   0
+  },
+  {
+   init_f_method.name(),
+   (getter)PyBobLearnEMPLDATrainer_getFMethod,
+   (setter)PyBobLearnEMPLDATrainer_setFMethod,
+   init_f_method.doc(),
+   0
+  },
+  {
+   init_g_method.name(),
+   (getter)PyBobLearnEMPLDATrainer_getGMethod,
+   (setter)PyBobLearnEMPLDATrainer_setGMethod,
+   init_g_method.doc(),
+   0
+  },
+  {
+   init_sigma_method.name(),
+   (getter)PyBobLearnEMPLDATrainer_getSigmaMethod,
+   (setter)PyBobLearnEMPLDATrainer_setSigmaMethod,
+   init_sigma_method.doc(),
+   0
+  },  
+  {0}  // Sentinel
+};
+
+
+/******************************************************************/
+/************ Functions Section ***********************************/
+/******************************************************************/
+
+/*** initialize ***/
+static auto initialize = bob::extension::FunctionDoc(
+  "initialize",
+  "Initialization before the EM steps",
+  "",
+  true
+)
+.add_prototype("plda_base,data")
+.add_parameter("plda_base", ":py:class:`bob.learn.em.PLDABase`", "PLDAMachine Object")
+.add_parameter("data", "list", "");
+static PyObject* PyBobLearnEMPLDATrainer_initialize(PyBobLearnEMPLDATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = initialize.kwlist(0);
+
+  PyBobLearnEMPLDABaseObject* plda_base = 0;
+  PyObject* data = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnEMPLDABase_Type, &plda_base,
+                                                                 &PyList_Type, &data)) return 0;
+
+  std::vector<blitz::Array<double,2> > data_vector;
+  if(list_as_vector(data ,data_vector)==0)
+    self->cxx->initialize(*plda_base->cxx, data_vector);
+
+  BOB_CATCH_MEMBER("cannot perform the initialize method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** e_step ***/
+static auto e_step = bob::extension::FunctionDoc(
+  "e_step",
+  "e_step before the EM steps",
+  "",
+  true
+)
+.add_prototype("plda_base,data")
+.add_parameter("plda_base", ":py:class:`bob.learn.em.PLDABase`", "PLDAMachine Object")
+.add_parameter("data", "list", "");
+static PyObject* PyBobLearnEMPLDATrainer_e_step(PyBobLearnEMPLDATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = e_step.kwlist(0);
+
+  PyBobLearnEMPLDABaseObject* plda_base = 0;
+  PyObject* data = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnEMPLDABase_Type, &plda_base,
+                                                                 &PyList_Type, &data)) return 0;
+
+  std::vector<blitz::Array<double,2> > data_vector;
+  if(list_as_vector(data ,data_vector)==0)
+    self->cxx->eStep(*plda_base->cxx, data_vector);
+
+  BOB_CATCH_MEMBER("cannot perform the e_step method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** m_step ***/
+static auto m_step = bob::extension::FunctionDoc(
+  "m_step",
+  "m_step before the EM steps",
+  "",
+  true
+)
+.add_prototype("plda_base,data")
+.add_parameter("plda_base", ":py:class:`bob.learn.em.PLDABase`", "PLDAMachine Object")
+.add_parameter("data", "list", "");
+static PyObject* PyBobLearnEMPLDATrainer_m_step(PyBobLearnEMPLDATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = m_step.kwlist(0);
+
+  PyBobLearnEMPLDABaseObject* plda_base = 0;
+  PyObject* data = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnEMPLDABase_Type, &plda_base,
+                                                                 &PyList_Type, &data)) return 0;
+
+  std::vector<blitz::Array<double,2> > data_vector;
+  if(list_as_vector(data ,data_vector)==0)
+    self->cxx->mStep(*plda_base->cxx, data_vector);
+
+  BOB_CATCH_MEMBER("cannot perform the m_step method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** finalize ***/
+static auto finalize = bob::extension::FunctionDoc(
+  "finalize",
+  "finalize before the EM steps",
+  "",
+  true
+)
+.add_prototype("plda_base,data")
+.add_parameter("plda_base", ":py:class:`bob.learn.em.PLDABase`", "PLDAMachine Object")
+.add_parameter("data", "list", "");
+static PyObject* PyBobLearnEMPLDATrainer_finalize(PyBobLearnEMPLDATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = finalize.kwlist(0);
+
+  PyBobLearnEMPLDABaseObject* plda_base = 0;
+  PyObject* data = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnEMPLDABase_Type, &plda_base,
+                                                                 &PyList_Type, &data)) return 0;
+
+  std::vector<blitz::Array<double,2> > data_vector;
+  if(list_as_vector(data ,data_vector)==0)
+    self->cxx->finalize(*plda_base->cxx, data_vector);
+
+  BOB_CATCH_MEMBER("cannot perform the finalize method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+
+/*** enrol ***/
+static auto enrol = bob::extension::FunctionDoc(
+  "enrol",
+  "Main procedure for enrolling a PLDAMachine",
+  "",
+  true
+)
+.add_prototype("plda_machine,data")
+.add_parameter("plda_machine", ":py:class:`bob.learn.em.PLDAMachine`", "PLDAMachine Object")
+.add_parameter("data", "list", "");
+static PyObject* PyBobLearnEMPLDATrainer_enrol(PyBobLearnEMPLDATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = enrol.kwlist(0);
+
+  PyBobLearnEMPLDAMachineObject* plda_machine = 0;
+  PyBlitzArrayObject* data = 0;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMPLDAMachine_Type, &plda_machine,
+                                                                 &PyBlitzArray_Converter, &data)) return 0;
+
+  auto data_ = make_safe(data);
+  self->cxx->enrol(*plda_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
+
+  BOB_CATCH_MEMBER("cannot perform the enrol method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** is_similar_to ***/
+static auto is_similar_to = bob::extension::FunctionDoc(
+  "is_similar_to",
+  
+  "Compares this PLDATrainer with the ``other`` one to be approximately the same.",
+  "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
+  "relative and absolute precision for the ``weights``, ``biases`` "
+  "and any other values internal to this machine."
+)
+.add_prototype("other, [r_epsilon], [a_epsilon]","output")
+.add_parameter("other", ":py:class:`bob.learn.em.PLDAMachine`", "A PLDAMachine object to be compared.")
+.add_parameter("r_epsilon", "float", "Relative precision.")
+.add_parameter("a_epsilon", "float", "Absolute precision.")
+.add_return("output","bool","True if it is similar, otherwise false.");
+static PyObject* PyBobLearnEMPLDATrainer_IsSimilarTo(PyBobLearnEMPLDATrainerObject* self, PyObject* args, PyObject* kwds) {
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = is_similar_to.kwlist(0);
+
+  PyBobLearnEMPLDATrainerObject* other = 0;
+  double r_epsilon = 1.e-5;
+  double a_epsilon = 1.e-8;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!|dd", kwlist,
+        &PyBobLearnEMPLDATrainer_Type, &other,
+        &r_epsilon, &a_epsilon)){
+
+        is_similar_to.print_usage(); 
+        return 0;        
+  }
+
+  if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
+    Py_RETURN_TRUE;
+  else
+    Py_RETURN_FALSE;
+}
+
+
+
+static PyMethodDef PyBobLearnEMPLDATrainer_methods[] = {
+  {
+    initialize.name(),
+    (PyCFunction)PyBobLearnEMPLDATrainer_initialize,
+    METH_VARARGS|METH_KEYWORDS,
+    initialize.doc()
+  },
+  {
+    e_step.name(),
+    (PyCFunction)PyBobLearnEMPLDATrainer_e_step,
+    METH_VARARGS|METH_KEYWORDS,
+    e_step.doc()
+  },
+  {
+    m_step.name(),
+    (PyCFunction)PyBobLearnEMPLDATrainer_m_step,
+    METH_VARARGS|METH_KEYWORDS,
+    m_step.doc()
+  },
+  {
+    finalize.name(),
+    (PyCFunction)PyBobLearnEMPLDATrainer_finalize,
+    METH_VARARGS|METH_KEYWORDS,
+    finalize.doc()
+  },  
+  {
+    enrol.name(),
+    (PyCFunction)PyBobLearnEMPLDATrainer_enrol,
+    METH_VARARGS|METH_KEYWORDS,
+    enrol.doc()
+  },
+  {
+    is_similar_to.name(),
+    (PyCFunction)PyBobLearnEMPLDATrainer_IsSimilarTo,
+    METH_VARARGS|METH_KEYWORDS,
+    is_similar_to.doc()
+  },
+  {0} /* Sentinel */
+};
+
+
+/******************************************************************/
+/************ Module Section **************************************/
+/******************************************************************/
+
+// Define the PLDATrainer type struct; will be initialized later
+PyTypeObject PyBobLearnEMPLDATrainer_Type = {
+  PyVarObject_HEAD_INIT(0,0)
+  0
+};
+
+bool init_BobLearnEMPLDATrainer(PyObject* module)
+{
+  // initialize the type struct
+  PyBobLearnEMPLDATrainer_Type.tp_name      = PLDATrainer_doc.name();
+  PyBobLearnEMPLDATrainer_Type.tp_basicsize = sizeof(PyBobLearnEMPLDATrainerObject);
+  PyBobLearnEMPLDATrainer_Type.tp_flags     = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE; // enable class inheritance
+  PyBobLearnEMPLDATrainer_Type.tp_doc       = PLDATrainer_doc.doc();
+
+  // set the functions
+  PyBobLearnEMPLDATrainer_Type.tp_new          = PyType_GenericNew;
+  PyBobLearnEMPLDATrainer_Type.tp_init         = reinterpret_cast<initproc>(PyBobLearnEMPLDATrainer_init);
+  PyBobLearnEMPLDATrainer_Type.tp_dealloc      = reinterpret_cast<destructor>(PyBobLearnEMPLDATrainer_delete);
+  PyBobLearnEMPLDATrainer_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnEMPLDATrainer_RichCompare);
+  PyBobLearnEMPLDATrainer_Type.tp_methods      = PyBobLearnEMPLDATrainer_methods;
+  PyBobLearnEMPLDATrainer_Type.tp_getset       = PyBobLearnEMPLDATrainer_getseters;
+
+
+  // check that everything is fine
+  if (PyType_Ready(&PyBobLearnEMPLDATrainer_Type) < 0) return false;
+
+  // add the type to the module
+  Py_INCREF(&PyBobLearnEMPLDATrainer_Type);
+  return PyModule_AddObject(module, "_PLDATrainer", (PyObject*)&PyBobLearnEMPLDATrainer_Type) >= 0;
+}
+
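The bindings above expose each EM phase separately; the loop itself is driven from Python. A minimal usage sketch (not part of the diff; it assumes the `_PLDATrainer` type registered here is re-exported as `bob.learn.em.PLDATrainer`, and that `PLDABase`/`PLDAMachine` accept the constructor arguments shown):

```python
import numpy
import bob.learn.em

# Training data: one 2D float64 array per identity (rows are samples),
# matching the list_as_vector<blitz::Array<double,2>> conversion above.
data = [numpy.random.randn(5, 64), numpy.random.randn(7, 64)]

base = bob.learn.em.PLDABase(64, 2, 2)   # assumed signature: dim_d, dim_f, dim_g
trainer = bob.learn.em.PLDATrainer()

trainer.initialize(base, data)
for _ in range(10):                      # a fixed number of EM iterations
    trainer.e_step(base, data)
    trainer.m_step(base, data)
trainer.finalize(base, data)

# Enrolment takes a machine and one 2D array holding the client's samples.
machine = bob.learn.em.PLDAMachine(base)
trainer.enrol(machine, data[0])
```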
diff --git a/bob/learn/em/test/test_em.py b/bob/learn/em/test/test_em.py
new file mode 100644
index 0000000000000000000000000000000000000000..85cac676b4711ed5c1df0d99b8359b65558b52b2
--- /dev/null
+++ b/bob/learn/em/test/test_em.py
@@ -0,0 +1,252 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Francois Moulin <Francois.Moulin@idiap.ch>
+# Tue May 10 11:35:58 2011 +0200
+#
+# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland
+
+"""Test trainer package
+"""
+import unittest
+import numpy
+
+import bob.io.base
+from bob.io.base.test_utils import datafile
+
+from bob.learn.em import KMeansMachine, GMMMachine, KMeansTrainer, \
+    ML_GMMTrainer, MAP_GMMTrainer
+
+
+def loadGMM():
+  gmm = GMMMachine(2, 2)
+
+  gmm.weights = bob.io.base.load(datafile('gmm.init_weights.hdf5', __name__, path="../data/"))
+  gmm.means = bob.io.base.load(datafile('gmm.init_means.hdf5', __name__, path="../data/"))
+  gmm.variances = bob.io.base.load(datafile('gmm.init_variances.hdf5', __name__, path="../data/"))
+  #gmm.variance_thresholds = numpy.array([[0.001, 0.001],[0.001, 0.001]], 'float64')
+
+  return gmm
+
+def equals(x, y, epsilon):
+  return (abs(x - y) < epsilon).all()
+
+class MyTrainer1(KMeansTrainer):
+  """Simple example of python trainer: """
+
+  def __init__(self):
+    KMeansTrainer.__init__(self)
+
+  def train(self, machine, data):
+    a = numpy.ndarray((2, 2), 'float64')
+    a[0, :] = data[1]
+    a[1, :] = data[2]
+    machine.means = a
+
+def test_gmm_ML_1():
+
+  # Trains a GMMMachine with ML_GMMTrainer
+
+  ar = bob.io.base.load(datafile("faithful.torch3_f64.hdf5", __name__, path="../data/"))  
+  gmm = loadGMM()
+  
+  ml_gmmtrainer = ML_GMMTrainer(True, True, True)
+  ml_gmmtrainer.train(gmm, ar)
+
+  #config = bob.io.base.HDF5File(datafile('gmm_ML.hdf5", __name__), 'w')
+  #gmm.save(config)
+  
+  gmm_ref = GMMMachine(bob.io.base.HDF5File(datafile('gmm_ML.hdf5', __name__, path="../data/")))
+  gmm_ref_32bit_debug = GMMMachine(bob.io.base.HDF5File(datafile('gmm_ML_32bit_debug.hdf5', __name__, path="../data/")))
+  gmm_ref_32bit_release = GMMMachine(bob.io.base.HDF5File(datafile('gmm_ML_32bit_release.hdf5', __name__, path="../data/")))
+
+  assert (gmm == gmm_ref) or (gmm == gmm_ref_32bit_debug) or (gmm == gmm_ref_32bit_release)
+
+ 
+def test_gmm_ML_2():
+
+  # Trains a GMMMachine with ML_GMMTrainer; compares to an old reference
+
+  ar = bob.io.base.load(datafile('dataNormalized.hdf5', __name__, path="../data/"))
+
+  # Initialize GMMMachine
+  gmm = GMMMachine(5, 45)
+  gmm.means = bob.io.base.load(datafile('meansAfterKMeans.hdf5', __name__, path="../data/")).astype('float64')
+  gmm.variances = bob.io.base.load(datafile('variancesAfterKMeans.hdf5', __name__, path="../data/")).astype('float64')
+  gmm.weights = numpy.exp(bob.io.base.load(datafile('weightsAfterKMeans.hdf5', __name__, path="../data/")).astype('float64'))
+
+  threshold = 0.001
+  gmm.set_variance_thresholds(threshold)
+
+  # Initialize ML Trainer
+  prior = 0.001
+  max_iter_gmm = 25
+  accuracy = 0.00001
+  ml_gmmtrainer = ML_GMMTrainer(True, True, True, prior, converge_by_likelihood=True)
+  ml_gmmtrainer.max_iterations = max_iter_gmm
+  ml_gmmtrainer.convergence_threshold = accuracy
+  
+  # Run ML
+  ml_gmmtrainer.train(gmm, ar)
+
+
+  # Test results
+  # Load torch3vision reference
+  meansML_ref = bob.io.base.load(datafile('meansAfterML.hdf5', __name__, path="../data/"))
+  variancesML_ref = bob.io.base.load(datafile('variancesAfterML.hdf5', __name__, path="../data/"))
+  weightsML_ref = bob.io.base.load(datafile('weightsAfterML.hdf5', __name__, path="../data/"))
+
+
+  # Compare to current results
+  assert equals(gmm.means, meansML_ref, 3e-3)
+  assert equals(gmm.variances, variancesML_ref, 3e-3)
+  assert equals(gmm.weights, weightsML_ref, 1e-4)
+
+
+
+def test_gmm_MAP_1():
+
+  # Train a GMMMachine with MAP_GMMTrainer
+
+  ar = bob.io.base.load(datafile('faithful.torch3_f64.hdf5', __name__, path="../data/"))
+
+  gmm = GMMMachine(bob.io.base.HDF5File(datafile("gmm_ML.hdf5", __name__, path="../data/")))
+  gmmprior = GMMMachine(bob.io.base.HDF5File(datafile("gmm_ML.hdf5", __name__, path="../data/")))
+
+  map_gmmtrainer = MAP_GMMTrainer(update_means=True, update_variances=False, update_weights=False, prior_gmm=gmmprior, relevance_factor=4.)  
+  #map_gmmtrainer.set_prior_gmm(gmmprior)
+  map_gmmtrainer.train(gmm, ar)
+
+  #config = bob.io.base.HDF5File(datafile('gmm_MAP.hdf5", 'w', __name__))
+  #gmm.save(config)
+
+  gmm_ref = GMMMachine(bob.io.base.HDF5File(datafile('gmm_MAP.hdf5', __name__, path="../data/")))
+
+  assert (equals(gmm.means,gmm_ref.means,1e-3) and equals(gmm.variances,gmm_ref.variances,1e-3) and equals(gmm.weights,gmm_ref.weights,1e-3))
+
+
+def test_gmm_MAP_2():
+
+  # Train a GMMMachine with MAP_GMMTrainer and compare with matlab reference
+
+  data = bob.io.base.load(datafile('data.hdf5', __name__, path="../data/"))
+  data = data.reshape((1, data.shape[0])) # make a 2D array out of it
+  means = bob.io.base.load(datafile('means.hdf5', __name__, path="../data/"))
+  variances = bob.io.base.load(datafile('variances.hdf5', __name__, path="../data/"))
+  weights = bob.io.base.load(datafile('weights.hdf5', __name__, path="../data/"))
+
+  gmm = GMMMachine(2,50)
+  gmm.means = means
+  gmm.variances = variances
+  gmm.weights = weights
+
+  map_adapt = MAP_GMMTrainer(update_means=True, update_variances=False, update_weights=False, mean_var_update_responsibilities_threshold=0.,prior_gmm=gmm, relevance_factor=4.)
+  #map_adapt.set_prior_gmm(gmm)
+
+  gmm_adapted = GMMMachine(2,50)
+  gmm_adapted.means = means
+  gmm_adapted.variances = variances
+  gmm_adapted.weights = weights
+
+  map_adapt.max_iterations = 1
+  map_adapt.train(gmm_adapted, data)
+
+  new_means = bob.io.base.load(datafile('new_adapted_mean.hdf5', __name__, path="../data/"))
+
+
+  # Compare to matlab reference
+  assert equals(new_means[0,:], gmm_adapted.means[:,0], 1e-4)
+  assert equals(new_means[1,:], gmm_adapted.means[:,1], 1e-4)
+
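For reference, the means-only update that both MAP tests above exercise is the relevance-factor adaptation of Reynolds et al. (2000): per component i, alpha_i = n_i / (n_i + r) and m_i <- alpha_i * E_i(x) + (1 - alpha_i) * m_i_prior. A small numpy sketch (not part of the diff; names are illustrative):

```python
import numpy

def map_adapt_means(prior_means, n, sum_px, relevance_factor=4.0):
    """One means-only MAP iteration.

    n: zeroth-order stats, shape (C,); sum_px: first-order stats, shape (C, D).
    """
    alpha = n / (n + relevance_factor)                # data/prior mixing weight
    e_x = sum_px / numpy.maximum(n, 1e-10)[:, None]   # posterior mean of the data
    return alpha[:, None] * e_x + (1.0 - alpha[:, None]) * prior_means
```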
+
+def test_gmm_MAP_3():
+
+  # Train a GMMMachine with MAP_GMMTrainer; compares to old reference
+
+  ar = bob.io.base.load(datafile('dataforMAP.hdf5', __name__, path="../data/"))
+
+  # Initialize GMMMachine
+  n_gaussians = 5
+  n_inputs = 45
+  prior_gmm = GMMMachine(n_gaussians, n_inputs)
+  prior_gmm.means = bob.io.base.load(datafile('meansAfterML.hdf5', __name__, path="../data/"))
+  prior_gmm.variances = bob.io.base.load(datafile('variancesAfterML.hdf5', __name__, path="../data/"))
+  prior_gmm.weights = bob.io.base.load(datafile('weightsAfterML.hdf5', __name__, path="../data/"))
+
+  threshold = 0.001
+  prior_gmm.set_variance_thresholds(threshold)
+
+  # Initialize MAP Trainer
+  relevance_factor = 0.1
+  prior = 0.001
+  max_iter_gmm = 1
+  accuracy = 0.00001
+  map_factor = 0.5
+  map_gmmtrainer = MAP_GMMTrainer(update_means=True, update_variances=False, update_weights=False, convergence_threshold=prior, prior_gmm=prior_gmm, alpha=map_factor)
+  map_gmmtrainer.max_iterations = max_iter_gmm
+  map_gmmtrainer.convergence_threshold = accuracy
+
+  gmm = GMMMachine(n_gaussians, n_inputs)
+  gmm.set_variance_thresholds(threshold)
+
+  # Train
+  map_gmmtrainer.train(gmm, ar)
+
+  # Test results
+  # Load torch3vision reference
+  meansMAP_ref = bob.io.base.load(datafile('meansAfterMAP.hdf5', __name__, path="../data/"))
+  variancesMAP_ref = bob.io.base.load(datafile('variancesAfterMAP.hdf5', __name__, path="../data/"))
+  weightsMAP_ref = bob.io.base.load(datafile('weightsAfterMAP.hdf5', __name__, path="../data/"))
+
+  # Compare to current results
+  # The gaps are quite large; this is likely because torch3 does not adapt a
+  # given Gaussian when its responsibilities fall below the threshold
+  assert equals(gmm.means, meansMAP_ref, 2e-1)
+  assert equals(gmm.variances, variancesMAP_ref, 1e-4)
+  assert equals(gmm.weights, weightsMAP_ref, 1e-4)
+
+
+def test_gmm_test():
+
+  # Tests a GMMMachine by computing scores against a model and compare to
+  # an old reference
+
+  ar = bob.io.base.load(datafile('dataforMAP.hdf5', __name__, path="../data/"))
+
+  # Initialize GMMMachine
+  n_gaussians = 5
+  n_inputs = 45
+  gmm = GMMMachine(n_gaussians, n_inputs)
+  gmm.means = bob.io.base.load(datafile('meansAfterML.hdf5', __name__, path="../data/"))
+  gmm.variances = bob.io.base.load(datafile('variancesAfterML.hdf5', __name__, path="../data/"))
+  gmm.weights = bob.io.base.load(datafile('weightsAfterML.hdf5', __name__, path="../data/"))
+
+  threshold = 0.001
+  gmm.set_variance_thresholds(threshold)
+
+  # Test against the model
+  score_mean_ref = -1.50379e+06
+  score = 0.
+  for v in ar: score += gmm(v)
+  score /= len(ar)
+
+  # Compare current results to torch3vision
+  assert abs(score-score_mean_ref)/abs(score_mean_ref) < 1e-4
+
+
+def test_custom_trainer():
+
+  # Custom python trainer
+
+  ar = bob.io.base.load(datafile("faithful.torch3_f64.hdf5", __name__, path="../data/"))
+
+  mytrainer = MyTrainer1()
+
+  machine = KMeansMachine(2, 2)
+  mytrainer.train(machine, ar)
+
+  for i in range(0, 2):
+    assert (ar[i+1] == machine.means[i, :]).all()
diff --git a/bob/learn/em/test/test_gaussian.py b/bob/learn/em/test/test_gaussian.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e64705d0c74d2445d7887e8b10461edaee1d14a
--- /dev/null
+++ b/bob/learn/em/test/test_gaussian.py
@@ -0,0 +1,112 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+# Thu Feb 16 16:54:45 2012 +0200
+#
+# Copyright (C) 2011-2013 Idiap Research Institute, Martigny, Switzerland
+
+"""Tests the Gaussian machine
+"""
+
+import os
+import numpy
+import tempfile
+
+import bob.io.base
+
+from bob.learn.em import Gaussian
+
+def equals(x, y, epsilon):
+  return (abs(x - y) < epsilon)
+
+def test_GaussianNormal():
+  # Test the likelihood computation of a simple normal Gaussian
+  gaussian = Gaussian(2)
+  # By default, initialized with zero mean and unit variance
+  logLH = gaussian.log_likelihood(numpy.array([0.4, 0.2], 'float64'))
+  assert equals(logLH, -1.93787706641, 1e-10)
+
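The reference constant above can be verified by hand: for a diagonal Gaussian, log N(x | mu, sigma^2) = -0.5 * (D*log(2*pi) + sum_d log sigma_d^2 + sum_d (x_d - mu_d)^2 / sigma_d^2), and with zero mean and unit variance at x = (0.4, 0.2) this reduces to -log(2*pi) - 0.1. A quick check (not part of the diff):

```python
# Sanity check of the -1.93787706641 reference value.
import numpy
x = numpy.array([0.4, 0.2])
log_lh = -0.5 * (len(x) * numpy.log(2 * numpy.pi) + numpy.sum(x ** 2))
assert abs(log_lh - (-1.93787706641)) < 1e-10
```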
+def test_GaussianMachine():
+  # Test a GaussianMachine more thoroughly
+
+  # Initializes a Gaussian with zero mean and unit variance
+  g = Gaussian(3)
+  assert (g.mean == 0.0).all()
+  assert (g.variance == 1.0).all()
+  assert g.shape == (3,)
+
+  # Set and check mean, variance, variance thresholds
+  mean     = numpy.array([0, 1, 2], 'float64')
+  variance = numpy.array([3, 2, 1], 'float64')
+  g.mean     = mean
+  g.variance = variance
+  g.set_variance_thresholds(0.0005)
+  assert (g.mean == mean).all()
+  assert (g.variance == variance).all()
+  assert (g.variance_thresholds == 0.0005).all()
+
+  # Save and read from file
+  filename = str(tempfile.mkstemp(".hdf5")[1])
+  g.save(bob.io.base.HDF5File(filename, 'w'))
+  g_loaded = Gaussian(bob.io.base.HDF5File(filename))
+  assert g == g_loaded
+  assert (g != g_loaded ) is False
+  assert g.is_similar_to(g_loaded)
+  
+  # Save and read from file using the keyword argument
+  filename = str(tempfile.mkstemp(".hdf5")[1])
+  g.save(hdf5=bob.io.base.HDF5File(filename, 'w'))
+  g_loaded = Gaussian(hdf5=bob.io.base.HDF5File(filename))
+  assert g == g_loaded
+  assert (g != g_loaded ) is False
+  assert g.is_similar_to(g_loaded)
+
+  # Save, then load from file using the load() method
+  filename = str(tempfile.mkstemp(".hdf5")[1])
+  g.save(bob.io.base.HDF5File(filename, 'w'))
+  g_loaded = Gaussian()
+  g_loaded.load(bob.io.base.HDF5File(filename))
+  assert g == g_loaded
+  assert (g != g_loaded ) is False
+  assert g.is_similar_to(g_loaded)
+
+  # Save, then load from file passing the HDF5 file via keyword argument to load()
+  filename = str(tempfile.mkstemp(".hdf5")[1])
+  g.save(bob.io.base.HDF5File(filename, 'w'))
+  g_loaded = Gaussian()
+  g_loaded.load(hdf5=bob.io.base.HDF5File(filename))
+  assert g == g_loaded
+  assert (g != g_loaded ) is False
+  assert g.is_similar_to(g_loaded)
+
+
+  # Make them different
+  g_loaded.set_variance_thresholds(0.001)
+  assert (g == g_loaded ) is False
+  assert g != g_loaded
+
+  # Check likelihood computation
+  sample1 = numpy.array([0, 1, 2], 'float64')
+  sample2 = numpy.array([1, 2, 3], 'float64')
+  sample3 = numpy.array([2, 3, 4], 'float64')
+  ref1 = -3.652695334228046
+  ref2 = -4.569362000894712
+  ref3 = -7.319362000894712
+  eps = 1e-10
+  assert equals(g.log_likelihood(sample1), ref1, eps)
+  assert equals(g.log_likelihood(sample2), ref2, eps)
+  assert equals(g.log_likelihood(sample3), ref3, eps)
+
+  # Check resize and assignment
+  g.resize(5)
+  assert g.shape == (5,)
+  g2 = Gaussian()
+  g2 = g
+  assert g == g2
+  assert (g != g2 ) is False
+  g3 = Gaussian(g)
+  assert g == g3
+  assert (g != g3 ) is False
+
+  # Clean-up
+  os.unlink(filename)
diff --git a/bob/learn/em/test/test_gmm.py b/bob/learn/em/test/test_gmm.py
new file mode 100644
index 0000000000000000000000000000000000000000..cc678c1d18ccc40f9e465215d4a2cbd0072123b7
--- /dev/null
+++ b/bob/learn/em/test/test_gmm.py
@@ -0,0 +1,240 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+# Thu Feb 16 17:57:10 2012 +0200
+#
+# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+
+"""Tests the GMM machine and the GMMStats container
+"""
+
+import os
+import numpy
+import tempfile
+
+import bob.io.base
+from bob.io.base.test_utils import datafile
+
+from bob.learn.em import GMMStats, GMMMachine
+
+def test_GMMStats():
+  # Test a GMMStats
+  # Initializes a GMMStats
+  gs = GMMStats(2,3)
+  log_likelihood = -3.
+  T = 57
+  n = numpy.array([4.37, 5.31], 'float64')
+  sumpx = numpy.array([[1., 2., 3.], [4., 5., 6.]], 'float64')
+  sumpxx = numpy.array([[10., 20., 30.], [40., 50., 60.]], 'float64')
+  gs.log_likelihood = log_likelihood
+  gs.t = T
+  gs.n = n
+  gs.sum_px = sumpx
+  gs.sum_pxx = sumpxx
+  assert gs.log_likelihood == log_likelihood
+  assert gs.t == T
+  assert (gs.n == n).all()
+  assert (gs.sum_px == sumpx).all()
+  assert (gs.sum_pxx == sumpxx).all()
+  assert gs.shape==(2,3)
+
+  # Saves and reads from file
+  filename = str(tempfile.mkstemp(".hdf5")[1])
+  gs.save(bob.io.base.HDF5File(filename, 'w'))
+  gs_loaded = GMMStats(bob.io.base.HDF5File(filename))
+  assert gs == gs_loaded
+  assert (gs != gs_loaded ) is False
+  assert gs.is_similar_to(gs_loaded)
+  
+  # Saves and reads from file using the keyword argument
+  filename = str(tempfile.mkstemp(".hdf5")[1])
+  gs.save(hdf5=bob.io.base.HDF5File(filename, 'w'))
+  gs_loaded = GMMStats(bob.io.base.HDF5File(filename))
+  assert gs == gs_loaded
+  assert (gs != gs_loaded ) is False
+  assert gs.is_similar_to(gs_loaded)
+
+  # Saves using the keyword argument, then loads via the load() method
+  filename = str(tempfile.mkstemp(".hdf5")[1])
+  gs.save(hdf5=bob.io.base.HDF5File(filename, 'w'))
+  gs_loaded = GMMStats()
+  gs_loaded.load(bob.io.base.HDF5File(filename))
+  assert gs == gs_loaded
+  assert (gs != gs_loaded ) is False
+  assert gs.is_similar_to(gs_loaded)
+
+  # Saves and loads from file, both using the keyword argument
+  filename = str(tempfile.mkstemp(".hdf5")[1])
+  gs.save(hdf5=bob.io.base.HDF5File(filename, 'w'))
+  gs_loaded = GMMStats()
+  gs_loaded.load(hdf5=bob.io.base.HDF5File(filename))
+  assert gs == gs_loaded
+  assert (gs != gs_loaded ) is False
+  assert gs.is_similar_to(gs_loaded)
+  
+  
+  # Makes them different
+  gs_loaded.t = 58
+  assert (gs == gs_loaded ) is False
+  assert gs != gs_loaded
+  assert (gs.is_similar_to(gs_loaded)) is False
+  # Accumulates from another GMMStats
+  gs2 = GMMStats(2,3)
+  gs2.log_likelihood = log_likelihood
+  gs2.t = T
+  gs2.n = n
+  gs2.sum_px = sumpx
+  gs2.sum_pxx = sumpxx
+  gs2 += gs
+  eps = 1e-8
+  assert gs2.log_likelihood == 2*log_likelihood
+  assert gs2.t == 2*T
+  assert numpy.allclose(gs2.n, 2*n, eps)
+  assert numpy.allclose(gs2.sum_px, 2*sumpx, eps)
+  assert numpy.allclose(gs2.sum_pxx, 2*sumpxx, eps)
+
+  # Reinit and checks for zeros
+  gs_loaded.init()
+  assert gs_loaded.log_likelihood == 0
+  assert gs_loaded.t == 0
+  assert (gs_loaded.n == 0).all()
+  assert (gs_loaded.sum_px == 0).all()
+  assert (gs_loaded.sum_pxx == 0).all()
+  # Resize and checks size
+  assert  gs_loaded.shape==(2,3)
+  gs_loaded.resize(4,5)  
+  assert  gs_loaded.shape==(4,5)
+  assert gs_loaded.sum_px.shape[0] == 4
+  assert gs_loaded.sum_px.shape[1] == 5
+
+  # Clean-up
+  os.unlink(filename)
+
+def test_GMMMachine_1():
+  # Tests the basic features of a GMMMachine
+
+  weights   = numpy.array([0.5, 0.5], 'float64')
+  weights2   = numpy.array([0.6, 0.4], 'float64')
+  means     = numpy.array([[3, 70, 0], [4, 72, 0]], 'float64')
+  means2     = numpy.array([[3, 7, 0], [4, 72, 0]], 'float64')
+  variances = numpy.array([[1, 10, 1], [2, 5, 2]], 'float64')
+  variances2 = numpy.array([[10, 10, 1], [2, 5, 2]], 'float64')
+  varianceThresholds = numpy.array([[0, 0, 0], [0, 0, 0]], 'float64')
+  varianceThresholds2 = numpy.array([[0.0005, 0.0005, 0.0005], [0, 0, 0]], 'float64')
+
+  # Initializes a GMMMachine
+  gmm = GMMMachine(2,3)
+  # Sets the weights, means, variances and varianceThresholds and
+  # Checks correctness
+  gmm.weights = weights
+  gmm.means = means
+  gmm.variances = variances
+  gmm.variance_thresholds = varianceThresholds
+  assert gmm.shape == (2,3)
+  assert (gmm.weights == weights).all()
+  assert (gmm.means == means).all()
+  assert (gmm.variances == variances).all()
+  assert (gmm.variance_thresholds == varianceThresholds).all()
+
+  # Checks supervector-like accesses
+  assert (gmm.mean_supervector == means.reshape(means.size)).all()
+  assert (gmm.variance_supervector == variances.reshape(variances.size)).all()
+  newMeans = numpy.array([[3, 70, 2], [4, 72, 2]], 'float64')
+  newVariances = numpy.array([[1, 1, 1], [2, 2, 2]], 'float64')
+
+
+  # Checks particular varianceThresholds-related methods
+  varianceThresholds1D = numpy.array([0.3, 1, 0.5], 'float64')
+  gmm.set_variance_thresholds(varianceThresholds1D)
+  assert (gmm.variance_thresholds[0,:] == varianceThresholds1D).all()
+  assert (gmm.variance_thresholds[1,:] == varianceThresholds1D).all()
+
+  gmm.set_variance_thresholds(0.005)
+  assert (gmm.variance_thresholds == 0.005).all()
+
+  # Checks Gaussians access
+  gmm.means     = newMeans
+  gmm.variances = newVariances
+  assert (gmm.get_gaussian(0).mean == newMeans[0,:]).all()
+  assert (gmm.get_gaussian(1).mean == newMeans[1,:]).all()
+  assert (gmm.get_gaussian(0).variance == newVariances[0,:]).all()
+  assert (gmm.get_gaussian(1).variance == newVariances[1,:]).all()
+
+  # Checks resize
+  gmm.resize(4,5)
+  assert gmm.shape == (4,5)
+
+  # Checks comparison
+  gmm2 = GMMMachine(gmm)
+  gmm3 = GMMMachine(2,3)
+  gmm3.weights = weights2
+  gmm3.means = means
+  gmm3.variances = variances
+  #gmm3.varianceThresholds = varianceThresholds
+  gmm4 = GMMMachine(2,3)
+  gmm4.weights = weights
+  gmm4.means = means2
+  gmm4.variances = variances
+  #gmm4.varianceThresholds = varianceThresholds
+  gmm5 = GMMMachine(2,3)
+  gmm5.weights = weights
+  gmm5.means = means
+  gmm5.variances = variances2
+  #gmm5.varianceThresholds = varianceThresholds
+  gmm6 = GMMMachine(2,3)
+  gmm6.weights = weights
+  gmm6.means = means
+  gmm6.variances = variances
+  #gmm6.varianceThresholds = varianceThresholds2
+
+  assert gmm == gmm2
+  assert (gmm != gmm2) is False
+  assert gmm.is_similar_to(gmm2)
+  assert gmm != gmm3
+  assert (gmm == gmm3) is False
+  assert gmm.is_similar_to(gmm3) is False
+  assert gmm != gmm4
+  assert (gmm == gmm4) is False
+  assert gmm.is_similar_to(gmm4) is False
+  assert gmm != gmm5
+  assert (gmm == gmm5) is False
+  assert gmm.is_similar_to(gmm5) is False
+  assert gmm != gmm6
+  assert (gmm == gmm6) is False
+  assert gmm.is_similar_to(gmm6) is False
+
+def test_GMMMachine_2():
+  # Test a GMMMachine (statistics)
+
+  arrayset = bob.io.base.load(datafile("faithful.torch3_f64.hdf5", __name__, path="../data/"))
+  gmm = GMMMachine(2, 2)
+  gmm.weights   = numpy.array([0.5, 0.5], 'float64')
+  gmm.means     = numpy.array([[3, 70], [4, 72]], 'float64')
+  gmm.variances = numpy.array([[1, 10], [2, 5]], 'float64')
+  gmm.variance_thresholds = numpy.array([[0, 0], [0, 0]], 'float64')
+
+  stats = GMMStats(2, 2)
+  gmm.acc_statistics(arrayset, stats)
+
+  stats_ref = GMMStats(bob.io.base.HDF5File(datafile("stats.hdf5",__name__, path="../data/")))
+
+  assert stats.t == stats_ref.t
+  assert numpy.allclose(stats.n, stats_ref.n, atol=1e-10)
+  # Note: exact equality on sum_px fails due to floating-point precision,
+  # hence the tolerance-based comparisons below
+  assert numpy.allclose(stats.sum_px, stats_ref.sum_px, atol=1e-10)
+  assert numpy.allclose(stats.sum_pxx, stats_ref.sum_pxx, atol=1e-10)
+
+def test_GMMMachine_3():
+  # Test a GMMMachine (log-likelihood computation)
+
+  data = bob.io.base.load(datafile('data.hdf5', __name__, path="../data/"))
+  gmm = GMMMachine(2, 50)
+  gmm.weights   = bob.io.base.load(datafile('weights.hdf5', __name__, path="../data/"))
+  gmm.means     = bob.io.base.load(datafile('means.hdf5', __name__, path="../data/"))
+  gmm.variances = bob.io.base.load(datafile('variances.hdf5', __name__, path="../data/"))
+
+  # Compare the log-likelihood with the one obtained using Chris Matlab
+  # implementation
+  matlab_ll_ref = -2.361583051672024e+02
+  assert abs(gmm(data) - matlab_ll_ref) < 1e-10
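test_GMMMachine_3 pins the score returned by `gmm(data)` against a Matlab reference; the quantity being compared is the log-likelihood of the sample under a diagonal-covariance mixture. A compact numpy sketch of that computation (not part of the diff; a plain re-derivation, not the bound implementation):

```python
import numpy

def gmm_log_likelihood(x, weights, means, variances):
    """log p(x) for one sample under a diagonal-covariance GMM."""
    d = means.shape[1]
    log_norm = -0.5 * (d * numpy.log(2 * numpy.pi) + numpy.log(variances).sum(axis=1))
    log_gauss = log_norm - 0.5 * (((x - means) ** 2) / variances).sum(axis=1)
    joint = numpy.log(weights) + log_gauss  # per-component joint log-density
    m = joint.max()                         # log-sum-exp for numerical stability
    return m + numpy.log(numpy.exp(joint - m).sum())
```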
diff --git a/bob/learn/em/test/test_ivector.py b/bob/learn/em/test/test_ivector.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f8ca98cb9f60797655d53d2f4cb69d2ee389ce7
--- /dev/null
+++ b/bob/learn/em/test/test_ivector.py
@@ -0,0 +1,159 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+# Mon Apr 2 11:19:00 2013 +0200
+#
+# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+
+
+"""Tests the I-Vector machine
+"""
+
+import numpy
+import numpy.linalg
+import numpy.random
+
+from bob.learn.em import GMMMachine, GMMStats, IVectorMachine
+
+
+### Test class inspired by an implementation of Chris McCool
+### Chris McCool (chris.mccool@nicta.com.au)
+class IVectorMachinePy():
+  """An IVector extractor"""
+
+  def __init__(self, ubm=None, dim_t=1):
+    # Our state
+    self.m_ubm = ubm
+    self.m_dim_t = dim_t
+    # Resize the matrices T and sigma
+    self.resize()
+    # Precompute
+    self.precompute()
+
+  def resize(self):
+    if self.m_ubm:
+      dim_cd = self.m_ubm.shape[0] * self.m_ubm.shape[1]
+      self.m_t = numpy.random.randn(dim_cd, self.m_dim_t)
+      self.m_sigma = numpy.random.randn(dim_cd)
+
+  def precompute(self):
+    if self.m_ubm and self.m_t is not None and self.m_sigma is not None:
+      dim_c, dim_d = self.m_ubm.shape
+      self.m_cache_TtSigmaInv = {}
+      self.m_cache_TtSigmaInvT = {}
+      for c in range(dim_c):
+        start                       = c*dim_d
+        end                         = (c+1)*dim_d
+        Tc                          = self.m_t[start:end,:]
+        self.m_cache_TtSigmaInv[c]  = Tc.transpose() / self.m_sigma[start:end]
+        self.m_cache_TtSigmaInvT[c] = numpy.dot(self.m_cache_TtSigmaInv[c], Tc)
+
+  def set_ubm(self, ubm):
+    self.m_ubm = ubm
+    # Precompute
+    self.precompute()
+
+  def get_ubm(self):
+    return self.m_ubm
+
+  def set_t(self, t):
+    # @warning: no dimensions check
+    self.m_t = t
+    # Precompute
+    self.precompute()
+
+  def get_t(self):
+    return self.m_t
+
+  def set_sigma(self, sigma):
+    # @warning: no dimensions check
+    self.m_sigma = sigma
+    # Precompute
+    self.precompute()
+
+  def get_sigma(self):
+    return self.m_sigma
+
+
+  def _get_TtSigmaInv_Fnorm(self, N, F):
+    # Initialization
+    dim_c, dim_d = self.m_ubm.shape
+    mean_supervector = self.m_ubm.mean_supervector
+    TtSigmaInv_Fnorm = numpy.zeros(shape=(self.m_dim_t,), dtype=numpy.float64)
+
+    # Loop over each Gaussian component
+    for c in range(dim_c):
+      start             = c*dim_d
+      end               = (c+1)*dim_d
+      Fnorm             = F[c,:] - N[c] * mean_supervector[start:end]
+      TtSigmaInv_Fnorm  = TtSigmaInv_Fnorm + numpy.dot(self.m_cache_TtSigmaInv[c], Fnorm)
+    return TtSigmaInv_Fnorm
+
+  def _get_I_TtSigmaInvNT(self, N):
+    # Initialization
+    dim_c, dim_d = self.m_ubm.shape
+
+    TtSigmaInvNT = numpy.eye(self.m_dim_t, dtype=numpy.float64)
+    for c in range(dim_c):
+      TtSigmaInvNT = TtSigmaInvNT + self.m_cache_TtSigmaInvT[c] * N[c]
+
+    return TtSigmaInvNT
+
+  def forward(self, gmmstats):
+    if self.m_ubm and self.m_t is not None and self.m_sigma is not None:
+      N = gmmstats.n
+      F = gmmstats.sum_px
+
+      TtSigmaInv_Fnorm = self._get_TtSigmaInv_Fnorm(N, F)
+      TtSigmaInvNT = self._get_I_TtSigmaInvNT(N)
+
+      return numpy.linalg.solve(TtSigmaInvNT, TtSigmaInv_Fnorm)
+
+
+def test_machine():
+
+  # Ubm
+  ubm = GMMMachine(2,3)
+  ubm.weights = numpy.array([0.4,0.6])
+  ubm.means = numpy.array([[1.,7,4],[4,5,3]])
+  ubm.variances = numpy.array([[0.5,1.,1.5],[1.,1.5,2.]])
+
+  # Defines GMMStats
+  gs = GMMStats(2,3)
+  log_likelihood = -3.
+  T = 1
+  n = numpy.array([0.4, 0.6], numpy.float64)
+  sumpx = numpy.array([[1., 2., 3.], [2., 4., 3.]], numpy.float64)
+  sumpxx = numpy.array([[10., 20., 30.], [40., 50., 60.]], numpy.float64)
+  gs.log_likelihood = log_likelihood
+  gs.t = T
+  gs.n = n
+  gs.sum_px = sumpx
+  gs.sum_pxx = sumpxx
+
+  # IVector (Python)
+  m = IVectorMachinePy(ubm, 2)
+  t = numpy.array([[1.,2],[4,1],[0,3],[5,8],[7,10],[11,1]])
+  m.set_t(t)
+  sigma = numpy.array([1.,2.,1.,3.,2.,4.])
+  m.set_sigma(sigma)
+
+  wij_ref = numpy.array([-0.04213415, 0.21463343]) # Reference from original Chris implementation
+  wij = m.forward(gs)
+  assert numpy.allclose(wij_ref, wij, 1e-5)
+
+  # IVector (C++)
+  mc = IVectorMachine(ubm, 2)
+  mc.t = t
+  mc.sigma = sigma
+
+  wij_ref = numpy.array([-0.04213415, 0.21463343]) # Reference from original Chris implementation
+  wij = mc(gs)
+  assert numpy.allclose(wij_ref, wij, 1e-5)
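The per-component loops in `IVectorMachinePy` above implement the standard closed-form posterior mean of the latent variable; in supervector notation this is w = (I + T^t Sigma^-1 N T)^-1 T^t Sigma^-1 (F - N m). A loop-free numpy sketch (not part of the diff) under the same shapes, with N expanded across the D dimensions of each component:

```python
import numpy

def ivector_estimate(T, sigma, N, F, m):
    """T: (C*D, t); sigma, m: (C*D,); N: (C,); F: (C, D)."""
    dim_c, dim_d = F.shape
    n_cd = numpy.repeat(N, dim_d)        # per-dimension occupation counts
    fnorm = F.reshape(-1) - n_cd * m     # centred first-order statistics
    tt_sigma_inv = T.T / sigma           # T^t Sigma^-1, shape (t, C*D)
    lhs = numpy.eye(T.shape[1]) + (tt_sigma_inv * n_cd).dot(T)
    return numpy.linalg.solve(lhs, tt_sigma_inv.dot(fnorm))
```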
diff --git a/bob/learn/em/test/test_ivector_trainer.py b/bob/learn/em/test/test_ivector_trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..9ef48e21ffb21c1227954402d57c69c48c9aae08
--- /dev/null
+++ b/bob/learn/em/test/test_ivector_trainer.py
@@ -0,0 +1,363 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+#
+# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+
+"""Tests the I-Vector trainer
+"""
+
+import sys  # sys.maxsize is used by IVectorTrainerPy.train below
+
+import numpy
+import numpy.linalg
+import numpy.random
+
+from bob.learn.em import GMMMachine, GMMStats, IVectorMachine, IVectorTrainer
+
+### Test class inspired by an implementation of Chris McCool
+### Chris McCool (chris.mccool@nicta.com.au)
+class IVectorTrainerPy():
+  """An IVector extractor"""
+
+  def __init__(self, convergence_threshold=0.001, max_iterations=10,
+      compute_likelihood=False, sigma_update=False, variance_floor=1e-5):
+    self.m_convergence_threshold = convergence_threshold
+    self.m_max_iterations = max_iterations
+    self.m_compute_likelihood = compute_likelihood
+    self.m_sigma_update = sigma_update
+    self.m_variance_floor = variance_floor
+
+  def initialize(self, machine, data):
+    ubm = machine.ubm
+    self.m_dim_c = ubm.shape[0]
+    self.m_dim_d = ubm.shape[1]
+    self.m_dim_t = machine.t.shape[1]
+    self.m_meansupervector = ubm.mean_supervector
+    t = numpy.random.randn(self.m_dim_c*self.m_dim_d, self.m_dim_t)
+    machine.t = t
+    machine.sigma = machine.ubm.variance_supervector
+
+  def e_step(self, machine, data):
+    n_samples = len(data)
+    self.m_acc_Nij_Sigma_wij2  = {}
+    self.m_acc_Fnorm_Sigma_wij = {}
+    self.m_acc_Snorm = numpy.zeros(shape=(self.m_dim_c*self.m_dim_d,), dtype=numpy.float64)
+    self.m_N = numpy.zeros(shape=(self.m_dim_c,), dtype=numpy.float64)
+
+    for c in range(self.m_dim_c):
+      self.m_acc_Nij_Sigma_wij2[c]  = numpy.zeros(shape=(self.m_dim_t,self.m_dim_t), dtype=numpy.float64)
+      self.m_acc_Fnorm_Sigma_wij[c] = numpy.zeros(shape=(self.m_dim_d,self.m_dim_t), dtype=numpy.float64)
+
+    for n in range(n_samples):
+      Nij = data[n].n
+      Fij = data[n].sum_px
+      Sij = data[n].sum_pxx
+
+      # Estimate latent variables
+      TtSigmaInv_Fnorm = machine.__compute_TtSigmaInvFnorm__(data[n])
+      I_TtSigmaInvNT = machine.__compute_Id_TtSigmaInvT__(data[n])
+
+      Fnorm = numpy.zeros(shape=(self.m_dim_c*self.m_dim_d,), dtype=numpy.float64)
+      Snorm = numpy.zeros(shape=(self.m_dim_c*self.m_dim_d,), dtype=numpy.float64)
+
+      # Compute normalized statistics
+      for c in range(self.m_dim_c):
+        start            = c*self.m_dim_d
+        end              = (c+1)*self.m_dim_d
+
+        Fc               = Fij[c,:]
+        Sc               = Sij[c,:]
+        mc               = self.m_meansupervector[start:end]
+
+        Fc_mc            = Fc * mc
+        Nc_mc_mcT        = Nij[c] * mc * mc
+
+        Fnorm[start:end] = Fc - Nij[c] * mc
+        Snorm[start:end] = Sc - (2 * Fc_mc) + Nc_mc_mcT
+
+      # Latent variables
+      I_TtSigmaInvNT_inv = numpy.linalg.inv(I_TtSigmaInvNT)
+      E_w_ij             = numpy.dot(I_TtSigmaInvNT_inv, TtSigmaInv_Fnorm)
+      E_w_ij2            = I_TtSigmaInvNT_inv + numpy.outer(E_w_ij, E_w_ij)
+
+      # Do the accumulation for each component
+      self.m_acc_Snorm   = self.m_acc_Snorm + Snorm    # (dim_c*dim_d)
+      for c in range(self.m_dim_c):
+        start            = c*self.m_dim_d
+        end              = (c+1)*self.m_dim_d
+        current_Fnorm    = Fnorm[start:end]            # (dim_d)
+        self.m_acc_Nij_Sigma_wij2[c]  = self.m_acc_Nij_Sigma_wij2[c] + Nij[c] * E_w_ij2                    # (dim_t, dim_t)
+        self.m_acc_Fnorm_Sigma_wij[c] = self.m_acc_Fnorm_Sigma_wij[c] + numpy.outer(current_Fnorm, E_w_ij) # (dim_d, dim_t)
+        self.m_N[c]                   = self.m_N[c] + Nij[c]
+
+
+  def m_step(self, machine, data):
+
+    T = numpy.zeros(shape=(self.m_dim_c*self.m_dim_d,self.m_dim_t), dtype=numpy.float64)
+    Told = machine.t
+    if self.m_sigma_update:
+      sigma = numpy.zeros(shape=self.m_acc_Snorm.shape, dtype=numpy.float64)
+    for c in range(self.m_dim_c):
+      start = c*self.m_dim_d
+      end   = (c+1)*self.m_dim_d
+      # T update
+      A     = self.m_acc_Nij_Sigma_wij2[c].transpose()
+      B     = self.m_acc_Fnorm_Sigma_wij[c].transpose()
+      if numpy.array_equal(A, numpy.zeros(A.shape)):
+        X = numpy.zeros(shape=(self.m_dim_t,self.m_dim_d), dtype=numpy.float64)
+      else:
+        X = numpy.linalg.solve(A, B)
+      T[start:end,:] = X.transpose()
+      # Sigma update
+      if self.m_sigma_update:
+        Told_c           = Told[start:end,:].transpose()
+        # warning: Use of the new T estimate! (revert second next line if you don't want that)
+        Fnorm_Ewij_Tt    = numpy.diag(numpy.dot(self.m_acc_Fnorm_Sigma_wij[c], X))
+        #Fnorm_Ewij_Tt = numpy.diag(numpy.dot(self.m_acc_Fnorm_Sigma_wij[c], Told_c))
+        sigma[start:end] = (self.m_acc_Snorm[start:end] - Fnorm_Ewij_Tt) / self.m_N[c]
+
+    machine.t = T
+    if self.m_sigma_update:
+      sigma[sigma < self.m_variance_floor] = self.m_variance_floor
+      machine.sigma = sigma
+
+  def finalize(self, machine, data):
+    pass
+
+  def train(self, machine, data):
+    self.initialize(machine, data)
+    average_output_previous   = -sys.maxsize
+    average_output            = -sys.maxsize
+    self.e_step(machine, data)
+
+    i = 0
+    while True:
+      average_output_previous = average_output
+      self.m_step(machine, data)
+      self.e_step(machine, data)
+      if(self.m_max_iterations > 0 and i+1 >= self.m_max_iterations):
+        break
+      i += 1
+
+
+def test_trainer_nosigma():
+  # Ubm
+  ubm = GMMMachine(2,3)
+  ubm.weights = numpy.array([0.4,0.6])
+  ubm.means = numpy.array([[1.,7,4],[4,5,3]])
+  ubm.variances = numpy.array([[0.5,1.,1.5],[1.,1.5,2.]])
+
+  # Defines GMMStats
+  gs1 = GMMStats(2,3)
+  log_likelihood1 = -3.
+  T1 = 1
+  n1 = numpy.array([0.4, 0.6], numpy.float64)
+  sumpx1 = numpy.array([[1., 2., 3.], [2., 4., 3.]], numpy.float64)
+  sumpxx1 = numpy.array([[10., 20., 30.], [40., 50., 60.]], numpy.float64)
+  gs1.log_likelihood = log_likelihood1
+  gs1.t = T1
+  gs1.n = n1
+  gs1.sum_px = sumpx1
+  gs1.sum_pxx = sumpxx1
+
+  gs2 = GMMStats(2,3)
+  log_likelihood2 = -4.
+  T2 = 1
+  n2 = numpy.array([0.2, 0.8], numpy.float64)
+  sumpx2 = numpy.array([[2., 1., 3.], [3., 4.1, 3.2]], numpy.float64)
+  sumpxx2 = numpy.array([[12., 15., 25.], [39., 51., 62.]], numpy.float64)
+  gs2.log_likelihood = log_likelihood2
+  gs2.t = T2
+  gs2.n = n2
+  gs2.sum_px = sumpx2
+  gs2.sum_pxx = sumpxx2
+
+  data = [gs1, gs2]
+
+
+  acc_Nij_Sigma_wij2_ref1  = {0: numpy.array([[ 0.03202305, -0.02947769], [-0.02947769,  0.0561132 ]]),
+                             1: numpy.array([[ 0.07953279, -0.07829414], [-0.07829414,  0.13814242]])}
+  acc_Fnorm_Sigma_wij_ref1 = {0: numpy.array([[-0.29622691,  0.61411796], [ 0.09391764, -0.27955961], [-0.39014455,  0.89367757]]),
+                             1: numpy.array([[ 0.04695882, -0.13977981], [-0.05718673,  0.24159665], [-0.17098161,  0.47326585]])}
+  acc_Snorm_ref1           = numpy.array([16.6, 22.4, 16.6, 61.4, 55., 97.4])
+  N_ref1                   = numpy.array([0.6, 1.4])
+  t_ref1                   = numpy.array([[  1.59543739, 11.78239235], [ -3.20130371, -6.66379081], [  4.79674111, 18.44618316],
+                                          [ -0.91765407, -1.5319461 ], [  2.26805901,  3.03434944], [  2.76600031,  4.9935962 ]])
+
+  acc_Nij_Sigma_wij2_ref2  = {0: numpy.array([[ 0.37558389, -0.15405228], [-0.15405228,  0.1421269 ]]),
+                             1: numpy.array([[ 1.02076081, -0.57683953], [-0.57683953,  0.53912239]])}
+  acc_Fnorm_Sigma_wij_ref2 = {0: numpy.array([[-1.1261668 ,  1.46496753], [-0.03579289, -0.37875811], [-1.09037391,  1.84372565]]),
+                             1: numpy.array([[-0.01789645, -0.18937906], [ 0.35221084,  0.15854126], [-0.10004552,  0.72559036]])}
+  acc_Snorm_ref2           = numpy.array([16.6, 22.4, 16.6, 61.4, 55., 97.4])
+  N_ref2                   = numpy.array([0.6, 1.4])
+  t_ref2                   = numpy.array([[  2.2133685,  12.70654597], [ -2.13959381, -4.98404887], [  4.35296231, 17.69059484],
+                                          [ -0.54644055, -0.93594252], [  1.29308324,  1.67762053], [  1.67583072,  3.13894546]])
+  acc_Nij_Sigma_wij2_ref = [acc_Nij_Sigma_wij2_ref1, acc_Nij_Sigma_wij2_ref2]
+  acc_Fnorm_Sigma_wij_ref = [acc_Fnorm_Sigma_wij_ref1, acc_Fnorm_Sigma_wij_ref2]
+  acc_Snorm_ref = [acc_Snorm_ref1, acc_Snorm_ref2]
+  N_ref = [N_ref1, N_ref2]
+  t_ref = [t_ref1, t_ref2]
+
+  # Python implementation
+  # Machine
+  m = IVectorMachine(ubm, 2)
+  t = numpy.array([[1.,2],[4,1],[0,3],[5,8],[7,10],[11,1]])
+  sigma = numpy.array([1.,2.,1.,3.,2.,4.])
+
+  # Initialization
+  trainer = IVectorTrainerPy()
+  trainer.initialize(m, data)
+  m.t = t
+  m.sigma = sigma
+  for it in range(2):
+    # E-Step
+    trainer.e_step(m, data)
+    for k in acc_Nij_Sigma_wij2_ref[it]:
+      assert numpy.allclose(acc_Nij_Sigma_wij2_ref[it][k], trainer.m_acc_Nij_Sigma_wij2[k], 1e-5)
+    for k in acc_Fnorm_Sigma_wij_ref[it]:
+      assert numpy.allclose(acc_Fnorm_Sigma_wij_ref[it][k], trainer.m_acc_Fnorm_Sigma_wij[k], 1e-5)
+    assert numpy.allclose(acc_Snorm_ref[it], trainer.m_acc_Snorm, 1e-5)
+    assert numpy.allclose(N_ref[it], trainer.m_N, 1e-5)
+
+    # M-Step
+    trainer.m_step(m, data)
+    assert numpy.allclose(t_ref[it], m.t, 1e-5)
+
+  # C++ implementation
+  # Machine
+  m = IVectorMachine(ubm, 2)
+
+  # Initialization
+  trainer = IVectorTrainer()
+  trainer.initialize(m)
+  m.t = t
+  m.sigma = sigma
+  for it in range(2):
+    # E-Step
+    trainer.e_step(m, data)
+    for k in acc_Nij_Sigma_wij2_ref[it]:
+      assert numpy.allclose(acc_Nij_Sigma_wij2_ref[it][k], trainer.acc_nij_wij2[k], 1e-5)
+    for k in acc_Fnorm_Sigma_wij_ref[it]:
+      assert numpy.allclose(acc_Fnorm_Sigma_wij_ref[it][k], trainer.acc_fnormij_wij[k], 1e-5)
+
+    # M-Step
+    trainer.m_step(m)
+    assert numpy.allclose(t_ref[it], m.t, 1e-5)
+
+def test_trainer_update_sigma():
+  # Ubm
+  dim_c = 2
+  dim_d = 3
+  ubm = GMMMachine(dim_c,dim_d)
+  ubm.weights = numpy.array([0.4,0.6])
+  ubm.means = numpy.array([[1.,7,4],[4,5,3]])
+  ubm.variances = numpy.array([[0.5,1.,1.5],[1.,1.5,2.]])
+
+  # Defines GMMStats
+  gs1 = GMMStats(dim_c,dim_d)
+  log_likelihood1 = -3.
+  T1 = 1
+  n1 = numpy.array([0.4, 0.6], numpy.float64)
+  sumpx1 = numpy.array([[1., 2., 3.], [2., 4., 3.]], numpy.float64)
+  sumpxx1 = numpy.array([[10., 20., 30.], [40., 50., 60.]], numpy.float64)
+  gs1.log_likelihood = log_likelihood1
+  gs1.t = T1
+  gs1.n = n1
+  gs1.sum_px = sumpx1
+  gs1.sum_pxx = sumpxx1
+
+  gs2 = GMMStats(dim_c,dim_d)
+  log_likelihood2 = -4.
+  T2 = 1
+  n2 = numpy.array([0.2, 0.8], numpy.float64)
+  sumpx2 = numpy.array([[2., 1., 3.], [3., 4.1, 3.2]], numpy.float64)
+  sumpxx2 = numpy.array([[12., 15., 25.], [39., 51., 62.]], numpy.float64)
+  gs2.log_likelihood = log_likelihood2
+  gs2.t = T2
+  gs2.n = n2
+  gs2.sum_px = sumpx2
+  gs2.sum_pxx = sumpxx2
+
+  data = [gs1, gs2]
+
+  # Reference values
+  acc_Nij_Sigma_wij2_ref1  = {0: numpy.array([[ 0.03202305, -0.02947769], [-0.02947769,  0.0561132 ]]),
+                              1: numpy.array([[ 0.07953279, -0.07829414], [-0.07829414,  0.13814242]])}
+  acc_Fnorm_Sigma_wij_ref1 = {0: numpy.array([[-0.29622691,  0.61411796], [ 0.09391764, -0.27955961], [-0.39014455,  0.89367757]]),
+                              1: numpy.array([[ 0.04695882, -0.13977981], [-0.05718673,  0.24159665], [-0.17098161,  0.47326585]])}
+  acc_Snorm_ref1           = numpy.array([16.6, 22.4, 16.6, 61.4, 55., 97.4])
+  N_ref1                   = numpy.array([0.6, 1.4])
+  t_ref1                   = numpy.array([[  1.59543739, 11.78239235], [ -3.20130371, -6.66379081], [  4.79674111, 18.44618316],
+                                          [ -0.91765407, -1.5319461 ], [  2.26805901,  3.03434944], [  2.76600031,  4.9935962 ]])
+  sigma_ref1               = numpy.array([ 16.39472121, 34.72955353,  3.3108037, 43.73496916, 38.85472445, 68.22116903])
+
+  acc_Nij_Sigma_wij2_ref2  = {0: numpy.array([[ 0.50807426, -0.11907756], [-0.11907756,  0.12336544]]),
+                              1: numpy.array([[ 1.18602399, -0.2835859 ], [-0.2835859 ,  0.39440498]])}
+  acc_Fnorm_Sigma_wij_ref2 = {0: numpy.array([[ 0.07221453,  1.1189786 ], [-0.08681275, -0.35396112], [ 0.15902728,  1.47293972]]),
+                              1: numpy.array([[-0.04340637, -0.17698056], [ 0.10662127,  0.21484933],[ 0.13116645,  0.64474271]])}
+  acc_Snorm_ref2           = numpy.array([16.6, 22.4, 16.6, 61.4, 55., 97.4])
+  N_ref2                   = numpy.array([0.6, 1.4])
+  t_ref2                   = numpy.array([[  2.93105054, 11.89961223], [ -1.08988119, -3.92120757], [  4.02093173, 15.82081981],
+                                          [ -0.17376634, -0.57366984], [  0.26585634,  0.73589952], [  0.60557877,   2.07014704]])
+  sigma_ref2               = numpy.array([5.12154025e+00, 3.48623823e+01, 1.00000000e-05, 4.37792350e+01, 3.91525332e+01, 6.85613258e+01])
+
+  acc_Nij_Sigma_wij2_ref = [acc_Nij_Sigma_wij2_ref1, acc_Nij_Sigma_wij2_ref2]
+  acc_Fnorm_Sigma_wij_ref = [acc_Fnorm_Sigma_wij_ref1, acc_Fnorm_Sigma_wij_ref2]
+  acc_Snorm_ref = [acc_Snorm_ref1, acc_Snorm_ref2]
+  N_ref = [N_ref1, N_ref2]
+  t_ref = [t_ref1, t_ref2]
+  sigma_ref = [sigma_ref1, sigma_ref2]
+
+
+  # Python implementation
+  # Machine
+  m = IVectorMachine(ubm, 2)
+  t = numpy.array([[1.,2],[4,1],[0,3],[5,8],[7,10],[11,1]])
+  sigma = numpy.array([1.,2.,1.,3.,2.,4.])
+
+  # Initialization
+  trainer = IVectorTrainerPy(sigma_update=True)
+  trainer.initialize(m, data)
+  m.t = t
+  m.sigma = sigma
+  for it in range(2):
+    # E-Step
+    trainer.e_step(m, data)
+    for k in acc_Nij_Sigma_wij2_ref[it]:
+      assert numpy.allclose(acc_Nij_Sigma_wij2_ref[it][k], trainer.m_acc_Nij_Sigma_wij2[k], 1e-5)
+    for k in acc_Fnorm_Sigma_wij_ref[it]:
+      assert numpy.allclose(acc_Fnorm_Sigma_wij_ref[it][k], trainer.m_acc_Fnorm_Sigma_wij[k], 1e-5)
+    assert numpy.allclose(acc_Snorm_ref[it], trainer.m_acc_Snorm, 1e-5)
+    assert numpy.allclose(N_ref[it], trainer.m_N, 1e-5)
+
+    # M-Step
+    trainer.m_step(m, data)
+    assert numpy.allclose(t_ref[it], m.t, 1e-5)
+    assert numpy.allclose(sigma_ref[it], m.sigma, 1e-5)
+
+
+  # C++ implementation
+  # Machine
+  m = IVectorMachine(ubm, 2)
+  m.variance_threshold = 1e-5
+
+  # Initialization
+  trainer = IVectorTrainer(update_sigma=True)
+  trainer.initialize(m)
+  m.t = t
+  m.sigma = sigma
+  for it in range(2):
+    # E-Step
+    trainer.e_step(m, data)
+    for k in acc_Nij_Sigma_wij2_ref[it]:
+      assert numpy.allclose(acc_Nij_Sigma_wij2_ref[it][k], trainer.acc_nij_wij2[k], 1e-5)
+    for k in acc_Fnorm_Sigma_wij_ref[it]:
+      assert numpy.allclose(acc_Fnorm_Sigma_wij_ref[it][k], trainer.acc_fnormij_wij[k], 1e-5)
+    assert numpy.allclose(acc_Snorm_ref[it].reshape(dim_c,dim_d), trainer.acc_snormij, 1e-5)
+    assert numpy.allclose(N_ref[it], trainer.acc_nij, 1e-5)
+
+    # M-Step
+    trainer.m_step(m)
+    assert numpy.allclose(t_ref[it], m.t, 1e-5)
+    assert numpy.allclose(sigma_ref[it], m.sigma, 1e-5)
+
diff --git a/bob/learn/em/test/test_jfa.py b/bob/learn/em/test/test_jfa.py
new file mode 100644
index 0000000000000000000000000000000000000000..ebcd5d1a44f5dcd4c3ca01d787d7f416452d4f63
--- /dev/null
+++ b/bob/learn/em/test/test_jfa.py
@@ -0,0 +1,396 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+# Wed Feb 15 23:24:35 2012 +0200
+#
+# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+
+"""Tests on the JFA-based machines
+"""
+
+import os
+import numpy
+import numpy.linalg
+import tempfile
+
+import bob.io.base
+
+from bob.learn.em import GMMMachine, GMMStats, JFABase, ISVBase, ISVMachine, JFAMachine
+
+def estimate_x(dim_c, dim_d, mean, sigma, U, N, F):
+  # Compute helper values
+  UtSigmaInv = {}
+  UtSigmaInvU = {}
+  dim_ru = U.shape[1]
+  for c in range(dim_c):
+    start                       = c*dim_d
+    end                         = (c+1)*dim_d
+    Uc                          = U[start:end,:]
+    UtSigmaInv[c]  = Uc.transpose() / sigma[start:end]
+    UtSigmaInvU[c] = numpy.dot(UtSigmaInv[c], Uc)
+
+  # I + (U^{T} \Sigma^-1 N U)
+  I_UtSigmaInvNU = numpy.eye(dim_ru, dtype=numpy.float64)
+  for c in range(dim_c):
+    I_UtSigmaInvNU = I_UtSigmaInvNU + UtSigmaInvU[c] * N[c]
+
+  # U^{T} \Sigma^-1 F
+  UtSigmaInv_Fnorm = numpy.zeros((dim_ru,), numpy.float64)
+  for c in range(dim_c):
+    start             = c*dim_d
+    end               = (c+1)*dim_d
+    Fnorm             = F[c,:] - N[c] * mean[start:end]
+    UtSigmaInv_Fnorm  = UtSigmaInv_Fnorm + numpy.dot(UtSigmaInv[c], Fnorm)
+
+  return numpy.linalg.solve(I_UtSigmaInvNU, UtSigmaInv_Fnorm)
+
+def estimate_ux(dim_c, dim_d, mean, sigma, U, N, F):
+  return numpy.dot(U, estimate_x(dim_c, dim_d, mean, sigma, U, N, F))
+
+
+def test_JFABase():
+
+  # Creates a UBM
+  weights = numpy.array([0.4, 0.6], 'float64')
+  means = numpy.array([[1, 6, 2], [4, 3, 2]], 'float64')
+  variances = numpy.array([[1, 2, 1], [2, 1, 2]], 'float64')
+  ubm = GMMMachine(2,3)
+  ubm.weights = weights
+  ubm.means = means
+  ubm.variances = variances
+
+  # Creates a JFABase
+  U = numpy.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]], 'float64')
+  V = numpy.array([[6, 5], [4, 3], [2, 1], [1, 2], [3, 4], [5, 6]], 'float64')
+  d = numpy.array([0, 1, 0, 1, 0, 1], 'float64')
+  m = JFABase(ubm, ru=1, rv=1)
+  
+  _,_,ru,rv = m.shape 
+  assert ru == 1
+  assert rv == 1
+
+  # Checks for correctness
+  m.resize(2,2)
+  m.u = U
+  m.v = V
+  m.d = d  
+  n_gaussians,dim,ru,rv = m.shape
+  supervector_length    = m.supervector_length
+  
+  assert (m.u == U).all()
+  assert (m.v == V).all()
+  assert (m.d == d).all()  
+  assert n_gaussians        == 2
+  assert dim                == 3
+  assert supervector_length == 6
+  assert ru                 == 2
+  assert rv                 == 2
+
+  # Saves and loads
+  filename = str(tempfile.mkstemp(".hdf5")[1])
+  m.save(bob.io.base.HDF5File(filename, 'w'))
+  m_loaded = JFABase(bob.io.base.HDF5File(filename))
+  m_loaded.ubm = ubm
+  assert m == m_loaded
+  assert (m != m_loaded) is False
+  assert m.is_similar_to(m_loaded)
+
+  # Copy constructor
+  mc = JFABase(m)
+  assert m == mc
+
+  # Variant
+  #mv = JFABase()
+  # Checks for correctness
+  #mv.ubm = ubm
+  #mv.resize(2,2)
+  #mv.u = U
+  #mv.v = V
+  #mv.d = d
+  #assert (m.u == U).all()
+  #assert (m.v == V).all()
+  #assert (m.d == d).all()
+  #assert m.dim_c == 2
+  #assert m.dim_d == 3
+  #assert m.dim_cd == 6
+  #assert m.dim_ru == 2
+  #assert m.dim_rv == 2
+
+  # Clean-up
+  os.unlink(filename)
+
+def test_ISVBase():
+
+  # Creates a UBM
+  weights = numpy.array([0.4, 0.6], 'float64')
+  means = numpy.array([[1, 6, 2], [4, 3, 2]], 'float64')
+  variances = numpy.array([[1, 2, 1], [2, 1, 2]], 'float64')
+  ubm           = GMMMachine(2,3)
+  ubm.weights   = weights
+  ubm.means     = means
+  ubm.variances = variances
+
+  # Creates a ISVBase
+  U = numpy.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]], 'float64')
+  d = numpy.array([0, 1, 0, 1, 0, 1], 'float64')
+  m = ISVBase(ubm, ru=1)
+  _,_,ru = m.shape
+  assert ru == 1
+
+  # Checks for correctness
+  m.resize(2)
+  m.u = U
+  m.d = d
+  n_gaussians,dim,ru = m.shape
+  supervector_length = m.supervector_length
+  assert (m.u == U).all()
+  assert (m.d == d).all()  
+  assert n_gaussians        == 2
+  assert dim                == 3
+  assert supervector_length == 6
+  assert ru                 == 2
+
+  # Saves and loads
+  filename = str(tempfile.mkstemp(".hdf5")[1])
+  m.save(bob.io.base.HDF5File(filename, 'w'))
+  m_loaded = ISVBase(bob.io.base.HDF5File(filename))
+  m_loaded.ubm = ubm
+  assert m == m_loaded
+  assert (m != m_loaded) is False
+  assert m.is_similar_to(m_loaded)
+
+  # Copy constructor
+  mc = ISVBase(m)
+  assert m == mc
+
+  # Variant
+  #mv = ISVBase()
+  # Checks for correctness
+  #mv.ubm = ubm
+  #mv.resize(2)
+  #mv.u = U
+  #mv.d = d
+  #assert (m.u == U).all()
+  #assert (m.d == d).all()
+  #assert m.dim_c == 2
+  #assert m.dim_d == 3
+  #assert m.dim_cd == 6
+  #assert m.dim_ru == 2
+
+  # Clean-up
+  os.unlink(filename)
+
+def test_JFAMachine():
+
+  # Creates a UBM
+  weights   = numpy.array([0.4, 0.6], 'float64')
+  means     = numpy.array([[1, 6, 2], [4, 3, 2]], 'float64')
+  variances = numpy.array([[1, 2, 1], [2, 1, 2]], 'float64')
+  ubm           = GMMMachine(2,3)
+  ubm.weights   = weights
+  ubm.means     = means
+  ubm.variances = variances
+
+  # Creates a JFABase
+  U = numpy.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]], 'float64')
+  V = numpy.array([[6, 5], [4, 3], [2, 1], [1, 2], [3, 4], [5, 6]], 'float64')
+  d = numpy.array([0, 1, 0, 1, 0, 1], 'float64')
+  base = JFABase(ubm,2,2)
+  base.u = U
+  base.v = V
+  base.d = d
+
+  # Creates a JFAMachine
+  y = numpy.array([1,2], 'float64')
+  z = numpy.array([3,4,1,2,0,1], 'float64')
+  m = JFAMachine(base)
+  m.y = y
+  m.z = z
+  n_gaussians,dim,ru,rv = m.shape
+  supervector_length    = m.supervector_length  
+  
+  assert n_gaussians        == 2
+  assert dim                == 3
+  assert supervector_length == 6
+  assert ru                 == 2
+  assert rv                 == 2
+  assert (m.y == y).all()
+  assert (m.z == z).all()
+
+  # Saves and loads
+  filename = str(tempfile.mkstemp(".hdf5")[1])
+  m.save(bob.io.base.HDF5File(filename, 'w'))
+  m_loaded = JFAMachine(bob.io.base.HDF5File(filename))
+  m_loaded.jfa_base = base
+  assert m == m_loaded
+  assert (m != m_loaded) is False
+  assert m.is_similar_to(m_loaded)
+
+  # Copy constructor
+  mc = JFAMachine(m)
+  assert m == mc
+
+  # Variant
+  #mv = JFAMachine()
+  # Checks for correctness
+  #mv.jfa_base = base
+  #m.y = y
+  #m.z = z
+  #assert m.dim_c == 2
+  #assert m.dim_d == 3
+  #assert m.dim_cd == 6
+  #assert m.dim_ru == 2
+  #assert m.dim_rv == 2
+  #assert (m.y == y).all()
+  #assert (m.z == z).all()
+
+  # Defines GMMStats
+  gs = GMMStats(2,3)
+  log_likelihood = -3.
+  T = 1
+  n = numpy.array([0.4, 0.6], 'float64')
+  sumpx = numpy.array([[1., 2., 3.], [4., 5., 6.]], 'float64')
+  sumpxx = numpy.array([[10., 20., 30.], [40., 50., 60.]], 'float64')
+  gs.log_likelihood = log_likelihood
+  gs.t = T
+  gs.n = n
+  gs.sum_px = sumpx
+  gs.sum_pxx = sumpxx
+
+  # Forward GMMStats and check estimated value of the x speaker factor
+  eps = 1e-10
+  x_ref = numpy.array([0.291042849767692, 0.310273618998444], 'float64')
+  score_ref = -2.111577181208289
+  score = m(gs)
+  assert numpy.allclose(m.x, x_ref, eps)
+  assert abs(score_ref-score) < eps
+
+  # x and Ux
+  x = numpy.ndarray((2,), numpy.float64)
+  m.estimate_x(gs, x)
+  n_gaussians, dim,_,_ = m.shape
+  x_py = estimate_x(n_gaussians, dim, ubm.mean_supervector, ubm.variance_supervector, U, n, sumpx)
+  assert numpy.allclose(x, x_py, eps)
+
+  ux = numpy.ndarray((6,), numpy.float64)
+  m.estimate_ux(gs, ux)
+  n_gaussians, dim,_,_ = m.shape  
+  ux_py = estimate_ux(n_gaussians, dim, ubm.mean_supervector, ubm.variance_supervector, U, n, sumpx)
+  assert numpy.allclose(ux, ux_py, eps)
+  assert numpy.allclose(m.x, x, eps)
+
+  score = m.forward_ux(gs, ux)
+
+  assert abs(score_ref-score) < eps
+
+  # Clean-up
+  os.unlink(filename)
+
+def test_ISVMachine():
+
+  # Creates a UBM
+  weights = numpy.array([0.4, 0.6], 'float64')
+  means = numpy.array([[1, 6, 2], [4, 3, 2]], 'float64')
+  variances = numpy.array([[1, 2, 1], [2, 1, 2]], 'float64')
+  ubm = GMMMachine(2,3)
+  ubm.weights = weights
+  ubm.means = means
+  ubm.variances = variances
+
+  # Creates an ISVBase
+  U = numpy.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]], 'float64')
+  #V = numpy.array([[0], [0], [0], [0], [0], [0]], 'float64')
+  d = numpy.array([0, 1, 0, 1, 0, 1], 'float64')
+  base = ISVBase(ubm,2)
+  base.u = U
+  #base.v = V
+  base.d = d
+
+  # Creates an ISVMachine
+  z = numpy.array([3,4,1,2,0,1], 'float64')
+  m = ISVMachine(base)
+  m.z = z
+  
+  n_gaussians,dim,ru    = m.shape
+  supervector_length    = m.supervector_length  
+  assert n_gaussians          == 2
+  assert dim                  == 3
+  assert supervector_length   == 6
+  assert ru                   == 2
+  assert (m.z == z).all()
+
+  # Saves and loads
+  filename = str(tempfile.mkstemp(".hdf5")[1])
+  m.save(bob.io.base.HDF5File(filename, 'w'))
+  m_loaded = ISVMachine(bob.io.base.HDF5File(filename))
+  m_loaded.isv_base = base
+  assert m == m_loaded
+  assert (m != m_loaded) is False
+  assert m.is_similar_to(m_loaded)
+
+  # Copy constructor
+  mc = ISVMachine(m)
+  assert m == mc
+
+  # Variant
+  mv = ISVMachine(base)
+  # Checks for correctness
+  mv.z = z
+
+  n_gaussians,dim,ru    = mv.shape
+  supervector_length    = mv.supervector_length
+  assert n_gaussians        == 2
+  assert dim                == 3
+  assert supervector_length == 6
+  assert ru                 == 2
+  assert (mv.z == z).all()
+
+  # Defines GMMStats
+  gs = GMMStats(2,3)
+  log_likelihood = -3.
+  T = 1
+  n = numpy.array([0.4, 0.6], 'float64')
+  sumpx = numpy.array([[1., 2., 3.], [4., 5., 6.]], 'float64')
+  sumpxx = numpy.array([[10., 20., 30.], [40., 50., 60.]], 'float64')
+  gs.log_likelihood = log_likelihood
+  gs.t = T
+  gs.n = n
+  gs.sum_px = sumpx
+  gs.sum_pxx = sumpxx
+
+  # Forward GMMStats and check the estimated value of the session factor x
+  eps = 1e-10
+  x_ref = numpy.array([0.291042849767692, 0.310273618998444], 'float64')
+  score_ref = -3.280498193082100
+
+  score = m(gs)
+  assert numpy.allclose(m.x, x_ref, eps)  
+  assert abs(score_ref-score) < eps
+
+  # Check using alternate forward() method
+  supervector_length = m.supervector_length
+  Ux = numpy.ndarray(shape=(supervector_length,), dtype=numpy.float64)
+  m.estimate_ux(gs, Ux)
+  score = m.forward_ux(gs, Ux)
+  assert abs(score_ref-score) < eps
+
+  # x and Ux
+  x = numpy.ndarray((2,), numpy.float64)
+  m.estimate_x(gs, x)
+  n_gaussians,dim,_    = m.shape  
+  x_py = estimate_x(n_gaussians, dim, ubm.mean_supervector, ubm.variance_supervector, U, n, sumpx)
+  assert numpy.allclose(x, x_py, eps)
+
+  ux = numpy.ndarray((6,), numpy.float64)
+  m.estimate_ux(gs, ux)
+  n_gaussians,dim,_    = m.shape  
+  ux_py = estimate_ux(n_gaussians, dim, ubm.mean_supervector, ubm.variance_supervector, U, n, sumpx)
+  assert numpy.allclose(ux, ux_py, eps)
+  assert numpy.allclose(m.x, x, eps)
+
+  score = m.forward_ux(gs, ux)
+  assert abs(score_ref-score) < eps
+
+  # Clean-up
+  os.unlink(filename)
diff --git a/bob/learn/em/test/test_jfa_trainer.py b/bob/learn/em/test/test_jfa_trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..044bbe5281357805fb42a667c1cfdfc85fcdae89
--- /dev/null
+++ b/bob/learn/em/test/test_jfa_trainer.py
@@ -0,0 +1,316 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+# Tue Jul 19 12:16:17 2011 +0200
+#
+# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+
+"""Test JFA trainer package
+"""
+
+import numpy
+import numpy.linalg
+
+import bob.core.random
+
+from bob.learn.em import GMMStats, GMMMachine, JFABase, JFAMachine, ISVBase, ISVMachine, JFATrainer, ISVTrainer
+
+
+def equals(x, y, epsilon):
+  return (abs(x - y) < epsilon).all()
+
+# Define Training set and initial values for tests
+F1 = numpy.array( [0.3833, 0.4516, 0.6173, 0.2277, 0.5755, 0.8044, 0.5301,
+  0.9861, 0.2751, 0.0300, 0.2486, 0.5357]).reshape((6,2))
+F2 = numpy.array( [0.0871, 0.6838, 0.8021, 0.7837, 0.9891, 0.5341, 0.0669,
+  0.8854, 0.9394, 0.8990, 0.0182, 0.6259]).reshape((6,2))
+F=[F1, F2]
+
+N1 = numpy.array([0.1379, 0.1821, 0.2178, 0.0418]).reshape((2,2))
+N2 = numpy.array([0.1069, 0.9397, 0.6164, 0.3545]).reshape((2,2))
+N=[N1, N2]
+
+gs11 = GMMStats(2,3)
+gs11.n = N1[:,0]
+gs11.sum_px = F1[:,0].reshape(2,3)
+gs12 = GMMStats(2,3)
+gs12.n = N1[:,1]
+gs12.sum_px = F1[:,1].reshape(2,3)
+
+gs21 = GMMStats(2,3)
+gs21.n = N2[:,0]
+gs21.sum_px = F2[:,0].reshape(2,3)
+gs22 = GMMStats(2,3)
+gs22.n = N2[:,1]
+gs22.sum_px = F2[:,1].reshape(2,3)
+
+TRAINING_STATS = [[gs11, gs12], [gs21, gs22]]
+UBM_MEAN = numpy.array([0.1806, 0.0451, 0.7232, 0.3474, 0.6606, 0.3839])
+UBM_VAR = numpy.array([0.6273, 0.0216, 0.9106, 0.8006, 0.7458, 0.8131])
+M_d = numpy.array([0.4106, 0.9843, 0.9456, 0.6766, 0.9883, 0.7668])
+M_v = numpy.array( [0.3367, 0.4116, 0.6624, 0.6026, 0.2442, 0.7505, 0.2955,
+  0.5835, 0.6802, 0.5518, 0.5278,0.5836]).reshape((6,2))
+M_u = numpy.array( [0.5118, 0.3464, 0.0826, 0.8865, 0.7196, 0.4547, 0.9962,
+  0.4134, 0.3545, 0.2177, 0.9713, 0.1257]).reshape((6,2))
+
+z1 = numpy.array([0.3089, 0.7261, 0.7829, 0.6938, 0.0098, 0.8432])
+z2 = numpy.array([0.9223, 0.7710, 0.0427, 0.3782, 0.7043, 0.7295])
+y1 = numpy.array([0.2243, 0.2691])
+y2 = numpy.array([0.6730, 0.4775])
+x1 = numpy.array([0.9976, 0.8116, 0.1375, 0.3900]).reshape((2,2))
+x2 = numpy.array([0.4857, 0.8944, 0.9274, 0.9175]).reshape((2,2))
+M_z=[z1, z2]
+M_y=[y1, y2]
+M_x=[x1, x2]
+
+
+def test_JFATrainer_updateYandV():
+  # test the JFATrainer for updating Y and V
+
+  v_ref = numpy.array( [0.7228, 0.7892, 0.6475, 0.6080, 0.8631, 0.8416,
+    1.6512, 1.6068, 0.0500, 0.0101, 0.4325, 0.6719]).reshape((6,2))
+
+  y1 = numpy.array([0., 0.])
+  y2 = numpy.array([0., 0.])
+  y3 = numpy.array([0.9630, 1.3868])
+  y4 = numpy.array([0.0426, -0.3721])
+  y=[y1, y2]
+
+  # call the updateY function
+  ubm = GMMMachine(2,3)
+  ubm.mean_supervector = UBM_MEAN
+  ubm.variance_supervector = UBM_VAR
+  m = JFABase(ubm,2,2)
+  t = JFATrainer(10)
+  t.initialize(m, TRAINING_STATS)
+  m.u = M_u
+  m.v = M_v
+  m.d = M_d
+  t.__X__ = M_x
+  t.__Y__ = y
+  t.__Z__ = M_z
+  t.e_step1(m, TRAINING_STATS)
+  t.m_step1(m, TRAINING_STATS)
+
+  # Expected results(JFA cookbook, matlab)
+  assert equals(t.__Y__[0], y3, 2e-4)
+  assert equals(t.__Y__[1], y4, 2e-4)
+  assert equals(m.v, v_ref, 2e-4)
+
+
+def test_JFATrainer_updateXandU():
+  # test the JFATrainer for updating X and U
+
+  u_ref = numpy.array( [0.6729, 0.3408, 0.0544, 1.0653, 0.5399, 1.3035,
+    2.4995, 0.4385, 0.1292, -0.0576, 1.1962, 0.0117]).reshape((6,2))
+
+  x1 = numpy.array([0., 0., 0., 0.]).reshape((2,2))
+  x2 = numpy.array([0., 0., 0., 0.]).reshape((2,2))
+  x3 = numpy.array([0.2143, 1.8275, 3.1979, 0.1227]).reshape((2,2))
+  x4 = numpy.array([-1.3861, 0.2359, 5.3326, -0.7914]).reshape((2,2))
+  x  = [x1, x2]
+
+  # call the updateX function
+  ubm = GMMMachine(2,3)
+  ubm.mean_supervector = UBM_MEAN
+  ubm.variance_supervector = UBM_VAR
+  m = JFABase(ubm,2,2)
+  t = JFATrainer(10)
+  t.initialize(m, TRAINING_STATS)
+  m.u = M_u
+  m.v = M_v
+  m.d = M_d
+  t.__X__ = x
+  t.__Y__ = M_y
+  t.__Z__ = M_z
+  t.e_step2(m, TRAINING_STATS)
+  t.m_step2(m, TRAINING_STATS)
+
+  # Expected results(JFA cookbook, matlab)
+  assert equals(t.__X__[0], x3, 2e-4)
+  assert equals(t.__X__[1], x4, 2e-4)
+  assert equals(m.u, u_ref, 2e-4)
+
+
+def test_JFATrainer_updateZandD():
+  # test the JFATrainer for updating Z and D
+
+  d_ref = numpy.array([0.3110, 1.0138, 0.8297, 1.0382, 0.0095, 0.6320])
+
+  z1 = numpy.array([0., 0., 0., 0., 0., 0.])
+  z2 = numpy.array([0., 0., 0., 0., 0., 0.])
+  z3_ref = numpy.array([0.3256, 1.8633, 0.6480, 0.8085, -0.0432, 0.2885])
+  z4_ref = numpy.array([-0.3324, -0.1474, -0.4404, -0.4529, 0.0484, -0.5848])
+  z=[z1, z2]
+
+  # call the updateZ function
+  ubm = GMMMachine(2,3)
+  ubm.mean_supervector = UBM_MEAN
+  ubm.variance_supervector = UBM_VAR
+  m = JFABase(ubm,2,2)
+  t = JFATrainer(10)
+  t.initialize(m, TRAINING_STATS)
+  m.u = M_u
+  m.v = M_v
+  m.d = M_d
+  t.__X__ = M_x
+  t.__Y__ = M_y
+  t.__Z__ = z
+  t.e_step3(m, TRAINING_STATS)
+  t.m_step3(m, TRAINING_STATS)
+
+  # Expected results(JFA cookbook, matlab)
+  assert equals(t.__Z__[0], z3_ref, 2e-4)
+  assert equals(t.__Z__[1], z4_ref, 2e-4)
+  assert equals(m.d, d_ref, 2e-4)
+
+
+def test_JFATrainAndEnrol():
+  # Train and enrol a JFAMachine
+
+  # Calls the train function
+  ubm = GMMMachine(2,3)
+  ubm.mean_supervector = UBM_MEAN
+  ubm.variance_supervector = UBM_VAR
+  mb = JFABase(ubm, 2, 2)
+  t = JFATrainer(10)
+  t.initialize(mb, TRAINING_STATS)
+  mb.u = M_u
+  mb.v = M_v
+  mb.d = M_d
+  t.train_loop(mb, TRAINING_STATS)
+
+  v_ref = numpy.array([[0.245364911936476, 0.978133261775424], [0.769646805052223, 0.940070736856596], [0.310779202800089, 1.456332053893072],
+        [0.184760934399551, 2.265139705602147], [0.701987784039800, 0.081632150899400], [0.074344030229297, 1.090248340917255]], 'float64')
+  u_ref = numpy.array([[0.049424652628448, 0.060480486336896], [0.178104127464007, 1.884873813495153], [1.204011484266777, 2.281351307871720],
+        [7.278512126426286, -0.390966087173334], [-0.084424326581145, -0.081725474934414], [4.042143689831097, -0.262576386580701]], 'float64')
+  d_ref = numpy.array([9.648467e-18, 2.63720683155e-12, 2.11822157653706e-10, 9.1047243e-17, 1.41163442535567e-10, 3.30581e-19], 'float64')
+
+  eps = 1e-10
+  assert numpy.allclose(mb.v, v_ref, eps)
+  assert numpy.allclose(mb.u, u_ref, eps)
+  assert numpy.allclose(mb.d, d_ref, eps)
+
+  # Calls the enrol function
+  m = JFAMachine(mb)
+
+  Ne = numpy.array([0.1579, 0.9245, 0.1323, 0.2458]).reshape((2,2))
+  Fe = numpy.array([0.1579, 0.1925, 0.3242, 0.1234, 0.2354, 0.2734, 0.2514, 0.5874, 0.3345, 0.2463, 0.4789, 0.5236]).reshape((6,2))
+  gse1 = GMMStats(2,3)
+  gse1.n = Ne[:,0]
+  gse1.sum_px = Fe[:,0].reshape(2,3)
+  gse2 = GMMStats(2,3)
+  gse2.n = Ne[:,1]
+  gse2.sum_px = Fe[:,1].reshape(2,3)
+
+  gse = [gse1, gse2]
+  t.enrol(m, gse, 5)
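+  # (enrolment estimates the speaker-dependent factors y and z from the
+  #  enrolment statistics; the third argument is, as we read it, the number
+  #  of enrolment iterations)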
+
+  y_ref = numpy.array([0.555991469319657, 0.002773650670010], 'float64')
+  z_ref = numpy.array([8.2228e-20, 3.15216909492e-13, -1.48616735364395e-10, 1.0625905e-17, 3.7150503117895e-11, 1.71104e-19], 'float64')
+  assert numpy.allclose(m.y, y_ref, eps)
+  assert numpy.allclose(m.z, z_ref, eps)
+
+
+def test_ISVTrainAndEnrol():
+  # Train and enrol an 'ISVMachine'
+
+  eps = 1e-10
+  d_ref = numpy.array([0.39601136, 0.07348469, 0.47712682, 0.44738127, 0.43179856, 0.45086029], 'float64')
+  u_ref = numpy.array([[0.855125642430777, 0.563104284748032], [-0.325497865404680, 1.923598985291687], [0.511575659503837, 1.964288663083095], [9.330165761678115, 1.073623827995043], [0.511099245664012, 0.278551249248978], [5.065578541930268, 0.509565618051587]], 'float64')
+  z_ref = numpy.array([-0.079315777443826, 0.092702428248543, -0.342488761656616, -0.059922635809136 , 0.133539981073604, 0.213118695516570], 'float64')
+
+  # Calls the train function
+  ubm = GMMMachine(2,3)
+  ubm.mean_supervector = UBM_MEAN
+  ubm.variance_supervector = UBM_VAR
+  mb = ISVBase(ubm,2)
+  t = ISVTrainer(10, 4.)
+  #t.train(mb, TRAINING_STATS)
+  t.initialize(mb, TRAINING_STATS)
+  mb.u = M_u
+  for i in range(10):
+    t.e_step(mb, TRAINING_STATS)
+    t.m_step(mb)
+
+  assert numpy.allclose(mb.d, d_ref, eps)
+  assert numpy.allclose(mb.u, u_ref, eps)
+
+  # Calls the enrol function
+  m = ISVMachine(mb)
+
+  Ne = numpy.array([0.1579, 0.9245, 0.1323, 0.2458]).reshape((2,2))
+  Fe = numpy.array([0.1579, 0.1925, 0.3242, 0.1234, 0.2354, 0.2734, 0.2514, 0.5874, 0.3345, 0.2463, 0.4789, 0.5236]).reshape((6,2))
+  gse1 = GMMStats(2,3)
+  gse1.n = Ne[:,0]
+  gse1.sum_px = Fe[:,0].reshape(2,3)
+  gse2 = GMMStats(2,3)
+  gse2.n = Ne[:,1]
+  gse2.sum_px = Fe[:,1].reshape(2,3)
+
+  gse = [gse1, gse2]
+  t.enrol(m, gse, 5)
+  assert numpy.allclose(m.z, z_ref, eps)
+
+def test_JFATrainInitialize():
+  # Check that the initialization is consistent and using the rng (cf. issue #118)
+
+  eps = 1e-10
+
+  # UBM GMM
+  ubm = GMMMachine(2,3)
+  ubm.mean_supervector = UBM_MEAN
+  ubm.variance_supervector = UBM_VAR
+
+  ## JFA
+  jb = JFABase(ubm, 2, 2)
+  # first round
+  rng = bob.core.random.mt19937(0)
+  jt = JFATrainer(10)
+  jt.rng = rng
+  jt.initialize(jb, TRAINING_STATS)
+  u1 = jb.u
+  v1 = jb.v
+  d1 = jb.d
+
+  # second round
+  rng = bob.core.random.mt19937(0)
+  jt.rng = rng
+  jt.initialize(jb, TRAINING_STATS)
+  u2 = jb.u
+  v2 = jb.v
+  d2 = jb.d
+
+  assert numpy.allclose(u1, u2, eps)
+  assert numpy.allclose(v1, v2, eps)
+  assert numpy.allclose(d1, d2, eps)
+
+def test_ISVTrainInitialize():
+
+  # Check that the initialization is consistent and using the rng (cf. issue #118)
+  eps = 1e-10
+
+  # UBM GMM
+  ubm = GMMMachine(2,3)
+  ubm.mean_supervector = UBM_MEAN
+  ubm.variance_supervector = UBM_VAR
+
+  ## ISV
+  ib = ISVBase(ubm, 2)
+  # first round
+  rng = bob.core.random.mt19937(0)
+  it = ISVTrainer(10)
+  it.rng = rng
+  it.initialize(ib, TRAINING_STATS)
+  u1 = ib.u
+  d1 = ib.d
+
+  # second round
+  rng = bob.core.random.mt19937(0)
+  it.rng = rng
+  it.initialize(ib, TRAINING_STATS)
+  u2 = ib.u
+  d2 = ib.d
+
+  assert numpy.allclose(u1, u2, eps)
+  assert numpy.allclose(d1, d2, eps)
diff --git a/bob/learn/em/test/test_kmeans.py b/bob/learn/em/test/test_kmeans.py
new file mode 100644
index 0000000000000000000000000000000000000000..be80ba27a154cd549e048b0504b4c84c06df6a38
--- /dev/null
+++ b/bob/learn/em/test/test_kmeans.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+# Thu Feb 16 17:57:10 2012 +0200
+#
+# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+
+"""Tests the KMeans machine
+"""
+
+import os
+import numpy
+import tempfile
+
+import bob.io.base
+from bob.learn.em import KMeansMachine
+
+def equals(x, y, epsilon):
+  return (abs(x - y) < epsilon)
+
+def test_KMeansMachine():
+  # Test a KMeansMachine
+
+  means = numpy.array([[3, 70, 0], [4, 72, 0]], 'float64')
+  mean  = numpy.array([3,70,1], 'float64')
+
+  # Initializes a KMeansMachine
+  km = KMeansMachine(2,3)
+  km.means = means
+  assert km.shape == (2,3)
+
+  # Sets and gets
+  assert (km.means == means).all()
+  assert (km.get_mean(0) == means[0,:]).all()  
+  assert (km.get_mean(1) == means[1,:]).all()
+  km.set_mean(0, mean)
+  assert (km.get_mean(0) == mean).all()
+
+  # Distance and closest mean
+  eps = 1e-10
+
+  assert equals( km.get_distance_from_mean(mean, 0), 0, eps)
+  assert equals( km.get_distance_from_mean(mean, 1), 6, eps)  
+  
+  (index, dist) = km.get_closest_mean(mean)
+  
+  assert index == 0
+  assert equals( dist, 0, eps)
+  assert equals( km.get_min_distance(mean), 0, eps)
+
+  # Loads and saves
+  filename = str(tempfile.mkstemp(".hdf5")[1])
+  km.save(bob.io.base.HDF5File(filename, 'w'))
+  km_loaded = KMeansMachine(bob.io.base.HDF5File(filename))
+  assert km == km_loaded
+
+  # Resize
+  km.resize(4,5)
+  assert km.shape == (4,5)
+
+  # Copy constructor and comparison operators
+  km.resize(2,3)
+  km2 = KMeansMachine(km)
+  assert km2 == km
+  assert (km2 != km) is False
+  assert km2.is_similar_to(km)
+  means2 = numpy.array([[3, 70, 0], [4, 72, 2]], 'float64')
+  km2.means = means2
+  assert (km2 == km) is False
+  assert km2 != km
+  assert (km2.is_similar_to(km)) is False
+
+  # Clean-up
+  os.unlink(filename)
+  
+  
+def test_KMeansMachine2():
+  kmeans             = KMeansMachine(2,2)
+  kmeans.means       = numpy.array([[1.2,1.3],[0.2,-0.3]])
+
+  data               = numpy.array([
+                                  [1.,1],
+                                  [1.2, 3],
+                                  [0,0],
+                                  [0.3,0.2],
+                                  [0.2,0]
+                                 ])
+  variances, weights = kmeans.get_variances_and_weights_for_each_cluster(data)
+
+  variances_result = numpy.array([[ 0.01,1.],
+                                  [ 0.01555556, 0.00888889]])
+  weights_result = numpy.array([ 0.4, 0.6])
+  
+  assert equals(weights_result,weights, 1e-3).all()
+  assert equals(variances_result,variances,1e-3).all()
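+
+# Illustrative cross-check (an addition, not part of the original test): the
+# reference values above can be reproduced in pure numpy by assigning each
+# sample to its closest mean and taking the biased per-cluster variance.
+def _variances_and_weights_numpy(means, data):
+  dists  = ((data[:, None, :] - means[None, :, :]) ** 2).sum(axis=2)
+  labels = dists.argmin(axis=1)
+  variances = numpy.array([data[labels == k].var(axis=0) for k in range(len(means))])
+  weights   = numpy.array([(labels == k).mean() for k in range(len(means))])
+  return variances, weights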
+ 
diff --git a/bob/learn/em/test/test_kmeans_trainer.py b/bob/learn/em/test/test_kmeans_trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..b62e992501560e298546924f2d53ad0a42cdf9eb
--- /dev/null
+++ b/bob/learn/em/test/test_kmeans_trainer.py
@@ -0,0 +1,181 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+# Fri Jan 18 12:46:00 2013 +0200
+#
+# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+
+"""Test K-Means algorithm
+"""
+import numpy
+
+import bob.core
+import bob.io.base
+from bob.io.base.test_utils import datafile
+
+from bob.learn.em import KMeansMachine, KMeansTrainer
+
+def equals(x, y, epsilon):
+  return (abs(x - y) < epsilon).all()
+
+def kmeans_plus_plus(machine, data, seed):
+  """Python implementation of K-Means++ (initialization)"""
+  n_data = data.shape[0]
+  rng = bob.core.random.mt19937(seed)
+  u = bob.core.random.uniform('int32', 0, n_data-1)
+  index = u(rng)
+  machine.set_mean(0, data[index,:])
+  weights = numpy.zeros(shape=(n_data,), dtype=numpy.float64)
+
+  for m in range(1,machine.dim_c):
+    for s in range(n_data):
+      s_cur = data[s,:]
+      w_cur = machine.get_distance_from_mean(s_cur, 0)
+      for i in range(m):
+        w_cur = min(machine.get_distance_from_mean(s_cur, i), w_cur)
+      weights[s] = w_cur
+    weights *= weights
+    weights /= numpy.sum(weights)
+    d = bob.core.random.discrete('int32', weights)
+    index = d(rng)
+    machine.set_mean(m, data[index,:])
+
+
+def NormalizeStdArray(path):
+  array = bob.io.base.load(path).astype('float64')
+  std = array.std(axis=0)
+  return (array/std, std)
+
+def multiplyVectorsByFactors(matrix, vector):
+  for i in range(0, matrix.shape[0]):
+    for j in range(0, matrix.shape[1]):
+      matrix[i, j] *= vector[j]
+
+def flipRows(array):
+  if len(array.shape) == 2:
+    return numpy.array([numpy.array(array[1, :]), numpy.array(array[0, :])], 'float64')
+  elif len(array.shape) == 1:
+    return numpy.array([array[1], array[0]], 'float64')
+  else:
+    raise Exception('Input type not supported by flipRows')
+
+if hasattr(KMeansTrainer, 'KMEANS_PLUS_PLUS'):
+  def test_kmeans_plus_plus():
+
+    # Tests the K-Means++ initialization
+    dim_c = 5
+    dim_d = 7
+    n_samples = 150
+    data = numpy.random.randn(n_samples,dim_d)
+    seed = 0
+
+    # C++ implementation
+    machine = KMeansMachine(dim_c, dim_d)
+    trainer = KMeansTrainer()
+    trainer.rng = bob.core.random.mt19937(seed)
+    trainer.initialization_method = 'KMEANS_PLUS_PLUS'
+    trainer.initialize(machine, data)
+
+    # Python implementation
+    py_machine = KMeansMachine(dim_c, dim_d)
+    kmeans_plus_plus(py_machine, data, seed)
+    assert equals(machine.means, py_machine.means, 1e-8)
+
+def test_kmeans_noduplicate():
+  # Data/dimensions
+  dim_c = 2
+  dim_d = 3
+  seed = 0
+  data = numpy.array([[1,2,3],[1,2,3],[1,2,3],[4,5,6.]])
+  # Defines machine and trainer
+  machine = KMeansMachine(dim_c, dim_d)
+  trainer = KMeansTrainer()
+  trainer.rng = bob.core.random.mt19937(seed)
+  trainer.initialization_method = 'RANDOM_NO_DUPLICATE'
+  trainer.initialize(machine, data)
+  # Makes sure that the two initial mean vectors selected are different
+  assert equals(machine.get_mean(0), machine.get_mean(1), 1e-8) == False
+
+
+def test_kmeans_a():
+
+  # Trains a KMeansMachine
+  # This files contains draws from two 1D Gaussian distributions:
+  #   * 100 samples from N(-10,1)
+  #   * 100 samples from N(10,1)
+  data = bob.io.base.load(datafile("samplesFrom2G_f64.hdf5", __name__, path="../data/"))
+
+  machine = KMeansMachine(2, 1)
+
+  trainer = KMeansTrainer()
+  trainer.train(machine, data)
+
+  [variances, weights] = machine.get_variances_and_weights_for_each_cluster(data)
+  variances_b = numpy.ndarray(shape=(2,1), dtype=numpy.float64)
+  weights_b = numpy.ndarray(shape=(2,), dtype=numpy.float64)
+  machine.__get_variances_and_weights_for_each_cluster_init__(variances_b, weights_b)
+  machine.__get_variances_and_weights_for_each_cluster_acc__(data, variances_b, weights_b)
+  machine.__get_variances_and_weights_for_each_cluster_fin__(variances_b, weights_b)
+  m1 = machine.get_mean(0)
+  m2 = machine.get_mean(1)
+
+  ## Check means [-10,10] / variances [1,1] / weights [0.5,0.5]
+  if(m1<m2): means=numpy.array(([m1[0],m2[0]]), 'float64')
+  else: means=numpy.array(([m2[0],m1[0]]), 'float64')
+  assert equals(means, numpy.array([-10.,10.]), 2e-1)
+  assert equals(variances, numpy.array([1.,1.]), 2e-1)
+  assert equals(weights, numpy.array([0.5,0.5]), 1e-3)
+
+  assert equals(variances, variances_b, 1e-8)
+  assert equals(weights, weights_b, 1e-8)
+
+
+
+def test_kmeans_b():
+
+  # Trains a KMeansMachine
+  (arStd,std) = NormalizeStdArray(datafile("faithful.torch3.hdf5", __name__, path="../data/"))
+
+  machine = KMeansMachine(2, 2)
+
+  trainer = KMeansTrainer()
+  #trainer.seed = 1337
+  trainer.train(machine, arStd)
+
+  [variances, weights] = machine.get_variances_and_weights_for_each_cluster(arStd)
+  means = machine.means
+
+  multiplyVectorsByFactors(means, std)
+  multiplyVectorsByFactors(variances, std ** 2)
+
+  gmmWeights = bob.io.base.load(datafile('gmm.init_weights.hdf5', __name__, path="../data/"))
+  gmmMeans = bob.io.base.load(datafile('gmm.init_means.hdf5', __name__, path="../data/"))
+  gmmVariances = bob.io.base.load(datafile('gmm.init_variances.hdf5', __name__, path="../data/"))
+
+  if (means[0, 0] < means[1, 0]):
+    means = flipRows(means)
+    variances = flipRows(variances)
+    weights = flipRows(weights)
+
+  assert equals(means, gmmMeans, 1e-3)
+  assert equals(weights, gmmWeights, 1e-3)
+  assert equals(variances, gmmVariances, 1e-3)
+  
+  # Check comparison operators
+  trainer1 = KMeansTrainer()
+  trainer2 = KMeansTrainer()
+  #trainer1.rng = trainer2.rng
+
+  #assert trainer1 == trainer2
+  #assert (trainer1 != trainer2) is False
+  trainer1.max_iterations = 1337
+  #assert (trainer1 == trainer2) is False
+  #assert trainer1 != trainer2
+
+  # Check that there is no duplicate means during initialization
+  machine = KMeansMachine(2, 1)
+  trainer = KMeansTrainer()
+  trainer.initialization_method = 'RANDOM_NO_DUPLICATE'
+  data = numpy.array([[1.], [1.], [1.], [1.], [1.], [1.], [2.], [3.]])
+  trainer.train(machine, data)
+  assert (numpy.isnan(machine.means).any()) == False
\ No newline at end of file
diff --git a/bob/learn/em/test/test_linearscoring.py b/bob/learn/em/test/test_linearscoring.py
new file mode 100644
index 0000000000000000000000000000000000000000..9958e20bd2e91c09d7adc9fb655897ba9e749ff6
--- /dev/null
+++ b/bob/learn/em/test/test_linearscoring.py
@@ -0,0 +1,128 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Francois Moulin <Francois.Moulin@idiap.ch>
+# Wed Jul 13 16:00:04 2011 +0200
+#
+# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+
+"""Tests on the LinearScoring function
+"""
+
+import numpy
+
+from bob.learn.em import GMMMachine, GMMStats, linear_scoring
+
+def test_LinearScoring():
+
+  ubm = GMMMachine(2, 2)
+  ubm.weights   = numpy.array([0.5, 0.5], 'float64')
+  ubm.means     = numpy.array([[3, 70], [4, 72]], 'float64')
+  ubm.variances = numpy.array([[1, 10], [2, 5]], 'float64')
+  ubm.variance_thresholds = numpy.array([[0, 0], [0, 0]], 'float64')
+
+  model1 = GMMMachine(2, 2)
+  model1.weights   = numpy.array([0.5, 0.5], 'float64')
+  model1.means     = numpy.array([[1, 2], [3, 4]], 'float64')
+  model1.variances = numpy.array([[9, 10], [11, 12]], 'float64')
+  model1.variance_thresholds = numpy.array([[0, 0], [0, 0]], 'float64')
+
+  model2 = GMMMachine(2, 2)
+  model2.weights   = numpy.array([0.5, 0.5], 'float64')
+  model2.means     = numpy.array([[5, 6], [7, 8]], 'float64')
+  model2.variances = numpy.array([[13, 14], [15, 16]], 'float64')
+  model2.variance_thresholds = numpy.array([[0, 0], [0, 0]], 'float64')
+
+  stats1 = GMMStats(2, 2)
+  stats1.sum_px = numpy.array([[1, 2], [3, 4]], 'float64')
+  stats1.n = numpy.array([1, 2], 'float64')
+  stats1.t = 1+2
+
+  stats2 = GMMStats(2, 2)
+  stats2.sum_px = numpy.array([[5, 6], [7, 8]], 'float64')
+  stats2.n = numpy.array([3, 4], 'float64')
+  stats2.t = 3+4
+
+  stats3 = GMMStats(2, 2)
+  stats3.sum_px = numpy.array([[5, 6], [7, 3]], 'float64')
+  stats3.n = numpy.array([3, 4], 'float64')
+  stats3.t = 3+4
+
+  test_channeloffset = [numpy.array([9, 8, 7, 6], 'float64'), numpy.array([5, 4, 3, 2], 'float64'), numpy.array([1, 0, 1, 2], 'float64')]
+
+  # Reference scores (from Idiap internal matlab implementation)
+  ref_scores_00 = numpy.array([[2372.9, 5207.7, 5275.7], [2215.7, 4868.1, 4932.1]], 'float64')
+  ref_scores_01 = numpy.array( [[790.9666666666667, 743.9571428571428, 753.6714285714285], [738.5666666666667, 695.4428571428572, 704.5857142857144]], 'float64')
+  ref_scores_10 = numpy.array([[2615.5, 5434.1, 5392.5], [2381.5, 4999.3, 5022.5]], 'float64')
+  ref_scores_11 = numpy.array([[871.8333333333332, 776.3000000000001, 770.3571428571427], [793.8333333333333, 714.1857142857143, 717.5000000000000]], 'float64')
+
+
+  # 1/ Use GMMMachines
+  # 1/a/ Without test_channelOffset, without frame-length normalisation
+  scores = linear_scoring([model1, model2], ubm, [stats1, stats2, stats3])
+  assert (abs(scores - ref_scores_00) < 1e-7).all()
+
+  # 1/b/ Without test_channelOffset, with frame-length normalisation
+  scores = linear_scoring([model1, model2], ubm, [stats1, stats2, stats3], [], True)
+  assert (abs(scores - ref_scores_01) < 1e-7).all()
+  #scores = linear_scoring([model1, model2], ubm, [stats1, stats2, stats3], (), True)
+  #assert (abs(scores - ref_scores_01) < 1e-7).all()
+  #scores = linear_scoring([model1, model2], ubm, [stats1, stats2, stats3], None, True)
+  #assert (abs(scores - ref_scores_01) < 1e-7).all()
+
+  # 1/c/ With test_channelOffset, without frame-length normalisation
+  scores = linear_scoring([model1, model2], ubm, [stats1, stats2, stats3], test_channeloffset)
+  assert (abs(scores - ref_scores_10) < 1e-7).all()
+
+  # 1/d/ With test_channelOffset, with frame-length normalisation
+  scores = linear_scoring([model1, model2], ubm, [stats1, stats2, stats3], test_channeloffset, True)
+  assert (abs(scores - ref_scores_11) < 1e-7).all()
+
+
+  # 2/ Use mean/variance supervectors
+  # 2/a/ Without test_channelOffset, without frame-length normalisation
+  scores = linear_scoring([model1.mean_supervector, model2.mean_supervector], ubm.mean_supervector, ubm.variance_supervector, [stats1, stats2, stats3])
+  assert (abs(scores - ref_scores_00) < 1e-7).all()
+
+  # 2/b/ Without test_channelOffset, with frame-length normalisation
+  scores = linear_scoring([model1.mean_supervector, model2.mean_supervector], ubm.mean_supervector, ubm.variance_supervector, [stats1, stats2, stats3], [], True)
+  assert (abs(scores - ref_scores_01) < 1e-7).all()
+
+  # 2/c/ With test_channelOffset, without frame-length normalisation
+  scores = linear_scoring([model1.mean_supervector, model2.mean_supervector], ubm.mean_supervector, ubm.variance_supervector, [stats1, stats2, stats3], test_channeloffset)
+  assert (abs(scores - ref_scores_10) < 1e-7).all()
+
+  # 2/d/ With test_channelOffset, with frame-length normalisation
+  scores = linear_scoring([model1.mean_supervector, model2.mean_supervector], ubm.mean_supervector, ubm.variance_supervector, [stats1, stats2, stats3], test_channeloffset, True)
+  assert (abs(scores - ref_scores_11) < 1e-7).all()
+
+
+  # 3/ Using single model/sample
+  # 3/a/ without frame-length normalisation
+  score = linear_scoring(model1.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats1, test_channeloffset[0])
+  assert abs(score - ref_scores_10[0,0]) < 1e-7
+  score = linear_scoring(model1.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats2, test_channeloffset[1])
+  assert abs(score - ref_scores_10[0,1]) < 1e-7
+  score = linear_scoring(model1.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats3, test_channeloffset[2])
+  assert abs(score - ref_scores_10[0,2]) < 1e-7
+  score = linear_scoring(model2.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats1, test_channeloffset[0])
+  assert abs(score - ref_scores_10[1,0]) < 1e-7
+  score = linear_scoring(model2.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats2, test_channeloffset[1])
+  assert abs(score - ref_scores_10[1,1]) < 1e-7
+  score = linear_scoring(model2.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats3, test_channeloffset[2])
+  assert abs(score - ref_scores_10[1,2]) < 1e-7
+
+
+  # 3/b/ with frame-length normalisation
+  score = linear_scoring(model1.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats1, test_channeloffset[0], True)
+  assert abs(score - ref_scores_11[0,0]) < 1e-7
+  score = linear_scoring(model1.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats2, test_channeloffset[1], True)
+  assert abs(score - ref_scores_11[0,1]) < 1e-7
+  score = linear_scoring(model1.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats3, test_channeloffset[2], True)
+  assert abs(score - ref_scores_11[0,2]) < 1e-7
+  score = linear_scoring(model2.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats1, test_channeloffset[0], True)
+  assert abs(score - ref_scores_11[1,0]) < 1e-7
+  score = linear_scoring(model2.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats2, test_channeloffset[1], True)
+  assert abs(score - ref_scores_11[1,1]) < 1e-7
+  score = linear_scoring(model2.mean_supervector, ubm.mean_supervector, ubm.variance_supervector, stats3, test_channeloffset[2], True)
+  assert abs(score - ref_scores_11[1,2]) < 1e-7
+
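+# Numerical sketch of the formula the reference scores above appear to
+# encode (our reading of linear scoring, cf. Glembek et al., ICASSP 2009;
+# treat the exact convention as an assumption):
+#   score = (mu_model - mu_ubm)^T Sigma_ubm^-1 (F - N*(mu_ubm + offset))
+# divided by the frame count stats.t when frame-length normalisation is on.
+def _linear_scoring_numpy(model_mean, ubm_mean, ubm_var, stats, offset=0., normalise=False):
+  F = stats.sum_px.flatten()
+  N = numpy.repeat(stats.n, len(ubm_mean) // len(stats.n))
+  score = numpy.dot((model_mean - ubm_mean) / ubm_var, F - N * (ubm_mean + offset))
+  return score / stats.t if normalise else score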
diff --git a/bob/learn/em/test/test_plda.py b/bob/learn/em/test/test_plda.py
new file mode 100644
index 0000000000000000000000000000000000000000..062857e6aa74ec9206fa72ac905c59773355ff57
--- /dev/null
+++ b/bob/learn/em/test/test_plda.py
@@ -0,0 +1,565 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+# Sat Oct 22 23:01:09 2011 +0200
+#
+# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+
+"""Tests PLDA machine
+"""
+
+import numpy
+import os
+import tempfile
+import nose.tools
+import math
+
+import bob.io.base
+
+from bob.learn.em import PLDABase, PLDAMachine
+
+# Defines common variables globally
+# Dimensionalities
+C_dim_d = 7
+C_dim_f = 2
+C_dim_g = 3
+# Values for F and G
+C_G=numpy.array([-1.1424, -0.5044, -0.1917,
+      -0.6249,  0.1021, -0.8658,
+      -1.1687,  1.1963,  0.1807,
+      0.3926,  0.1203,  1.2665,
+      1.3018, -1.0368, -0.2512,
+      -0.5936, -0.8571, -0.2046,
+      0.4364, -0.1699, -2.2015], 'float64').reshape(C_dim_d, C_dim_g)
+# F <-> PCA on G
+C_F=numpy.array([-0.054222647972093, -0.000000000783146,
+      0.596449127693018,  0.000000006265167,
+      0.298224563846509,  0.000000003132583,
+      0.447336845769764,  0.000000009397750,
+      -0.108445295944185, -0.000000001566292,
+      -0.501559493741856, -0.000000006265167,
+      -0.298224563846509, -0.000000003132583], 'float64').reshape(C_dim_d, C_dim_f)
+
+def equals(x, y, epsilon):
+  return (abs(x - y) < epsilon).all()
+
+def compute_i_sigma(sigma):
+  # Inverse of a diagonal matrix (represented by a 1D numpy array)
+  return (1. / sigma)
+
+def compute_alpha(G, sigma):
+  # alpha = (Id + G^T.sigma^-1.G)^-1 = \mathcal{G}
+  dim_g = G.shape[1]
+  isigma = numpy.diag(compute_i_sigma(sigma))
+  return numpy.linalg.inv(numpy.eye(dim_g) + numpy.dot(numpy.dot(G.transpose(), isigma), G))
+
+def compute_beta(G, sigma):
+  # beta = (sigma + G.G^T)^-1 = sigma^-1 - sigma^-1.G.alpha.G^T.sigma^-1 = \mathcal{S}
+  isigma = numpy.diag(compute_i_sigma(sigma))
+  gt_isigma = numpy.dot(G.transpose(), isigma)
+  alpha = compute_alpha(G, sigma)
+  return (isigma - numpy.dot(numpy.dot(gt_isigma.transpose(), alpha), gt_isigma))
+
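+# A minimal numerical sanity check (an illustrative addition, not part of
+# the original test suite): the Woodbury form returned by compute_beta can
+# be verified against the direct inverse (sigma + G.G^T)^-1.
+def _check_beta_woodbury(dim_d=5, dim_g=2, seed=0):
+  rng = numpy.random.RandomState(seed)
+  G = rng.randn(dim_d, dim_g)
+  sigma = rng.rand(dim_d) + 0.1           # strictly positive diagonal
+  direct = numpy.linalg.inv(numpy.diag(sigma) + numpy.dot(G, G.transpose()))
+  assert equals(compute_beta(G, sigma), direct, 1e-8)
+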
+def compute_gamma(F, G, sigma, a):
+  # gamma_a = (Id + a.F^T.beta.F)^-1 = \mathcal{F}_{a}
+  dim_f = F.shape[1]
+  beta = compute_beta(G, sigma)
+  return numpy.linalg.inv(numpy.eye(dim_f) + a * numpy.dot(numpy.dot(F.transpose(), beta), F))
+
+def compute_ft_beta(F, G, sigma):
+  # F^T.beta = F^T.\mathcal{S}
+  beta = compute_beta(G, sigma)
+  return numpy.dot(numpy.transpose(F), beta)
+
+def compute_gt_i_sigma(G, sigma):
+  # G^T.sigma^-1
+  isigma = compute_i_sigma(sigma)
+  return numpy.transpose(G) * isigma
+
+def compute_logdet_alpha(G, sigma):
+  # \log(\det(\alpha)) = \log(\det(\mathcal{G}))
+  alpha = compute_alpha(G, sigma)
+  return math.log(numpy.linalg.det(alpha))
+
+def compute_logdet_sigma(sigma):
+  # \log(\det(\sigma)) = \log(\prod(\sigma_{i}))
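+  # (numpy.sum(numpy.log(sigma)) is the numerically safer equivalent when
+  #  the entries of sigma are small)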
+  return math.log(numpy.prod(sigma))
+
+def compute_loglike_constterm(F, G, sigma, a):
+  # loglike_constterm[a] = a/2 * ( -D*\log(2*pi) -\log|\sigma| +\log|\alpha| +\log|\gamma_a|)
+  gamma_a = compute_gamma(F, G, sigma, a)
+  logdet_gamma_a = math.log(abs(numpy.linalg.det(gamma_a)))
+  ah = a/2.
+  dim_d =  F.shape[0]
+  logdet_sigma = compute_logdet_sigma(sigma)
+  logdet_alpha = compute_logdet_alpha(G, sigma)
+  res = -ah*dim_d*math.log(2*math.pi) - ah*logdet_sigma + ah*logdet_alpha + logdet_gamma_a/2.
+  return res
+
+def compute_log_likelihood_point_estimate(observation, mu, F, G, sigma, hi, wij):
+  """
+  This function computes p(x_{ij} | h_{i}, w_{ij}, \Theta), which is given by
+  N_{x}[\mu + Fh_{i} + Gw_{ij} + epsilon_{ij}, \Sigma], N_{x} being a
+  Gaussian distribution. As it returns the corresponding log likelihood,
+  this is given by the sum of the following three terms:
+  C1 = -dim_d/2 log(2pi)
+  C2 = -1/2 log(det(\Sigma))
+  C3 = -1/2 (x_{ij}-\mu-Fh_{i}-Gw_{ij})^{T}\Sigma^{-1}(x_{ij}-\mu-Fh_{i}-Gw_{ij})
+  """
+
+  ### Pre-computes some of the constants
+  dim_d          = observation.shape[0]             # A scalar
+  log_2pi        = numpy.log(2. * numpy.pi)        # A scalar
+  C1             = -(dim_d / 2.) * log_2pi         # A scalar
+  C2             = -(1. / 2.) * numpy.sum( numpy.log(sigma) ) # (dim_d, 1)
+
+  ### Subtract the identity and session components from the observed vector.
+  session_plus_identity  = numpy.dot(F, hi) + numpy.dot(G, wij)
+  normalised_observation = numpy.reshape(observation - mu - session_plus_identity, (dim_d,1))
+  ### Now calculate C3
+  sigma_inverse  = numpy.reshape(1. / sigma, (dim_d,1))                      # (dim_d, 1)
+  C3             = -(1. / 2.) * numpy.sum(normalised_observation * sigma_inverse * normalised_observation)
+
+  ### Returns the log likelihood
+  log_likelihood     = C1 + C2 + C3
+  return (log_likelihood)
+
+
+def compute_log_likelihood(observations, mu, F, G, sigma):
+  """
+  This function computes the log-likelihood of the observations given the parameters
+  of the PLDA model. This is done by fully integrating out the latent variables.
+  """
+  # Work out the number of samples that we have and normalise the data.
+  J_i                = observations.shape[0]                   # An integer > 0
+  norm_observations  = observations - numpy.tile(mu, [J_i,1])         # (J_i, D_x)
+
+  # There are three terms that need to be computed: C1, C2 and C3
+
+  # 1. Computes C1
+  # C1 = - J_{i} * dim_d/2 log(2*pi)
+  dim_d          = observations.shape[1]             # A scalar
+  dim_f          = F.shape[1]
+  log_2pi        = numpy.log(2. * numpy.pi)         # A scalar
+  C1             = - J_i * (dim_d / 2.) * log_2pi          # A scalar
+
+  # 2. Computes C2
+  # C2 = - J_i/2 * [log(det(sigma)) - log(det(alpha^-1))] + log(det(gamma_{J_i}))/2
+  ld_sigma = compute_logdet_sigma(sigma)
+  ld_alpha = compute_logdet_alpha(G, sigma)
+  gamma = compute_gamma(F, G, sigma, J_i)
+  ld_gamma = math.log(numpy.linalg.det(gamma))
+  C2 = - J_i/2.*(ld_sigma - ld_alpha)  + ld_gamma/2.
+
+  # 3. Computes C3
+  # This is a quadratic part and consists of
+  # C3   = -0.5 * sum x^T beta x + 0.5 * Quadratic term in x
+  # C3   = -0.5 * (C3a - C3b)
+  C3a                  = 0.0
+  C3b_sum_part         = numpy.zeros((dim_f,1))
+  isigma               = numpy.diag(compute_i_sigma(sigma))
+  beta                 = compute_beta(G, sigma)
+  ft_beta              = numpy.dot(numpy.transpose(F), beta)
+  for j in range(0, J_i):
+    ### Calculations for C3a
+    current_vector           = numpy.reshape(norm_observations[j,:], (dim_d,1))   # (D_x, 1)
+    vector_E                 = numpy.dot(beta, current_vector)                    # (D_x, 1)
+    current_result           = numpy.dot(current_vector.transpose(), vector_E)    # A floating point value
+    C3a                      = C3a + current_result[0][0]                         # A floating point value
+    ### Calculations for C3b
+    C3b_sum_part             = C3b_sum_part + numpy.dot(ft_beta, current_vector)   # (nf, 1)
+
+  ### Final calculations for C3b, using the matrix gamma_{J_i}
+  C3b                        = numpy.dot(numpy.dot(C3b_sum_part.transpose(), gamma), C3b_sum_part)
+  C3                         = -0.5 * (C3a - C3b[0][0])
+
+  return C1 + C2 + C3
+
+
+def test_plda_basemachine():
+  # Data used for performing the tests
+  sigma = numpy.ndarray(C_dim_d, 'float64')
+  sigma.fill(0.01)
+  mu = numpy.ndarray(C_dim_d, 'float64')
+  mu.fill(0)
+
+  # Defines reference results based on matlab
+  alpha_ref = numpy.array([ 0.002189051545735,  0.001127099941432,
+    -0.000145483208153, 0.001127099941432,  0.003549267943741,
+    -0.000552001405453, -0.000145483208153, -0.000552001405453,
+    0.001440505362615], 'float64').reshape(C_dim_g, C_dim_g)
+  beta_ref  = numpy.array([ 50.587191765140361, -14.512478352504877,
+    -0.294799164567830,  13.382002504394316,  9.202063877660278,
+    -43.182264846086497,  11.932345916716455, -14.512478352504878,
+    82.320149045633045, -12.605578822979698,  19.618675892079366,
+    13.033691341150439,  -8.004874490989799, -21.547363307109187,
+    -0.294799164567832, -12.605578822979696,  52.123885798398241,
+    4.363739008635009, 44.847177605628545,  16.438137537463710,
+    5.137421840557050, 13.382002504394316,  19.618675892079366,
+    4.363739008635011,  75.070401560513488, -4.515472972526140,
+    9.752862741017488,  34.196127678931106, 9.202063877660285,
+    13.033691341150439,  44.847177605628552,  -4.515472972526142,
+    56.189416227691098,  -7.536676357632515, -10.555735414707383,
+    -43.182264846086497,  -8.004874490989799,  16.438137537463703,
+    9.752862741017490, -7.536676357632518,  56.430571485722126,
+    9.471758169835317, 11.932345916716461, -21.547363307109187,
+    5.137421840557051,  34.196127678931099, -10.555735414707385,
+    9.471758169835320,  27.996266602110637], 'float64').reshape(C_dim_d, C_dim_d)
+  gamma3_ref = numpy.array([ 0.005318799462241, -0.000000012993151,
+    -0.000000012993151,  0.999999999999996], 'float64').reshape(C_dim_f, C_dim_f)
+
+  # Constructor tests
+  #m = PLDABase()
+  #assert m.dim_d == 0
+  #assert m.dim_f == 0
+  #assert m.dim_g == 0
+  #del m
+  m = PLDABase(C_dim_d, C_dim_f, C_dim_g)
+  assert m.shape[0] == C_dim_d
+  assert m.shape[1] == C_dim_f
+  assert m.shape[2] == C_dim_g
+  assert abs(m.variance_threshold - 0.) < 1e-10
+  del m
+  m = PLDABase(C_dim_d, C_dim_f, C_dim_g, 1e-2)
+  assert m.shape[0] == C_dim_d
+  assert m.shape[1] == C_dim_f
+  assert m.shape[2] == C_dim_g
+  assert abs(m.variance_threshold - 1e-2) < 1e-10
+  del m
+
+  # Defines base machine
+  m = PLDABase(C_dim_d, C_dim_f, C_dim_g)
+  #m.resize(C_dim_d, C_dim_f, C_dim_g)
+  # Sets the current mu, F, G and sigma
+  m.mu = mu
+  m.f = C_F
+  m.g = C_G
+  m.sigma = sigma
+  gamma3 = m.get_add_gamma(3).copy()
+  constTerm3 = m.get_add_log_like_const_term(3)
+
+  # Compares precomputed values to matlab reference
+  for ii in range(m.__alpha__.shape[0]):
+    for jj in range(m.__alpha__.shape[1]):
+      absdiff = abs(m.__alpha__[ii,jj]- alpha_ref[ii,jj])
+      assert absdiff < 1e-10, 'PLDABase alpha matrix does not match reference at (%d,%d) to 10^-10: |%g-%g| = %g' % (ii, jj, m.__alpha__[ii,jj], alpha_ref[ii,jj], absdiff)
+  assert equals(m.__alpha__, alpha_ref, 1e-10)
+  assert equals(m.__beta__, beta_ref, 1e-10)
+  assert equals(gamma3, gamma3_ref, 1e-10)
+
+  # Compares precomputed values to the ones returned by python implementation
+  assert equals(m.__isigma__, compute_i_sigma(sigma), 1e-10)
+  assert equals(m.__alpha__, compute_alpha(C_G,sigma), 1e-10)
+  assert equals(m.__beta__, compute_beta(C_G,sigma), 1e-10)
+  assert equals(m.get_add_gamma(3), compute_gamma(C_F,C_G,sigma,3), 1e-10)
+  assert m.has_gamma(3)
+  assert equals(m.get_gamma(3), compute_gamma(C_F,C_G,sigma,3), 1e-10)
+  assert equals(m.__ft_beta__, compute_ft_beta(C_F,C_G,sigma), 1e-10)
+  assert equals(m.__gt_i_sigma__, compute_gt_i_sigma(C_G,sigma), 1e-10)
+  assert math.fabs(m.__logdet_alpha__ - compute_logdet_alpha(C_G,sigma)) < 1e-10
+  assert math.fabs(m.__logdet_sigma__ - compute_logdet_sigma(sigma)) < 1e-10
+  assert abs(m.get_add_log_like_const_term(3) - compute_loglike_constterm(C_F,C_G,sigma,3)) < 1e-10
+  assert m.has_log_like_const_term(3)
+  assert abs(m.get_log_like_const_term(3) - compute_loglike_constterm(C_F,C_G,sigma,3)) < 1e-10
+
+  # Defines base machine
+  del m
+  m = PLDABase(C_dim_d, C_dim_f, C_dim_g)
+  # Sets the current mu, F, G and sigma
+  m.mu = mu
+  m.f = C_F
+  m.g = C_G
+  m.sigma = sigma
+  gamma3 = m.get_add_gamma(3).copy()
+  constTerm3 = m.get_add_log_like_const_term(3)
+
+  # Compares precomputed values to matlab reference
+  assert equals(m.__alpha__, alpha_ref, 1e-10)
+  assert equals(m.__beta__, beta_ref, 1e-10)
+  assert equals(gamma3, gamma3_ref, 1e-10)
+
+  # values before being saved
+  isigma = m.__isigma__.copy()
+  alpha = m.__alpha__.copy()
+  beta = m.__beta__.copy()
+  FtBeta = m.__ft_beta__.copy()
+  GtISigma = m.__gt_i_sigma__.copy()
+  logdetAlpha = m.__logdet_alpha__
+  logdetSigma = m.__logdet_sigma__
+
+  # Saves to file, loads and compares to original
+  filename = str(tempfile.mkstemp(".hdf5")[1])
+  m.save(bob.io.base.HDF5File(filename, 'w'))
+  m_loaded = PLDABase(bob.io.base.HDF5File(filename))
+
+  # Compares the values loaded with the former ones
+  assert m_loaded == m
+  assert (m_loaded != m) is False
+  assert equals(m_loaded.mu, mu, 1e-10)
+  assert equals(m_loaded.f, C_F, 1e-10)
+  assert equals(m_loaded.g, C_G, 1e-10)
+  assert equals(m_loaded.sigma, sigma, 1e-10)
+  assert equals(m_loaded.__isigma__, isigma, 1e-10)
+  assert equals(m_loaded.__alpha__, alpha, 1e-10)
+  assert equals(m_loaded.__beta__, beta, 1e-10)
+  assert equals(m_loaded.__ft_beta__, FtBeta, 1e-10)
+  assert equals(m_loaded.__gt_i_sigma__, GtISigma, 1e-10)
+  assert abs(m_loaded.__logdet_alpha__ - logdetAlpha) < 1e-10
+  assert abs(m_loaded.__logdet_sigma__ - logdetSigma) < 1e-10
+  assert m_loaded.has_gamma(3)
+  assert equals(m_loaded.get_gamma(3), gamma3_ref, 1e-10)
+  assert equals(m_loaded.get_add_gamma(3), gamma3_ref, 1e-10)
+  assert m_loaded.has_log_like_const_term(3)
+  assert abs(m_loaded.get_add_log_like_const_term(3) - constTerm3) < 1e-10
+
+  # Compares the values loaded with the former ones when copying
+  m_copy = PLDABase(m_loaded)
+  assert m_loaded == m_copy
+  assert (m_loaded != m_copy) is False
+  # Test clear_maps method
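+# In the JFA model exercised below, x holds the session/channel factors
+# (loaded by U), y the speaker factors (loaded by V) and z the
+# speaker-specific residuals (scaled by the diagonal matrix D).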
+  assert m_copy.has_gamma(3)
+  assert m_copy.has_log_like_const_term(3)
+  m_copy.clear_maps()
+  assert (m_copy.has_gamma(3)) is False
+  assert (m_copy.has_log_like_const_term(3)) is False
+
+  # Check variance flooring thresholds-related methods
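+  # (semantics as we read them: assigning sigma floors every entry below the
+  #  current variance_threshold up to the threshold, and raising the
+  #  threshold afterwards re-floors the stored sigma)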
+  v_zo = numpy.array([0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01])
+  v_zo_ = 0.01
+  v_zzo = numpy.array([0.001, 0.001, 0.001, 0.001, 0.001, 0.001, 0.001])
+  v_zzo_ = 0.001
+  m_copy.variance_threshold = v_zo_
+  assert (m_loaded == m_copy) is False
+  assert m_loaded != m_copy
+  m_copy.variance_threshold = v_zzo_
+  m_copy.sigma = v_zo
+  assert equals(m_copy.sigma, v_zo, 1e-10)
+  m_copy.variance_threshold = v_zo_
+  m_copy.sigma = v_zzo
+  assert equals(m_copy.sigma, v_zo, 1e-10)
+  m_copy.variance_threshold = v_zzo_
+  m_copy.sigma = v_zzo
+  assert equals(m_copy.sigma, v_zzo, 1e-10)
+  m_copy.variance_threshold = v_zo_
+  assert equals(m_copy.sigma, v_zo, 1e-10)
+
+  # Clean-up
+  os.unlink(filename)
+
+
+def test_plda_basemachine_loglikelihood_pointestimate():
+
+  # Data used for performing the tests
+  # Features and subspaces dimensionality
+  sigma = numpy.ndarray(C_dim_d, 'float64')
+  sigma.fill(0.01)
+  mu = numpy.ndarray(C_dim_d, 'float64')
+  mu.fill(0)
+  xij = numpy.array([0.7, 1.3, 2.5, 0.3, 1.3, 2.7, 0.9])
+  hi = numpy.array([-0.5, 0.5])
+  wij = numpy.array([-0.1, 0.2, 0.3])
+
+  m = PLDABase(C_dim_d, C_dim_f, C_dim_g)
+  # Sets the current mu, F, G and sigma
+  m.mu = mu
+  m.f = C_F
+  m.g = C_G
+  m.sigma = sigma
+
+  #assert equals(m.compute_log_likelihood_point_estimate(xij, hi, wij), compute_log_likelihood_point_estimate(xij, mu, C_F, C_G, sigma, hi, wij), 1e-6)
+  log_likelihood_point_estimate        = m.compute_log_likelihood_point_estimate(xij, hi, wij)
+  log_likelihood_point_estimate_python = compute_log_likelihood_point_estimate(xij,         mu, C_F, C_G, sigma, hi, wij)
+  assert equals(log_likelihood_point_estimate, log_likelihood_point_estimate_python, 1e-6)
+
+
+def test_plda_machine():
+
+  # Data used for performing the tests
+  # Features and subspaces dimensionality
+  sigma = numpy.ndarray(C_dim_d, 'float64')
+  sigma.fill(0.01)
+  mu = numpy.ndarray(C_dim_d, 'float64')
+  mu.fill(0)
+
+  # Defines base machine
+  mb = PLDABase(C_dim_d, C_dim_f, C_dim_g)
+  # Sets the current mu, F, G and sigma
+  mb.mu = mu
+  mb.f = C_F
+  mb.g = C_G
+  mb.sigma = sigma
+
+  # Test constructors and dim getters
+  m = PLDAMachine(mb)
+  assert m.shape[0] == C_dim_d
+  assert m.shape[1]== C_dim_f
+  assert m.shape[2] == C_dim_g
+
+  m0 = PLDAMachine(mb)
+  #m0.plda_base = mb
+  assert m0.shape[0]  == C_dim_d
+  assert m0.shape[1]  == C_dim_f
+  assert m0.shape[2]  == C_dim_g
+
+  # Defines machine
+  n_samples = 2
+  WSumXitBetaXi = 0.37
+  weightedSum = numpy.array([1.39,0.54], 'float64')
+  log_likelihood = -0.22
+
+  m.n_samples = n_samples
+  m.w_sum_xit_beta_xi = WSumXitBetaXi
+  m.weighted_sum = weightedSum
+  m.log_likelihood = log_likelihood
+
+  gamma3 = m.get_add_gamma(3).copy()
+  constTerm3 = m.get_add_log_like_const_term(3)
+
+  # Saves to file, loads and compares to original
+  filename = str(tempfile.mkstemp(".hdf5")[1])
+  m.save(bob.io.base.HDF5File(filename, 'w'))
+  m_loaded = PLDAMachine(bob.io.base.HDF5File(filename), mb)
+
+  # Compares the values loaded with the former ones
+  assert m_loaded == m
+  assert (m_loaded != m) is False
+  assert abs(m_loaded.n_samples - n_samples) < 1e-10
+  assert abs(m_loaded.w_sum_xit_beta_xi - WSumXitBetaXi) < 1e-10
+  assert equals(m_loaded.weighted_sum, weightedSum, 1e-10)
+  assert abs(m_loaded.log_likelihood - log_likelihood) < 1e-10
+  assert m_loaded.has_gamma(3)
+  assert equals(m_loaded.get_add_gamma(3), gamma3, 1e-10)
+  assert equals(m_loaded.get_gamma(3), gamma3, 1e-10)
+  assert m_loaded.has_log_like_const_term(3)
+  assert abs(m_loaded.get_add_log_like_const_term(3) - constTerm3) < 1e-10
+  assert abs(m_loaded.get_log_like_const_term(3) - constTerm3) < 1e-10
+
+  # Test clear_maps method
+  assert m_loaded.has_gamma(3)
+  assert m_loaded.has_log_like_const_term(3)
+  m_loaded.clear_maps()
+  assert (m_loaded.has_gamma(3)) is False
+  assert (m_loaded.has_log_like_const_term(3)) is False
+
+  # Check exceptions
+  #m_loaded2 = PLDAMachine(bob.io.base.HDF5File(filename))
+  #m_loaded2.load(bob.io.base.HDF5File(filename))
+  #nose.tools.assert_raises(RuntimeError, getattr, m_loaded2, 'shape')
+  #nose.tools.assert_raises(RuntimeError, getattr, m_loaded2, 'dim_f')
+  #nose.tools.assert_raises(RuntimeError, getattr, m_loaded2, 'dim_g')
+  #nose.tools.assert_raises(RuntimeError, m_loaded2.forward, [1.])
+  #nose.tools.assert_raises(RuntimeError, m_loaded2.compute_log_likelihood, [1.])
+
+  # Clean-up
+  os.unlink(filename)
+
+
+def test_plda_machine_log_likelihood_Python():
+
+  # Data used for performing the tests
+  # Features and subspaces dimensionality
+  sigma = numpy.ndarray(C_dim_d, 'float64')
+  sigma.fill(0.01)
+  mu = numpy.ndarray(C_dim_d, 'float64')
+  mu.fill(0)
+
+  # Defines base machine
+  mb = PLDABase(C_dim_d, C_dim_f, C_dim_g)
+  # Sets the current mu, F, G and sigma
+  mb.mu = mu
+  mb.f = C_F
+  mb.g = C_G
+  mb.sigma = sigma
+
+  # Defines machine
+  m = PLDAMachine(mb)
+
+  # Defines (random) samples and check compute_log_likelihood method
+  ar_e = numpy.random.randn(2,C_dim_d)
+  ar_p = numpy.random.randn(C_dim_d)
+  ar_s = numpy.vstack([ar_e, ar_p])
+  assert abs(m.compute_log_likelihood(ar_s, False) - compute_log_likelihood(ar_s, mu, C_F, C_G, sigma)) < 1e-10
+  ar_p2d = numpy.reshape(ar_p, (1,C_dim_d))
+  assert abs(m.compute_log_likelihood(ar_p, False) - compute_log_likelihood(ar_p2d, mu, C_F, C_G, sigma)) < 1e-10
+
+  # Defines (random) samples and check forward method
+  ar2_e = numpy.random.randn(4,C_dim_d)
+  ar2_p = numpy.random.randn(C_dim_d)
+  ar2_s = numpy.vstack([ar2_e, ar2_p])
+  m.log_likelihood = m.compute_log_likelihood(ar2_e, False)
+  llr = m.compute_log_likelihood(ar2_s, True) - (m.compute_log_likelihood(ar2_s, False) + m.log_likelihood)
+  assert abs(m(ar2_s) - llr) < 1e-10
+  ar2_p2d = numpy.random.randn(3,C_dim_d)
+  ar2_s2d = numpy.vstack([ar2_e, ar2_p2d])
+  llr2d = m.compute_log_likelihood(ar2_s2d, True) - (m.compute_log_likelihood(ar2_s2d, False) + m.log_likelihood)
+  assert abs(m(ar2_s2d) - llr2d) < 1e-10
+
+def test_plda_machine_log_likelihood_Prince():
+
+  # Data used for performing the tests
+  # Features and subspaces dimensionality
+  D = 7
+  nf = 2
+  ng = 3
+
+  # initial values for F, G and sigma
+  G_init=numpy.array([-1.1424, -0.5044, -0.1917,
+    -0.6249,  0.1021, -0.8658,
+    -1.1687,  1.1963,  0.1807,
+    0.3926,  0.1203,  1.2665,
+    1.3018, -1.0368, -0.2512,
+    -0.5936, -0.8571, -0.2046,
+    0.4364, -0.1699, -2.2015]).reshape(D,ng)
+  # F <-> PCA on G
+  F_init=numpy.array([-0.054222647972093, -0.000000000783146,
+    0.596449127693018,  0.000000006265167,
+    0.298224563846509,  0.000000003132583,
+    0.447336845769764,  0.000000009397750,
+    -0.108445295944185, -0.000000001566292,
+    -0.501559493741856, -0.000000006265167,
+    -0.298224563846509, -0.000000003132583]).reshape(D,nf)
+  sigma_init = 0.01 * numpy.ones((D,), 'float64')
+  mean_zero = numpy.zeros((D,), 'float64')
+
+  # base machine
+  mb = PLDABase(D,nf,ng)
+  mb.sigma = sigma_init
+  mb.g = G_init
+  mb.f = F_init
+  mb.mu = mean_zero
+
+  # Data for likelihood computation
+  x1 = numpy.array([0.8032, 0.3503, 0.4587, 0.9511, 0.1330, 0.0703, 0.7061])
+  x2 = numpy.array([0.9317, 0.1089, 0.6517, 0.1461, 0.6940, 0.6256, 0.0437])
+  x3 = numpy.array([0.7979, 0.9862, 0.4367, 0.3447, 0.0488, 0.2252, 0.5810])
+  X = numpy.ndarray((3,D), 'float64')
+  X[0,:] = x1
+  X[1,:] = x2
+  X[2,:] = x3
+  a = []
+  a.append(x1)
+  a.append(x2)
+  a.append(x3)
+  a = numpy.array(a)
+
+  # reference likelihood from Prince implementation
+  ll_ref = -182.8880743535197
+
+  # machine
+  m = PLDAMachine(mb)
+  ll = m.compute_log_likelihood(X)
+  assert abs(ll - ll_ref) < 1e-10
+
+  # log likelihood ratio
+  Y = numpy.ndarray((2,D), 'float64')
+  Y[0,:] = x1
+  Y[1,:] = x2
+  Z = numpy.ndarray((1,D), 'float64')
+  Z[0,:] = x3
+  llX = m.compute_log_likelihood(X)
+  llY = m.compute_log_likelihood(Y)
+  llZ = m.compute_log_likelihood(Z)
+  # reference obtained by computing the likelihood of [x1,x2,x3], [x1,x2]
+  # and [x3] separately
+  llr_ref = -4.43695386675
+  assert abs((llX - (llY + llZ)) - llr_ref) < 1e-10
diff --git a/bob/learn/em/test/test_plda_trainer.py b/bob/learn/em/test/test_plda_trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..f72ab6f7278f167b3bd9a3101d3e0191cf6c6e98
--- /dev/null
+++ b/bob/learn/em/test/test_plda_trainer.py
@@ -0,0 +1,741 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+# Fri Oct 14 18:07:56 2011 +0200
+#
+# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+
+"""Tests PLDA trainer
+"""
+
+import sys
+import numpy
+import numpy.linalg
+
+from bob.learn.em import PLDATrainer, PLDABase, PLDAMachine
+
+class PythonPLDATrainer():
+  """A simplified (and slower) version of the PLDATrainer"""
+
+  def __init__(self, convergence_threshold=0.001, max_iterations=10,
+      compute_likelihood=False, use_sum_second_order=True):
+    # Our state
+    self.m_convergence_threshold = convergence_threshold
+    self.m_max_iterations = max_iterations
+    self.m_compute_likelihood = compute_likelihood
+    self.m_dim_f = 0
+    self.m_dim_g = 0
+    self.m_B = numpy.ndarray(shape=(0,0), dtype=numpy.float64)
+    self.m_n_samples_per_id = numpy.ndarray(shape=(0,), dtype=numpy.float64)
+    self.m_z_first_order = []
+    self.m_z_second_order = []
+    self.m_sum_z_second_order = numpy.ndarray(shape=(0,0), dtype=numpy.float64)
+
+  def reset(self):
+    """Resets our internal state"""
+    self.m_convergence_threshold = 0.001
+    self.m_max_iterations = 10
+    self.m_compute_likelihood = False
+    self.m_dim_f = 0
+    self.m_dim_g = 0
+    self.m_n_samples_per_id = numpy.ndarray(shape=(0,), dtype=numpy.float64)
+    self.m_z_first_order = []
+    self.m_z_second_order = []
+    self.m_sum_z_second_order = numpy.ndarray(shape=(0,0), dtype=numpy.float64)
+
+  def __check_training_data__(self, data):
+    if len(data) == 0:
+      raise RuntimeError("Training data set is empty")
+    n_features = data[0].shape[1]
+    for v in data:
+      if(v.shape[1] != n_features):
+        raise RuntimeError("Inconsistent feature dimensionality in training data set")
+
+  def __init_members__(self, data):
+    n_features = data[0].shape[1]
+    self.m_z_first_order = []
+    df_dg = self.m_dim_f+self.m_dim_g
+    self.m_sum_z_second_order.resize(df_dg, df_dg)
+    self.m_n_samples_per_id.resize(len(data))
+    self.m_B.resize(n_features, df_dg)
+    for i in range(len(data)):
+      ns_i = data[i].shape[0]
+      self.m_n_samples_per_id[i] = ns_i
+      self.m_z_first_order.append(numpy.ndarray(shape=(ns_i, df_dg), dtype=numpy.float64))
+      self.m_z_second_order.append(numpy.ndarray(shape=(ns_i, df_dg, df_dg), dtype=numpy.float64))
+
+  def __init_mu__(self, machine, data):
+    mu = numpy.zeros(shape=machine.mu.shape[0], dtype=numpy.float64)
+    c = 0
+    # Computes the mean of the data
+    for v in data:
+      for i in range(v.shape[0]):
+        mu += v[i,:]
+        c +=1
+    mu /= c
+    machine.mu = mu
+
+  def __init_f__(self, machine, data):
+    n_ids = len(data)
+    S = numpy.zeros(shape=(machine.shape[0], n_ids), dtype=numpy.float64)
+    Si_sum = numpy.zeros(shape=(machine.shape[0],), dtype=numpy.float64)
+    for i in range(n_ids):
+      Si = S[:,i]
+      data_i = data[i]
+      for j in range(data_i.shape[0]):
+        Si += data_i[j,:]
+      Si /= data_i.shape[0]
+      Si_sum += Si
+    Si_sum /= n_ids
+
+    S = S - numpy.tile(Si_sum.reshape([machine.shape[0],1]), [1,n_ids])
+    U, sigma, S_ = numpy.linalg.svd(S, full_matrices=False)
+    U_slice = U[:,0:self.m_dim_f]
+    sigma_slice = sigma[0:self.m_dim_f]
+    sigma_slice_sqrt = numpy.sqrt(sigma_slice)
+    machine.f = U_slice / sigma_slice_sqrt
+
+  def __init_g__(self, machine, data):
+    n_samples = 0
+    for v in data:
+      n_samples += v.shape[0]
+    S = numpy.zeros(shape=(machine.shape[0], n_samples), dtype=numpy.float64)
+    Si_sum = numpy.zeros(shape=(machine.shape[0],), dtype=numpy.float64)
+    cache = numpy.zeros(shape=(machine.shape[0],), dtype=numpy.float64)
+    c = 0
+    for i in range(len(data)):
+      cache = numpy.zeros(shape=(machine.shape[0],), dtype=numpy.float64)
+      data_i = data[i]
+      for j in range(data_i.shape[0]):
+        cache += data_i[j,:]
+      cache /= data_i.shape[0]
+      for j in range(data_i.shape[0]):
+        S[:,c] = data_i[j,:] - cache
+        Si_sum += S[:,c]
+        c += 1
+    Si_sum /= n_samples
+
+    S = S - numpy.tile(Si_sum.reshape([machine.shape[0],1]), [1,n_samples])
+    U, sigma, S_ = numpy.linalg.svd(S, full_matrices=False)
+    U_slice = U[:,0:self.m_dim_g]
+    sigma_slice_sqrt = numpy.sqrt(sigma[0:self.m_dim_g])
+    machine.g = U_slice / sigma_slice_sqrt
+
+  def __init_sigma__(self, machine, data, factor = 1.):
+    """As a variance of the data"""
+    cache1 = numpy.zeros(shape=(machine.shape[0],), dtype=numpy.float64)
+    cache2 = numpy.zeros(shape=(machine.shape[0],), dtype=numpy.float64)
+    n_samples = 0
+    for v in data:
+      for j in range(v.shape[0]):
+        cache1 += v[j,:]
+      n_samples += v.shape[0]
+    cache1 /= n_samples
+    for v in data:
+      for j in range(v.shape[0]):
+        cache2 += numpy.square(v[j,:] - cache1)
+    machine.sigma = factor * cache2 / (n_samples - 1)
+
+  def __init_mu_f_g_sigma__(self, machine, data):
+    self.__init_mu__(machine, data)
+    self.__init_f__(machine, data)
+    self.__init_g__(machine, data)
+    self.__init_sigma__(machine, data)
+
+  def initialize(self, machine, data):
+    self.__check_training_data__(data)
+    n_features = data[0].shape[1]
+    if(machine.shape[0] != n_features):
+      raise RuntimeError("Inconsistent feature dimensionality between the machine and the training data set")
+    self.m_dim_f = machine.shape[1]
+    self.m_dim_g = machine.shape[2]
+    self.__init_members__(data)
+    # Warning: Default initialization of mu, F, G, sigma using scatters
+    self.__init_mu_f_g_sigma__(machine, data)
+    # Make sure that the precomputation has been performed
+    machine.__precompute__()
+
+  def __compute_sufficient_statistics_given_observations__(self, machine, observations):
+    """
+    We compute the expected values of the latent variables given the observations
+    and parameters of the model.
+
+    First order or the expected value of the latent variables.:
+      F = (I+A^{T}\Sigma'^{-1}A)^{-1} * A^{T}\Sigma^{-1} (\tilde{x}_{s}-\mu').
+    Second order stats:
+      S = (I+A^{T}\Sigma'^{-1}A)^{-1} + (F*F^{T}).
+    """
+
+    # Get the number of observations
+    J_i                       = observations.shape[0]            # An integer > 0
+    dim_d                     = observations.shape[1]            # A scalar
+    # Useful values
+    mu                        = machine.mu
+    F                         = machine.f
+    G                         = machine.g
+    sigma                     = machine.sigma
+    isigma                    = machine.__isigma__
+    alpha                     = machine.__alpha__
+    ft_beta                   = machine.__ft_beta__
+    gamma                     = machine.get_add_gamma(J_i)
+    # Normalise the observations
+    normalised_observations   = observations - numpy.tile(mu, [J_i,1]) # (J_i, dim_d)
+
+    ### Expected value of the latent variables using the scalable solution
+    # Identity part first
+    sum_ft_beta_part          = numpy.zeros(self.m_dim_f)     # (dim_f)
+    for j in range(0, J_i):
+      current_observation     = normalised_observations[j,:]  # (dim_d)
+      sum_ft_beta_part        = sum_ft_beta_part + numpy.dot(ft_beta, current_observation)  # (dim_f)
+    h_i                       = numpy.dot(gamma, sum_ft_beta_part)                          # (dim_f)
+    # Reproject the identity part to work out the session parts
+    Fh_i                      = numpy.dot(F, h_i)                                           # (dim_d)
+    z_first_order = numpy.zeros((J_i, self.m_dim_f+self.m_dim_g))
+    for j in range(0, J_i):
+      current_observation       = normalised_observations[j,:]                  # (dim_d)
+      w_ij                      = numpy.dot(alpha, G.transpose())               # (dim_g, dim_d)
+      w_ij                      = numpy.multiply(w_ij, isigma)                  # (dim_g, dim_d)
+      w_ij                      = numpy.dot(w_ij, (current_observation - Fh_i)) # (dim_g)
+      z_first_order[j,:]        = numpy.hstack([h_i,w_ij])                      # (dim_f+dim_g)
+
+    ### Calculate the expected value of the squared of the latent variables
+    # The constant matrix we use has the following parts: [top_left, top_right; bottom_left, bottom_right]
+    # P             = Inverse_I_plus_GTEG * G^T * Sigma^{-1} * F  (dim_g, dim_f)
+    # top_left      = gamma                                       (dim_f, dim_f)
+    # bottom_left   = top_right^T = P * gamma                     (dim_g, dim_f)
+    # bottom_right  = Inverse_I_plus_GTEG - bottom_left * P^T     (dim_g, dim_g)
+    top_left                 = gamma
+    P                        = numpy.dot(alpha, G.transpose())
+    P                        = numpy.dot(numpy.dot(P,numpy.diag(isigma)), F)
+    bottom_left              = -numpy.dot(P, top_left)
+    top_right                = bottom_left.transpose()
+    bottom_right             = alpha - numpy.dot(bottom_left, P.transpose())
+    constant_matrix          = numpy.bmat([[top_left,top_right],[bottom_left, bottom_right]])
+
+    # Now get the actual expected value
+    z_second_order = numpy.zeros((J_i, self.m_dim_f+self.m_dim_g, self.m_dim_f+self.m_dim_g))
+    for j in range(0, J_i):
+      z_second_order[j,:,:] = constant_matrix + numpy.outer(z_first_order[j,:],z_first_order[j,:])  # (dim_f+dim_g,dim_f+dim_g)
+
+    ### Return the first and second order statistics
+    return(z_first_order, z_second_order)
+
+  def e_step(self, machine, data):
+    self.m_sum_z_second_order.fill(0.)
+    for i in range(len(data)):
+      ### Get the observations for this label and the number of observations for this label.
+      observations_for_h_i      = data[i]
+      J_i                       = observations_for_h_i.shape[0]                           # An integer > 0
+
+      ### Gather the statistics for this identity and then separate them for each observation.
+      [z_first_order, z_second_order] = self.__compute_sufficient_statistics_given_observations__(machine, observations_for_h_i)
+      self.m_z_first_order[i]  = z_first_order
+      self.m_z_second_order[i] = z_second_order
+      J_i = len(z_second_order)
+      for j in range(0, J_i):
+        self.m_sum_z_second_order += z_second_order[j]
+
+  def __update_f_and_g__(self, machine, data):
+    ### Initialise the numerator and the denominator.
+    dim_d                          = machine.shape[0]
+    accumulated_B_numerator        = numpy.zeros((dim_d,self.m_dim_f+self.m_dim_g))
+    accumulated_B_denominator      = numpy.linalg.inv(self.m_sum_z_second_order)
+    mu                             = machine.mu
+
+    ### Go through and process on a per-subject-id basis
+    for i in range(len(data)):
+      # Normalise the observations
+      J_i                       = data[i].shape[0]
+      normalised_observations   = data[i] - numpy.tile(mu, [J_i,1]) # (J_i, dim_d)
+
+      ### Gather the statistics for this label
+      z_first_order_i                    = self.m_z_first_order[i]  # List of (dim_f+dim_g) vectors
+
+      ### Accumulate for the B matrix for this identity (current_label).
+      for j in range(0, J_i):
+        current_observation_for_h_i   = normalised_observations[j,:]   # (dim_d)
+        accumulated_B_numerator       = accumulated_B_numerator + numpy.outer(current_observation_for_h_i, z_first_order_i[j,:])  # (dim_d, dim_f+dim_g);
+
+    ### Update the B matrix which we can then use this to update the F and G matrices.
+    B                                  = numpy.dot(accumulated_B_numerator,accumulated_B_denominator)
+    machine.f                          = B[:,0:self.m_dim_f].copy()
+    machine.g                          = B[:,self.m_dim_f:self.m_dim_f+self.m_dim_g].copy()
+
+  def __update_sigma__(self, machine, data):
+    ### Initialise the accumulated Sigma
+    dim_d                          = machine.shape[0]
+    mu                             = machine.mu
+    accumulated_sigma              = numpy.zeros(dim_d)   # An array (dim_d)
+    number_of_observations         = 0
+    B = numpy.hstack([machine.f, machine.g])
+
+    ### Go through and process on a per-subject-id basis (based on the labels we were given).
+    for i in range(len(data)):
+      # Normalise the observations
+      J_i                       = data[i].shape[0]
+      normalised_observations   = data[i] - numpy.tile(mu, [J_i,1]) # (J_i, dim_d)
+
+      ### Gather the statistics for this identity and then separate them for each
+      ### observation.
+      z_first_order_i                    = self.m_z_first_order[i]  # List of (dim_f+dim_g) vectors
+
+      ### Accumulate for the sigma matrix, which will be diagonalised
+      for j in range(0, J_i):
+        current_observation_for_h_i   = normalised_observations[j,:]  # (dim_d)
+        left                          = current_observation_for_h_i * current_observation_for_h_i # (dim_d)
+        projected_direction           = numpy.dot(B, z_first_order_i[j,:])                        # (dim_d)
+        right                         = projected_direction * current_observation_for_h_i         # (dim_d)
+        accumulated_sigma             = accumulated_sigma + (left - right)                        # (dim_d)
+        number_of_observations        = number_of_observations + 1
+
+    ### Normalise by the number of observations (1/IJ)
+    machine.sigma                     = accumulated_sigma / number_of_observations
+
+  def m_step(self, machine, data):
+    self.__update_f_and_g__(machine, data)
+    self.__update_sigma__(machine, data)
+    machine.__precompute__()
+
+  def finalize(self, machine, data):
+    machine.__precompute_log_like__()
+
+  def train(self, machine, data):
+    """Runs initialization followed by EM iterations; this simplified version
+    always runs for max_iterations and performs no likelihood-based
+    convergence check."""
+    self.initialize(machine, data)
+    self.e_step(machine, data)
+
+    i = 0
+    while True:
+      self.m_step(machine, data)
+      self.e_step(machine, data)
+      if(self.m_max_iterations > 0 and i+1 >= self.m_max_iterations):
+        break
+      i += 1
+
+
+def test_plda_EM_vs_Python():
+
+  # Data used for performing the tests
+  # Features and subspaces dimensionality
+  D = 7
+  nf = 2
+  ng = 3
+
+  # first identity (4 samples)
+  a = numpy.array([
+    [1,2,3,4,5,6,7],
+    [7,8,3,3,1,8,2],
+    [3,2,1,4,5,1,7],
+    [9,0,3,2,1,4,6],
+    ], dtype='float64')
+
+  # second identity (3 samples)
+  b = numpy.array([
+    [5,6,3,4,2,0,2],
+    [1,7,8,9,4,4,8],
+    [8,7,2,5,1,1,1],
+    ], dtype='float64')
+
+  # list of arrays (training data)
+  l = [a,b]
+
+  # initial values for F, G and sigma (kept for reference only: this test
+  # initializes from scatters via the init_*_method settings below)
+  G_init=numpy.array([-1.1424, -0.5044, -0.1917,
+    -0.6249,  0.1021, -0.8658,
+    -1.1687,  1.1963,  0.1807,
+    0.3926,  0.1203,  1.2665,
+    1.3018, -1.0368, -0.2512,
+    -0.5936, -0.8571, -0.2046,
+    0.4364, -0.1699, -2.2015]).reshape(D,ng)
+
+  # F <-> PCA on G
+  F_init=numpy.array([-0.054222647972093, -0.000000000783146,
+    0.596449127693018,  0.000000006265167,
+    0.298224563846509,  0.000000003132583,
+    0.447336845769764,  0.000000009397750,
+    -0.108445295944185, -0.000000001566292,
+    -0.501559493741856, -0.000000006265167,
+    -0.298224563846509, -0.000000003132583]).reshape(D,nf)
+  sigma_init = 0.01 * numpy.ones(D, 'float64')
+
+  # Runs the full PLDA trainer (10 EM iterations) with both implementations
+  # Defines base trainer and machine
+  t = PLDATrainer(10)
+  t_py = PythonPLDATrainer(max_iterations=10)
+  m = PLDABase(D,nf,ng)
+  m_py = PLDABase(D,nf,ng)
+
+  # Sets the same initialization methods
+  t.init_f_method = 'BETWEEN_SCATTER'
+  t.init_g_method = 'WITHIN_SCATTER'
+  t.init_sigma_method = 'VARIANCE_DATA'
+
+  t.train(m, l)
+  t_py.train(m_py, l)
+  assert numpy.allclose(m.mu, m_py.mu)
+  assert numpy.allclose(m.f, m_py.f)
+  assert numpy.allclose(m.g, m_py.g)
+  assert numpy.allclose(m.sigma, m_py.sigma)
+
+
+def test_plda_EM_vs_Prince():
+  # Data used for performing the tests
+  # Features and subspaces dimensionality
+  dim_d = 7
+  dim_f = 2
+  dim_g = 3
+
+  # first identity (4 samples)
+  a = numpy.array([
+    [1,2,3,4,5,6,7],
+    [7,8,3,3,1,8,2],
+    [3,2,1,4,5,1,7],
+    [9,0,3,2,1,4,6],
+    ], dtype='float64')
+
+  # second identity (3 samples)
+  b = numpy.array([
+    [5,6,3,4,2,0,2],
+    [1,7,8,9,4,4,8],
+    [8,7,2,5,1,1,1],
+    ], dtype='float64')
+
+  # list of arrays (training data)
+  l = [a,b]
+
+  # initial values for F, G and sigma
+  G_init=numpy.array([-1.1424, -0.5044, -0.1917,
+    -0.6249,  0.1021, -0.8658,
+    -1.1687,  1.1963,  0.1807,
+    0.3926,  0.1203,  1.2665,
+    1.3018, -1.0368, -0.2512,
+    -0.5936, -0.8571, -0.2046,
+    0.4364, -0.1699, -2.2015]).reshape(dim_d,dim_g)
+
+  # F <-> PCA on G
+  F_init=numpy.array([-0.054222647972093, -0.000000000783146,
+    0.596449127693018,  0.000000006265167,
+    0.298224563846509,  0.000000003132583,
+    0.447336845769764,  0.000000009397750,
+    -0.108445295944185, -0.000000001566292,
+    -0.501559493741856, -0.000000006265167,
+    -0.298224563846509, -0.000000003132583]).reshape(dim_d,dim_f)
+  sigma_init = 0.01 * numpy.ones(dim_d, 'float64')
+
+  # Defines reference results based on Prince's Matlab implementation
+  # After 1 iteration
+  z_first_order_a_1 = numpy.array(
+    [-2.624115900658397, -0.000000034277848,  1.554823055585319,  0.627476234024656, -0.264705934182394,
+     -2.624115900658397, -0.000000034277848, -2.703482671599357, -1.533283607433197,  0.553725774828231,
+     -2.624115900658397, -0.000000034277848,  2.311647528461115,  1.266362142140170, -0.317378177105131,
+     -2.624115900658397, -0.000000034277848, -1.163402640008200, -0.372604542926019,  0.025152800097991
+    ]).reshape(4, dim_f+dim_g)
+  z_first_order_b_1 = numpy.array(
+    [ 3.494168818797438,  0.000000045643026,  0.111295550530958, -0.029241422535725,  0.257045446451067,
+      3.494168818797438,  0.000000045643026,  1.102110715965762,  1.481232954001794, -0.970661225144399,
+      3.494168818797438,  0.000000045643026, -1.212854031699468, -1.435946529317718,  0.717884143973377
+    ]).reshape(3, dim_f+dim_g)
+
+  z_second_order_sum_1 = numpy.array(
+    [64.203518285366087,  0.000000747228248,  0.002703277337642,  0.078542842475345,  0.020894328259862,
+      0.000000747228248,  6.999999999999980, -0.000000003955962,  0.000000002017232, -0.000000003741593,
+      0.002703277337642, -0.000000003955962, 19.136889380923918, 11.860493771107487, -4.584339465366988,
+      0.078542842475345,  0.000000002017232, 11.860493771107487,  8.771502339750128, -3.905706024997424,
+      0.020894328259862, -0.000000003741593, -4.584339465366988, -3.905706024997424,  2.011924970338584
+    ]).reshape(dim_f+dim_g, dim_f+dim_g)
+
+  sigma_1 = numpy.array(
+      [2.193659969999207, 3.748361365521041, 0.237835235737085,
+        0.558546035892629, 0.209272700958400, 1.717782807724451,
+        0.248414618308223])
+
+  F_1 = numpy.array(
+      [-0.059083416465692,  0.000000000751007,
+        0.600133217253169,  0.000000006957266,
+        0.302789123922871,  0.000000000218947,
+        0.454540641429714,  0.000000003342540,
+        -0.106608957780613, -0.000000001641389,
+        -0.494267694269430, -0.000000011059552,
+        -0.295956102084270, -0.000000006718366]).reshape(dim_d,dim_f)
+
+  G_1 = numpy.array(
+      [-1.836166150865047,  2.491475145758734,  5.095958946372235,
+        -0.608732205531767, -0.618128420353493, -1.085423135463635,
+        -0.697390472635929, -1.047900122276840, -6.080211153116984,
+        0.769509301515319, -2.763610156675313, -5.972172587527176,
+        1.332474692714491, -1.368103875407414, -2.096382536513033,
+        0.304135903830416, -5.168096082564016, -9.604769461465978,
+        0.597445549865284, -1.347101803379971, -5.900246013340080]).reshape(dim_d,dim_g)
+
+  # After 2 iterations
+  z_first_order_a_2 = numpy.array(
+      [-2.144344161196005, -0.000000027851878,  1.217776189037369,  0.232492571855061, -0.212892893868819,
+        -2.144344161196005, -0.000000027851878, -2.382647766948079, -1.759951013670071,  0.587213207926731,
+        -2.144344161196005, -0.000000027851878,  2.143294830538722,  0.909307594408923, -0.183752098508072,
+        -2.144344161196005, -0.000000027851878, -0.662558006326892,  0.717992497547010, -0.202897892977004
+    ]).reshape(4, dim_f+dim_g)
+  z_first_order_b_2 = numpy.array(
+      [ 2.695117129662246,  0.000000035005543, -0.156173294945791, -0.123083763746364,  0.271123341933619,
+        2.695117129662246,  0.000000035005543,  0.690321563509753,  0.944473716646212, -0.850835940962492,
+        2.695117129662246,  0.000000035005543, -0.930970138998433, -0.949736472690315,  0.594216348861889
+    ]).reshape(3, dim_f+dim_g)
+
+  z_second_order_sum_2 = numpy.array(
+      [41.602421167226410,  0.000000449434708, -1.513391506933811, -0.477818674270533,  0.059260102368316,
+        0.000000449434708,  7.000000000000005, -0.000000023255959, -0.000000005157439, -0.000000003230262,
+        -1.513391506933810, -0.000000023255959, 14.399631061987494,  8.068678077509025, -3.227586434905497,
+        -0.477818674270533, -0.000000005157439,  8.068678077509025,  7.263248678863863, -3.060665688064639,
+        0.059260102368316, -0.000000003230262, -3.227586434905497, -3.060665688064639,  1.705174220723198
+    ]).reshape(dim_f+dim_g, dim_f+dim_g)
+
+  sigma_2 = numpy.array(
+    [1.120493935052524, 1.777598857891599, 0.197579528599150,
+      0.407657093211478, 0.166216300651473, 1.044336960403809,
+      0.287856936559308])
+
+  F_2 = numpy.array(
+    [-0.111956311978966,  0.000000000781025,
+      0.702502767389263,  0.000000007683917,
+      0.337823622542517,  0.000000000637302,
+      0.551363737526339,  0.000000004854293,
+     -0.096561040511417, -0.000000001716011,
+     -0.661587484803602, -0.000000012394362,
+     -0.346593051621620, -0.000000007134046]).reshape(dim_d,dim_f)
+
+  G_2 = numpy.array(
+    [-2.266404374274820,  4.089199685832099,  7.023039382876370,
+      0.094887459097613, -3.226829318470136, -3.452279917194724,
+     -0.498398131733141, -1.651712333649899, -6.548008210704172,
+      0.574932298590327, -2.198978667003715, -5.131253543126156,
+      1.415857426810629, -1.627795701160212, -2.509013676007012,
+     -0.543552834305580, -3.215063993186718, -7.006305082499653,
+      0.562108137758111, -0.785296641855087, -5.318335345720314]).reshape(dim_d,dim_g)
+
+  # Runs the PLDA trainer EM-steps (2 steps)
+
+  # Defines base trainer and machine
+  t = PLDATrainer()
+  t0 = PLDATrainer(t)   # copy-constructed trainer (exercises the copy constructor)
+  m = PLDABase(dim_d,dim_f,dim_g)
+  t.initialize(m,l)
+  m.sigma = sigma_init
+  m.g = G_init
+  m.f = F_init
+
+  # Defines base trainer and machine (for the Python implementation)
+  t_py = PythonPLDATrainer()
+  m_py = PLDABase(dim_d,dim_f,dim_g)
+  t_py.initialize(m_py,l)
+  m_py.sigma = sigma_init
+  m_py.g = G_init
+  m_py.f = F_init
+
+  # E-step 1
+  t.e_step(m,l)
+  t_py.e_step(m_py,l)
+  # Compares statistics to Prince matlab reference
+  assert numpy.allclose(t.z_first_order[0], z_first_order_a_1, 1e-10)
+  assert numpy.allclose(t.z_first_order[1], z_first_order_b_1, 1e-10)
+  assert numpy.allclose(t.z_second_order_sum, z_second_order_sum_1, 1e-10)
+  # Compares statistics against the ones of the python implementation
+  assert numpy.allclose(t.z_first_order[0], t_py.m_z_first_order[0], 1e-10)
+  assert numpy.allclose(t.z_first_order[1], t_py.m_z_first_order[1], 1e-10)
+  assert numpy.allclose(t.z_second_order_sum, t_py.m_sum_z_second_order, 1e-10)
+
+  # M-step 1
+  t.m_step(m,l)
+  t_py.m_step(m_py,l)
+  # Compares F, G and sigma to Prince matlab reference
+  assert numpy.allclose(m.f, F_1, 1e-10)
+  assert numpy.allclose(m.g, G_1, 1e-10)
+  assert numpy.allclose(m.sigma, sigma_1, 1e-10)
+  # Compares F, G and sigma to the ones of the python implementation
+  assert numpy.allclose(m.f, m_py.f, 1e-10)
+  assert numpy.allclose(m.g, m_py.g, 1e-10)
+  assert numpy.allclose(m.sigma, m_py.sigma, 1e-10)
+
+  # E-step 2
+  t.e_step(m,l)
+  t_py.e_step(m_py,l)
+  # Compares statistics to Prince matlab reference
+  assert numpy.allclose(t.z_first_order[0], z_first_order_a_2, 1e-10)
+  assert numpy.allclose(t.z_first_order[1], z_first_order_b_2, 1e-10)
+  assert numpy.allclose(t.z_second_order_sum, z_second_order_sum_2, 1e-10)
+  # Compares statistics against the ones of the python implementation
+  assert numpy.allclose(t.z_first_order[0], t_py.m_z_first_order[0], 1e-10)
+  assert numpy.allclose(t.z_first_order[1], t_py.m_z_first_order[1], 1e-10)
+  assert numpy.allclose(t.z_second_order_sum, t_py.m_sum_z_second_order, 1e-10)
+
+  # M-step 2
+  t.m_step(m,l)
+  t_py.m_step(m_py,l)
+  # Compares F, G and sigma to Prince matlab reference
+  assert numpy.allclose(m.f, F_2, 1e-10)
+  assert numpy.allclose(m.g, G_2, 1e-10)
+  assert numpy.allclose(m.sigma, sigma_2, 1e-10)
+  # Compares F, G and sigma to the ones of the python implementation
+  assert numpy.allclose(m.f, m_py.f, 1e-10)
+  assert numpy.allclose(m.g, m_py.g, 1e-10)
+  assert numpy.allclose(m.sigma, m_py.sigma, 1e-10)
+
+
+  # Test the second order statistics computation
+  # Calls the initialization methods and resets randomly initialized values
+  # to new reference ones (to make the tests deterministic)
+  t.use_sum_second_order = False
+  t.initialize(m,l)
+  m.sigma = sigma_init
+  m.g = G_init
+  m.f = F_init
+  t_py.initialize(m_py,l)
+  m_py.sigma = sigma_init
+  m_py.g = G_init
+  m_py.f = F_init
+
+  # E-step 1
+  t.e_step(m,l)
+  t_py.e_step(m_py,l)
+  # Compares statistics to Prince matlab reference
+  assert numpy.allclose(t.z_first_order[0], z_first_order_a_1, 1e-10)
+  assert numpy.allclose(t.z_first_order[1], z_first_order_b_1, 1e-10)
+  # Compares statistics against the ones of the python implementation
+  assert numpy.allclose(t.z_first_order[0], t_py.m_z_first_order[0], 1e-10)
+  assert numpy.allclose(t.z_first_order[1], t_py.m_z_first_order[1], 1e-10)
+  assert numpy.allclose(t.z_second_order[0], t_py.m_z_second_order[0], 1e-10)
+  assert numpy.allclose(t.z_second_order[1], t_py.m_z_second_order[1], 1e-10)
+  assert numpy.allclose(t.z_second_order_sum, t_py.m_sum_z_second_order, 1e-10)
+
+  # M-step 1
+  t.m_step(m,l)
+  t_py.m_step(m_py,l)
+  # Compares F, G and sigma to the ones of the python implementation
+  assert numpy.allclose(m.f, m_py.f, 1e-10)
+  assert numpy.allclose(m.g, m_py.g, 1e-10)
+  assert numpy.allclose(m.sigma, m_py.sigma, 1e-10)
+
+  # E-step 2
+  t.e_step(m,l)
+  t_py.e_step(m_py,l)
+  # Compares statistics to Prince matlab reference
+  assert numpy.allclose(t.z_first_order[0], z_first_order_a_2, 1e-10)
+  assert numpy.allclose(t.z_first_order[1], z_first_order_b_2, 1e-10)
+  # Compares statistics against the ones of the python implementation
+  assert numpy.allclose(t.z_first_order[0], t_py.m_z_first_order[0], 1e-10)
+  assert numpy.allclose(t.z_first_order[1], t_py.m_z_first_order[1], 1e-10)
+  assert numpy.allclose(t.z_second_order[0], t_py.m_z_second_order[0], 1e-10)
+  assert numpy.allclose(t.z_second_order[1], t_py.m_z_second_order[1], 1e-10)
+  assert numpy.allclose(t.z_second_order_sum, t_py.m_sum_z_second_order, 1e-10)
+
+  # M-step 2
+  t.m_step(m,l)
+  t_py.m_step(m_py,l)
+  # Compares F, G and sigma to the ones of the python implementation
+  assert numpy.allclose(m.f, m_py.f, 1e-10)
+  assert numpy.allclose(m.g, m_py.g, 1e-10)
+  assert numpy.allclose(m.sigma, m_py.sigma, 1e-10)
+
+
+def test_plda_enrollment():
+  # Data used for performing the tests
+  # Features and subspaces dimensionality
+  dim_d = 7
+  dim_f = 2
+  dim_g = 3
+
+  # initial values for F, G and sigma
+  G_init=numpy.array([-1.1424, -0.5044, -0.1917,
+    -0.6249,  0.1021, -0.8658,
+    -1.1687,  1.1963,  0.1807,
+    0.3926,  0.1203,  1.2665,
+    1.3018, -1.0368, -0.2512,
+    -0.5936, -0.8571, -0.2046,
+    0.4364, -0.1699, -2.2015]).reshape(dim_d,dim_g)
+  # F <-> PCA on G
+  F_init=numpy.array([-0.054222647972093, -0.000000000783146,
+    0.596449127693018,  0.000000006265167,
+    0.298224563846509,  0.000000003132583,
+    0.447336845769764,  0.000000009397750,
+    -0.108445295944185, -0.000000001566292,
+    -0.501559493741856, -0.000000006265167,
+    -0.298224563846509, -0.000000003132583]).reshape(dim_d,dim_f)
+  sigma_init = 0.01 * numpy.ones((dim_d,), 'float64')
+  mean_zero = numpy.zeros((dim_d,), 'float64')
+
+  # base machine
+  mb = PLDABase(dim_d,dim_f,dim_g)
+  mb.sigma = sigma_init
+  mb.g = G_init
+  mb.f = F_init
+  mb.mu = mean_zero
+
+  # Data for likelihood computation
+  x1 = numpy.array([0.8032, 0.3503, 0.4587, 0.9511, 0.1330, 0.0703, 0.7061])
+  x2 = numpy.array([0.9317, 0.1089, 0.6517, 0.1461, 0.6940, 0.6256, 0.0437])
+  x3 = numpy.array([0.7979, 0.9862, 0.4367, 0.3447, 0.0488, 0.2252, 0.5810])
+  a_enrol = []
+  a_enrol.append(x1)
+  a_enrol.append(x2)
+  a_enrol = numpy.array(a_enrol)
+
+  # reference likelihood from Prince implementation
+  ll_ref = -182.8880743535197
+
+  # Computes the likelihood using x1 and x2 as enrollment samples
+  # and x3 as a probe sample
+  m = PLDAMachine(mb)
+  t = PLDATrainer()
+  t.enrol(m, a_enrol)
+  ll = m.compute_log_likelihood(x3)
+  
+  assert abs(ll - ll_ref) < 1e-10
+
+  # reference obtained by computing the likelihood of [x1,x2,x3], [x1,x2]
+  # and [x3] separately
+  llr_ref = -4.43695386675
+  llr = m(x3)
+  assert abs(llr - llr_ref) < 1e-10
+  # the same ratio, computed explicitly from the three likelihoods
+  llr_separate = m.compute_log_likelihood(numpy.array([x1,x2,x3]), False) - \
+    (m.compute_log_likelihood(numpy.array([x1,x2]), False) + m.compute_log_likelihood(numpy.array([x3]), False))
+  assert abs(llr - llr_separate) < 1e-10
+
+
+
+def test_plda_comparisons():
+
+  t1 = PLDATrainer()
+  t2 = PLDATrainer()
+
+  #t2.rng = t1.rng
+
+  assert t1 == t2
+  assert not (t1 != t2)
+  assert t1.is_similar_to(t2)
+
+  training_set = [numpy.array([[1,2,3,4]], numpy.float64), numpy.array([[3,4,3,4]], numpy.float64)]
+  m = PLDABase(4,1,1,1e-8)
+  t1.rng.seed(37)
+  t1.initialize(m, training_set)
+  t1.e_step(m, training_set)
+  t1.m_step(m, training_set)
+  assert not (t1 == t2)
+  assert t1 != t2
+  assert not t1.is_similar_to(t2)
+  t2.rng.seed(37)
+  t2.initialize(m, training_set)
+  t2.e_step(m, training_set)
+  t2.m_step(m, training_set)
+  assert t1 == t2
+  assert not (t1 != t2)
+  assert t1.is_similar_to(t2)
+  t2.rng.seed(77)
+  t2.initialize(m, training_set)
+  t2.e_step(m, training_set)
+  t2.m_step(m, training_set)
+  assert not (t1 == t2)
+  assert t1 != t2
+  assert not t1.is_similar_to(t2)
+
+  
diff --git a/bob/learn/em/test/test_ztnorm.py b/bob/learn/em/test/test_ztnorm.py
new file mode 100644
index 0000000000000000000000000000000000000000..7eaf31a432f400b3975a80bb7f1aa2e98d0e32e2
--- /dev/null
+++ b/bob/learn/em/test/test_ztnorm.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Francois Moulin <Francois.Moulin@idiap.ch>
+# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+# Tue Jul 19 15:33:20 2011 +0200
+#
+# Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+
+"""Tests on the ZTNorm function
+"""
+
+import numpy
+
+from bob.io.base.test_utils import datafile
+import bob.io.base
+
+#from . import znorm, tnorm, ztnorm
+import bob.learn.em
+
+def sameValue(vect_A, vect_B):
+  sameMatrix = numpy.zeros((vect_A.shape[0], vect_B.shape[0]), 'bool')
+
+  for j in range(vect_A.shape[0]):
+    for i in range(vect_B.shape[0]):
+      sameMatrix[j, i] = (vect_A[j] == vect_B[i])
+
+  return sameMatrix
+
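+# Pure-numpy reference implementations used to cross-check the C++ bindings:
+# tnorm normalizes each probe (column) of A with the mean/std of its scores
+# against the T-norm cohort models in C; znorm normalizes each model (row)
+# of A with the mean/std of its scores against the Z-norm cohort probes in B.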
+def tnorm(A, C):
+  Cmean = numpy.mean(C, axis=0)
+  if C.shape[0] > 1:
+    Cstd = numpy.sqrt(numpy.sum((C - numpy.tile(Cmean.reshape(1,C.shape[1]), (C.shape[0],1))) ** 2, axis=0) / (C.shape[0]-1))
+  else:
+    Cstd = numpy.ones(shape=(C.shape[1],), dtype=numpy.float64)
+  return (A - numpy.tile(Cmean.reshape(1,C.shape[1]), (A.shape[0],1))) / numpy.tile(Cstd.reshape(1,C.shape[1]), (A.shape[0],1))
+
+def znorm(A, B):
+  Bmean = numpy.mean(B, axis=1)
+  if B.shape[1] > 1:
+    Bstd = numpy.sqrt(numpy.sum((B - numpy.tile(Bmean.reshape(B.shape[0],1), (1,B.shape[1]))) ** 2, axis=1) / (B.shape[1]-1))
+  else:
+    Bstd = numpy.ones(shape=(B.shape[0],), dtype=numpy.float64)
+
+  return (A - numpy.tile(Bmean.reshape(B.shape[0],1), (1,A.shape[1]))) / numpy.tile(Bstd.reshape(B.shape[0],1), (1,A.shape[1]))
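+
+# A minimal composition sketch (an assumption about the relationship between
+# the reference functions above and the C++ ztnorm binding, not itself part
+# of the tested API): without a mask, ZT-norm amounts to a Z-norm of the raw
+# scores followed by a T-norm against the z-normalized cohort scores.
+def ztnorm_py(A, B, C, D):
+  zA = znorm(A, B)   # z-normalize probe-vs-model scores, per model (row)
+  zC = znorm(C, D)   # z-normalize the T-norm cohort scores, per T-model (row)
+  return tnorm(zA, zC)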
+
+
+def test_ztnorm_simple():
+  # 3x5
+  my_A = numpy.array([[1, 2, 3, 4, 5],
+                      [6, 7, 8, 9, 8],
+                      [7, 6, 5, 4, 3]],'float64')
+  # 3x4
+  my_B = numpy.array([[5, 4, 7, 8],[9, 8, 7, 4],[5, 6, 3, 2]],'float64')
+  # 2x5
+  my_C = numpy.array([[5, 4, 3, 2, 1],[2, 1, 2, 3, 4]],'float64')
+  # 2x4
+  my_D = numpy.array([[8, 6, 4, 2],[0, 2, 4, 6]],'float64')
+
+  # 4x1
+  znorm_id = numpy.array([1, 2, 3, 4],'uint32')
+  # 2x1
+  tnorm_id = numpy.array([1, 5],'uint32')
+  
+  scores = bob.learn.em.ztnorm(my_A, my_B, my_C, my_D,
+      sameValue(tnorm_id, znorm_id))
+
+  ref_scores = numpy.array([[-4.45473107e+00, -3.29289322e+00, -1.50519101e+01, -8.42086557e-01, 6.46544511e-03], [-8.27619927e-01,  7.07106781e-01,  1.13757710e+01,  2.01641412e+00, 7.63765080e-01], [ 2.52913570e+00,  2.70710678e+00,  1.24400233e+01,  7.07106781e-01, 6.46544511e-03]], 'float64')
+
+  assert (abs(scores - ref_scores) < 1e-7).all()
+
+def test_ztnorm_big():
+  my_A = bob.io.base.load(datafile("ztnorm_eval_eval.hdf5", __name__, path="../data/"))
+  my_B = bob.io.base.load(datafile("ztnorm_znorm_eval.hdf5", __name__, path="../data/"))
+  my_C = bob.io.base.load(datafile("ztnorm_eval_tnorm.hdf5", __name__, path="../data/"))
+  my_D = bob.io.base.load(datafile("ztnorm_znorm_tnorm.hdf5", __name__, path="../data/"))
+
+  # ZT-Norm
+  ref_scores = bob.io.base.load(datafile("ztnorm_result.hdf5", __name__, path="../data/"))
+  scores = bob.learn.em.ztnorm(my_A, my_B, my_C, my_D)
+  assert (abs(scores - ref_scores) < 1e-7).all()
+
+  # T-Norm: cross-check the C++ binding against the Python reference
+  scores = bob.learn.em.tnorm(my_A, my_C)
+  scores_py = tnorm(my_A, my_C)
+  assert (abs(scores - scores_py) < 1e-7).all()
+
+  # Z-Norm: cross-check the C++ binding against the Python reference
+  scores = bob.learn.em.znorm(my_A, my_B)
+  scores_py = znorm(my_A, my_B)
+  assert (abs(scores - scores_py) < 1e-7).all()
+
+def test_tnorm_simple():
+  # 3x5
+  my_A = numpy.array([[1, 2, 3, 4, 5],
+                      [6, 7, 8, 9, 8],
+                      [7, 6, 5, 4, 3]],'float64')
+  # 2x5
+  my_C = numpy.array([[5, 4, 3, 2, 1],[2, 1, 2, 3, 4]],'float64')
+
+  zC = bob.learn.em.tnorm(my_A, my_C)
+  zC_py = tnorm(my_A, my_C)
+  assert (abs(zC - zC_py) < 1e-7).all()
+
+  empty = numpy.zeros(shape=(0,0), dtype=numpy.float64)
+  zC = bob.learn.em.ztnorm(my_A, empty, my_C, empty)
+  assert (abs(zC - zC_py) < 1e-7).all()
+
+def test_znorm_simple():
+  # 3x5
+  my_A = numpy.array([[1, 2, 3, 4, 5],
+                      [6, 7, 8, 9, 8],
+                      [7, 6, 5, 4, 3]], numpy.float64)
+  # 3x4
+  my_B = numpy.array([[5, 4, 7, 8],[9, 8, 7, 4],[5, 6, 3, 2]], numpy.float64)
+
+  zA = bob.learn.em.znorm(my_A, my_B)
+  zA_py = znorm(my_A, my_B)
+  assert (abs(zA - zA_py) < 1e-7).all()
+
+  empty = numpy.zeros(shape=(0,0), dtype=numpy.float64)
+  zA = bob.learn.em.ztnorm(my_A, my_B, empty, empty)
+  assert (abs(zA - zA_py) < 1e-7).all()
diff --git a/bob/learn/em/version.cpp b/bob/learn/em/version.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..d3c36a9a80865f123deb2a1b21b7fb70116749bb
--- /dev/null
+++ b/bob/learn/em/version.cpp
@@ -0,0 +1,224 @@
+/**
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ * @date Mon Apr 14 20:43:48 CEST 2014
+ *
+ * @brief Binds configuration information available from bob
+ */
+
+#ifdef NO_IMPORT_ARRAY
+#undef NO_IMPORT_ARRAY
+#endif
+#include <bob.blitz/capi.h>
+#include <bob.blitz/cleanup.h>
+
+#include <bob.core/config.h>
+#include <bob.io.base/config.h>
+#include <bob.sp/config.h>
+#include <bob.math/config.h>
+#include <bob.learn.activation/config.h>
+#include <bob.learn.linear/config.h>
+// TODO: add other dependencies
+
+#include <string>
+#include <cstdlib>
+#include <blitz/blitz.h>
+#include <boost/preprocessor/stringize.hpp>
+#include <boost/version.hpp>
+#include <boost/format.hpp>
+
+
+static int dict_set(PyObject* d, const char* key, const char* value) {
+  PyObject* v = Py_BuildValue("s", value);
+  if (!v) return 0;
+  int retval = PyDict_SetItemString(d, key, v);
+  Py_DECREF(v);
+  if (retval == 0) return 1; //all good
+  return 0; //a problem occurred
+}
+
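+/**
+ * Sets a dictionary entry from a newly created object, "stealing" its
+ * reference: the value is decref'd whether or not the insertion succeeds
+ */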
+static int dict_steal(PyObject* d, const char* key, PyObject* value) {
+  if (!value) return 0;
+  int retval = PyDict_SetItemString(d, key, value);
+  Py_DECREF(value);
+  if (retval == 0) return 1; //all good
+  return 0; //a problem occurred
+}
+
+/**
+ * Describes the version of Boost libraries installed
+ */
+static PyObject* boost_version() {
+  boost::format f("%d.%d.%d");
+  f % (BOOST_VERSION / 100000);
+  f % (BOOST_VERSION / 100 % 1000);
+  f % (BOOST_VERSION % 100);
+  return Py_BuildValue("s", f.str().c_str());
+}
+
+/**
+ * Describes the compiler version
+ */
+static PyObject* compiler_version() {
+# if defined(__GNUC__) && !defined(__llvm__)
+  boost::format f("%s.%s.%s");
+  f % BOOST_PP_STRINGIZE(__GNUC__);
+  f % BOOST_PP_STRINGIZE(__GNUC_MINOR__);
+  f % BOOST_PP_STRINGIZE(__GNUC_PATCHLEVEL__);
+  return Py_BuildValue("ss", "gcc", f.str().c_str());
+# elif defined(__llvm__) && !defined(__clang__)
+  return Py_BuildValue("ss", "llvm-gcc", __VERSION__);
+# elif defined(__clang__)
+  return Py_BuildValue("ss", "clang", __clang_version__);
+# else
+  return Py_BuildValue("s", "unsupported");
+# endif
+}
+
+/**
+ * Python version with which we compiled the extensions
+ */
+static PyObject* python_version() {
+  boost::format f("%s.%s.%s");
+  f % BOOST_PP_STRINGIZE(PY_MAJOR_VERSION);
+  f % BOOST_PP_STRINGIZE(PY_MINOR_VERSION);
+  f % BOOST_PP_STRINGIZE(PY_MICRO_VERSION);
+  return Py_BuildValue("s", f.str().c_str());
+}
+
+/**
+ * Numpy version
+ */
+static PyObject* numpy_version() {
+  return Py_BuildValue("{ssss}", "abi", BOOST_PP_STRINGIZE(NPY_VERSION),
+      "api", BOOST_PP_STRINGIZE(NPY_API_VERSION));
+}
+
+/**
+ * bob.blitz c/c++ api version
+ */
+static PyObject* bob_blitz_version() {
+  return Py_BuildValue("{ss}", "api", BOOST_PP_STRINGIZE(BOB_BLITZ_API_VERSION));
+}
+
+/**
+ * bob.core c/c++ api version
+ */
+static PyObject* bob_core_version() {
+  return Py_BuildValue("{ss}", "api", BOOST_PP_STRINGIZE(BOB_CORE_API_VERSION));
+}
+
+/**
+ * bob.io.base c/c++ api version
+ */
+static PyObject* bob_io_base_version() {
+  return Py_BuildValue("{ss}", "api", BOOST_PP_STRINGIZE(BOB_IO_BASE_API_VERSION));
+}
+
+/**
+ * bob.sp c/c++ api version
+ */
+static PyObject* bob_sp_version() {
+  return Py_BuildValue("{ss}", "api", BOOST_PP_STRINGIZE(BOB_SP_API_VERSION));
+}
+
+/**
+ * bob.math c/c++ api version
+ */
+static PyObject* bob_math_version() {
+  return Py_BuildValue("{ss}", "api", BOOST_PP_STRINGIZE(BOB_MATH_API_VERSION));
+}
+
+/**
+ * bob.learn.activation c/c++ api version
+ */
+static PyObject* bob_learn_activation_version() {
+  return Py_BuildValue("{ss}", "api", BOOST_PP_STRINGIZE(BOB_LEARN_ACTIVATION_API_VERSION));
+}
+
+/**
+ * bob.learn.linear c/c++ api version
+ */
+static PyObject* bob_learn_linear_version() {
+  return Py_BuildValue("{ss}", "api", BOOST_PP_STRINGIZE(BOB_LEARN_LINEAR_API_VERSION));
+}
+
+
+static PyObject* build_version_dictionary() {
+
+  PyObject* retval = PyDict_New();
+  if (!retval) return 0;
+  auto retval_ = make_safe(retval);
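+  // retval_ releases one reference if we leave early; the Py_INCREF before
+  // the successful return below compensates for it, handing the caller a
+  // new reference while error paths still clean up automatically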
+
+  if (!dict_set(retval, "Blitz++", BZ_VERSION)) return 0;
+  if (!dict_steal(retval, "Boost", boost_version())) return 0;
+  if (!dict_steal(retval, "Compiler", compiler_version())) return 0;
+  if (!dict_steal(retval, "Python", python_version())) return 0;
+  if (!dict_steal(retval, "NumPy", numpy_version())) return 0;
+  if (!dict_steal(retval, "bob.blitz", bob_blitz_version())) return 0;
+  if (!dict_steal(retval, "bob.core", bob_core_version())) return 0;
+  if (!dict_steal(retval, "bob.io.base", bob_io_base_version())) return 0;
+  if (!dict_steal(retval, "bob.sp", bob_sp_version())) return 0;
+  if (!dict_steal(retval, "bob.math", bob_math_version())) return 0;
+  if (!dict_steal(retval, "bob.learn.activation", bob_learn_activation_version())) return 0;
+  if (!dict_steal(retval, "bob.learn.linear", bob_learn_linear_version())) return 0;
+  if (!dict_steal(retval, "Bob", bob_core_version())) return 0;
+
+  Py_INCREF(retval);
+  return retval;
+}
+
+static PyMethodDef module_methods[] = {
+    {0}  /* Sentinel */
+};
+
+PyDoc_STRVAR(module_docstr,
+"Information about software used to compile the C++ Bob API"
+);
+
+#if PY_VERSION_HEX >= 0x03000000
+static PyModuleDef module_definition = {
+  PyModuleDef_HEAD_INIT,
+  BOB_EXT_MODULE_NAME,
+  module_docstr,
+  -1,
+  module_methods,
+  0, 0, 0, 0
+};
+#endif
+
+static PyObject* create_module (void) {
+
+# if PY_VERSION_HEX >= 0x03000000
+  PyObject* m = PyModule_Create(&module_definition);
+# else
+  PyObject* m = Py_InitModule3(BOB_EXT_MODULE_NAME, module_methods, module_docstr);
+# endif
+  if (!m) return 0;
+  auto m_ = make_safe(m); ///< protects against early returns
+
+  /* register version numbers and constants */
+  if (PyModule_AddStringConstant(m, "module", BOB_EXT_MODULE_VERSION) < 0)
+    return 0;
+
+  PyObject* externals = build_version_dictionary();
+  if (!externals) return 0;
+  if (PyModule_AddObject(m, "externals", externals) < 0) return 0;
+
+  /* imports dependencies */
+  if (import_bob_blitz() < 0) {
+    PyErr_Print();
+    PyErr_Format(PyExc_ImportError, "cannot import `%s'", BOB_EXT_MODULE_NAME);
+    return 0;
+  }
+
+  Py_INCREF(m);
+  return m;
+
+}
+
+PyMODINIT_FUNC BOB_EXT_ENTRY_NAME (void) {
+# if PY_VERSION_HEX >= 0x03000000
+  return
+# endif
+    create_module();
+}
diff --git a/bob/learn/em/ztnorm.cpp b/bob/learn/em/ztnorm.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2164aed3e63199de8923967ee5fe955942bb2063
--- /dev/null
+++ b/bob/learn/em/ztnorm.cpp
@@ -0,0 +1,146 @@
+/**
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ * @date Sat 31 Jan 02:46:48 2015
+ *
+ * @brief Python API for bob::learn::em
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "main.h"
+
+/*** zt_norm ***/
+static auto zt_norm = bob::extension::FunctionDoc(
+  "ztnorm",
+  "Normalizes raw scores with the ZT-norm: a Z-norm of the raw scores followed by a T-norm against the z-normalized cohort scores.",
+  0,
+  true
+)
+.add_prototype("rawscores_probes_vs_models,rawscores_zprobes_vs_models,rawscores_probes_vs_tmodels,rawscores_zprobes_vs_tmodels,mask_zprobes_vs_tmodels_istruetrial", "output")
+.add_parameter("rawscores_probes_vs_models", "array_like <float, 2D>", "Raw scores of the probes against the models")
+.add_parameter("rawscores_zprobes_vs_models", "array_like <float, 2D>", "Raw scores of the Z-norm cohort probes against the models")
+.add_parameter("rawscores_probes_vs_tmodels", "array_like <float, 2D>", "Raw scores of the probes against the T-norm cohort models")
+.add_parameter("rawscores_zprobes_vs_tmodels", "array_like <float, 2D>", "Raw scores of the Z-norm cohort probes against the T-norm cohort models")
+.add_parameter("mask_zprobes_vs_tmodels_istruetrial", "array_like <bool, 2D>", "[optional] Mask flagging which (T-model, Z-probe) pairs are true (same-subject) trials, so that they can be excluded from the cohort statistics")
+.add_return("output","array_like <float, 2D>","The ZT-normalized scores");
+static PyObject* PyBobLearnEM_ztNorm(PyObject*, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = zt_norm.kwlist(0);
+  
+  PyBlitzArrayObject *rawscores_probes_vs_models_o, *rawscores_zprobes_vs_models_o, *rawscores_probes_vs_tmodels_o,
+  *rawscores_zprobes_vs_tmodels_o, *mask_zprobes_vs_tmodels_istruetrial_o = 0;   // the mask is optional, so initialize it
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&O&O&|O&", kwlist, &PyBlitzArray_Converter, &rawscores_probes_vs_models_o,
+                                                                       &PyBlitzArray_Converter, &rawscores_zprobes_vs_models_o,
+                                                                       &PyBlitzArray_Converter, &rawscores_probes_vs_tmodels_o,
+                                                                       &PyBlitzArray_Converter, &rawscores_zprobes_vs_tmodels_o,
+                                                                       &PyBlitzArray_Converter, &mask_zprobes_vs_tmodels_istruetrial_o)){
+    zt_norm.print_usage();
+    return 0;
+  }
+
+  // protects the acquired arrays against memory leaks (make_xsafe tolerates
+  // the optional mask being absent, i.e. a null pointer)
+  auto rawscores_probes_vs_models_          = make_safe(rawscores_probes_vs_models_o);
+  auto rawscores_zprobes_vs_models_         = make_safe(rawscores_zprobes_vs_models_o);
+  auto rawscores_probes_vs_tmodels_         = make_safe(rawscores_probes_vs_tmodels_o);
+  auto rawscores_zprobes_vs_tmodels_        = make_safe(rawscores_zprobes_vs_tmodels_o);
+  auto mask_zprobes_vs_tmodels_istruetrial_ = make_xsafe(mask_zprobes_vs_tmodels_istruetrial_o);
+
+  blitz::Array<double,2>  rawscores_probes_vs_models = *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_models_o);
+  blitz::Array<double,2> normalized_scores = blitz::Array<double,2>(rawscores_probes_vs_models.extent(0), rawscores_probes_vs_models.extent(1));
+
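+  // dispatch on the number of arguments actually passed: 4 selects the
+  // mask-less overload of bob::learn::em::ztNorm, 5 the masked one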
+  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
+
+  if(nargs==4)
+    bob::learn::em::ztNorm(*PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_models_o),
+                             *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_zprobes_vs_models_o),
+                             *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_tmodels_o),
+                             *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_zprobes_vs_tmodels_o),
+                             normalized_scores);
+  else
+    bob::learn::em::ztNorm(*PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_models_o), 
+                             *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_zprobes_vs_models_o), 
+                             *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_tmodels_o), 
+                             *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_zprobes_vs_tmodels_o), 
+                             *PyBlitzArrayCxx_AsBlitz<bool,2>(mask_zprobes_vs_tmodels_istruetrial_o),
+                             normalized_scores);
+
+  return PyBlitzArrayCxx_AsConstNumpy(normalized_scores);
+}
+
+
+
+/*** t_norm ***/
+static auto t_norm = bob::extension::FunctionDoc(
+  "tnorm",
+  "Normalizes raw scores with the T-norm, using the scores of the probes against a cohort of T-norm models.",
+  0,
+  true
+)
+.add_prototype("rawscores_probes_vs_models,rawscores_probes_vs_tmodels", "output")
+.add_parameter("rawscores_probes_vs_models", "array_like <float, 2D>", "Raw scores of the probes against the models")
+.add_parameter("rawscores_probes_vs_tmodels", "array_like <float, 2D>", "Raw scores of the probes against the T-norm cohort models")
+.add_return("output","array_like <float, 2D>","The T-normalized scores");
+static PyObject* PyBobLearnEM_tNorm(PyObject*, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = t_norm.kwlist(0);
+  
+  PyBlitzArrayObject *rawscores_probes_vs_models_o, *rawscores_probes_vs_tmodels_o;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&", kwlist, &PyBlitzArray_Converter, &rawscores_probes_vs_models_o,
+                                                                       &PyBlitzArray_Converter, &rawscores_probes_vs_tmodels_o)){
+    t_norm.print_usage();
+    return 0;
+  }
+  
+  auto rawscores_probes_vs_models_          = make_safe(rawscores_probes_vs_models_o);
+  auto rawscores_probes_vs_tmodels_         = make_safe(rawscores_probes_vs_tmodels_o);
+
+  blitz::Array<double,2>  rawscores_probes_vs_models = *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_models_o);
+  blitz::Array<double,2> normalized_scores = blitz::Array<double,2>(rawscores_probes_vs_models.extent(0), rawscores_probes_vs_models.extent(1));
+
+  bob::learn::em::tNorm(*PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_models_o), 
+                           *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_tmodels_o),
+                           normalized_scores);
+
+  return PyBlitzArrayCxx_AsConstNumpy(normalized_scores);
+}
+
+
+/*** z_norm ***/
+static auto z_norm = bob::extension::FunctionDoc(
+  "znorm",
+  "Normalizes raw scores with the Z-norm, using the scores of a cohort of Z-norm probes against the models.",
+  0,
+  true
+)
+.add_prototype("rawscores_probes_vs_models,rawscores_zprobes_vs_models", "output")
+.add_parameter("rawscores_probes_vs_models", "array_like <float, 2D>", "Raw scores of the probes against the models")
+.add_parameter("rawscores_zprobes_vs_models", "array_like <float, 2D>", "Raw scores of the Z-norm cohort probes against the models")
+.add_return("output","array_like <float, 2D>","The Z-normalized scores");
+static PyObject* PyBobLearnEM_zNorm(PyObject*, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = z_norm.kwlist(0);
+  
+  PyBlitzArrayObject *rawscores_probes_vs_models_o, *rawscores_zprobes_vs_models_o;
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&", kwlist, &PyBlitzArray_Converter, &rawscores_probes_vs_models_o,
+                                                                       &PyBlitzArray_Converter, &rawscores_zprobes_vs_models_o)){
+    z_norm.print_usage();
+    return 0;
+  }
+  
+  auto rawscores_probes_vs_models_          = make_safe(rawscores_probes_vs_models_o);
+  auto rawscores_zprobes_vs_models_         = make_safe(rawscores_zprobes_vs_models_o);
+
+  blitz::Array<double,2> rawscores_probes_vs_models = *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_models_o);
+  blitz::Array<double,2> normalized_scores          = blitz::Array<double,2>(rawscores_probes_vs_models.extent(0), rawscores_probes_vs_models.extent(1));
+
+
+  bob::learn::em::zNorm(*PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_models_o), 
+                           *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_zprobes_vs_models_o),
+                           normalized_scores);
+
+  return PyBlitzArrayCxx_AsConstNumpy(normalized_scores);
+}
+