diff --git a/bob/learn/em/ivector_machine.cpp b/bob/learn/em/ivector_machine.cpp
index 82df5e057c86f43b1b42ea5101ce97298ee99a90..db6e55d516d6a5fba4986a0473fa73dc7ac8ce57 100644
--- a/bob/learn/em/ivector_machine.cpp
+++ b/bob/learn/em/ivector_machine.cpp
@@ -72,23 +72,23 @@ static int PyBobLearnEMIVectorMachine_init_hdf5(PyBobLearnEMIVectorMachineObject
 static int PyBobLearnEMIVectorMachine_init_ubm(PyBobLearnEMIVectorMachineObject* self, PyObject* args, PyObject* kwargs) {
 
   char** kwlist = IVectorMachine_doc.kwlist(0);
-  
+
   PyBobLearnEMGMMMachineObject* gmm_machine;
   int rt = 1;
   double variance_threshold = 1e-10;
 
-  //Here we have to select which keyword argument to read  
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!i|d", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine, 
+  //Here we have to select which keyword argument to read
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!i|d", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine,
                                                                   &rt, &variance_threshold)){
     IVectorMachine_doc.print_usage();
     return -1;
   }
-    
+
   if(rt < 1){
     PyErr_Format(PyExc_TypeError, "rt argument must be greater than or equal to one");
     return -1;
   }
-  
+
   if(variance_threshold <= 0){
     PyErr_Format(PyExc_TypeError, "variance_threshold argument must be greater than zero");
     return -1;
@@ -130,7 +130,7 @@ static int PyBobLearnEMIVectorMachine_init(PyBobLearnEMIVectorMachineObject* sel
     IVectorMachine_doc.print_usage();
     return -1;
   }
-  
+
   BOB_CATCH_MEMBER("cannot create IVectorMachine", 0)
   return 0;
 }
@@ -189,7 +189,7 @@ static auto supervector_length = bob::extension::VariableDoc(
 
   "Returns the supervector length."
   "NGaussians x NInputs: Number of Gaussian components by the feature dimensionality",
-  
+
   "@warning An exception is thrown if no Universal Background Model has been set yet."
 );
 PyObject* PyBobLearnEMIVectorMachine_getSupervectorLength(PyBobLearnEMIVectorMachineObject* self, void*) {
@@ -320,11 +320,11 @@ int PyBobLearnEMIVectorMachine_setUBM(PyBobLearnEMIVectorMachineObject* self, Py
   self->cxx->setUbm(ubm_gmmMachine->cxx);
 
   return 0;
-  BOB_CATCH_MEMBER("ubm could not be set", -1)  
+  BOB_CATCH_MEMBER("ubm could not be set", -1)
 }
 
 
-static PyGetSetDef PyBobLearnEMIVectorMachine_getseters[] = { 
+static PyGetSetDef PyBobLearnEMIVectorMachine_getseters[] = {
   {
    shape.name(),
    (getter)PyBobLearnEMIVectorMachine_getShape,
@@ -332,7 +332,7 @@ static PyGetSetDef PyBobLearnEMIVectorMachine_getseters[] = {
    shape.doc(),
    0
   },
-  
+
   {
    supervector_length.name(),
    (getter)PyBobLearnEMIVectorMachine_getSupervectorLength,
@@ -340,7 +340,7 @@ static PyGetSetDef PyBobLearnEMIVectorMachine_getseters[] = {
    supervector_length.doc(),
    0
   },
-  
+
   {
    T.name(),
    (getter)PyBobLearnEMIVectorMachine_getT,
@@ -393,9 +393,9 @@ static auto save = bob::extension::FunctionDoc(
 static PyObject* PyBobLearnEMIVectorMachine_Save(PyBobLearnEMIVectorMachineObject* self,  PyObject* args, PyObject* kwargs) {
 
   BOB_TRY
-  
+
   // get list of arguments
-  char** kwlist = save.kwlist(0);  
+  char** kwlist = save.kwlist(0);
   PyBobIoHDF5FileObject* hdf5;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
 
@@ -415,12 +415,12 @@ static auto load = bob::extension::FunctionDoc(
 .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
 static PyObject* PyBobLearnEMIVectorMachine_Load(PyBobLearnEMIVectorMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
-  char** kwlist = load.kwlist(0);  
+
+  char** kwlist = load.kwlist(0);
   PyBobIoHDF5FileObject* hdf5;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-  
-  auto hdf5_ = make_safe(hdf5);  
+
+  auto hdf5_ = make_safe(hdf5);
   self->cxx->load(*hdf5->f);
 
   BOB_CATCH_MEMBER("cannot load the data", 0)
@@ -431,7 +431,7 @@ static PyObject* PyBobLearnEMIVectorMachine_Load(PyBobLearnEMIVectorMachineObjec
 /*** is_similar_to ***/
 static auto is_similar_to = bob::extension::FunctionDoc(
   "is_similar_to",
-  
+
   "Compares this IVectorMachine with the ``other`` one to be approximately the same.",
   "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
   "relative and absolute precision for the ``weights``, ``biases`` "
@@ -456,8 +456,8 @@ static PyObject* PyBobLearnEMIVectorMachine_IsSimilarTo(PyBobLearnEMIVectorMachi
         &PyBobLearnEMIVectorMachine_Type, &other,
         &r_epsilon, &a_epsilon)){
 
-        is_similar_to.print_usage(); 
-        return 0;        
+        is_similar_to.print_usage();
+        return 0;
   }
 
   if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
@@ -468,22 +468,22 @@ static PyObject* PyBobLearnEMIVectorMachine_IsSimilarTo(PyBobLearnEMIVectorMachi
 
 
 
-/*** forward ***/
-static auto forward = bob::extension::FunctionDoc(
-  "forward",
-  "Execute the machine",
-  "", 
+/*** project ***/
+static auto project = bob::extension::FunctionDoc(
+  "project",
+  "Projects the given GMM statistics into the i-vector subspace",
+  ".. note:: The :py:meth:`__call__` function is an alias for this function",
   true
 )
 .add_prototype("stats")
 .add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "Statistics as input");
-static PyObject* PyBobLearnEMIVectorMachine_Forward(PyBobLearnEMIVectorMachineObject* self, PyObject* args, PyObject* kwargs) {
+static PyObject* PyBobLearnEMIVectorMachine_project(PyBobLearnEMIVectorMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
 
-  char** kwlist = forward.kwlist(0);
+  char** kwlist = project.kwlist(0);
 
   PyBobLearnEMGMMStatsObject* stats = 0;
-  
+
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMStats_Type, &stats))
     return 0;
 
@@ -491,8 +491,8 @@ static PyObject* PyBobLearnEMIVectorMachine_Forward(PyBobLearnEMIVectorMachineOb
    self->cxx->forward(*stats->cxx, ivector);
 
   return PyBlitzArrayCxx_AsConstNumpy(ivector);
-  
-  BOB_CATCH_MEMBER("cannot forward", 0)
+
+  BOB_CATCH_MEMBER("cannot project", 0)
 
 }
 
@@ -533,7 +533,7 @@ static PyObject* PyBobLearnEMIVectorMachine_resize(PyBobLearnEMIVectorMachineObj
 static auto __compute_Id_TtSigmaInvT__ = bob::extension::FunctionDoc(
   "__compute_Id_TtSigmaInvT__",
   "",
-  "", 
+  "",
   true
 )
 .add_prototype("stats")
@@ -544,7 +544,7 @@ static PyObject* PyBobLearnEMIVectorMachine_compute_Id_TtSigmaInvT__(PyBobLearnE
   char** kwlist = __compute_Id_TtSigmaInvT__.kwlist(0);
 
   PyBobLearnEMGMMStatsObject* stats = 0;
-  
+
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMStats_Type, &stats))
     return 0;
 
@@ -552,7 +552,7 @@ static PyObject* PyBobLearnEMIVectorMachine_compute_Id_TtSigmaInvT__(PyBobLearnE
   blitz::Array<double,2> output(self->cxx->getDimRt(), self->cxx->getDimRt());
   self->cxx->computeIdTtSigmaInvT(*stats->cxx, output);
   return PyBlitzArrayCxx_AsConstNumpy(output);
-  
+
   BOB_CATCH_MEMBER("cannot __compute_Id_TtSigmaInvT__", 0)
 }
 
@@ -562,7 +562,7 @@ static PyObject* PyBobLearnEMIVectorMachine_compute_Id_TtSigmaInvT__(PyBobLearnE
 static auto __compute_TtSigmaInvFnorm__ = bob::extension::FunctionDoc(
   "__compute_TtSigmaInvFnorm__",
   "",
-  "", 
+  "",
   true
 )
 .add_prototype("stats")
@@ -573,7 +573,7 @@ static PyObject* PyBobLearnEMIVectorMachine_compute_TtSigmaInvFnorm__(PyBobLearn
   char** kwlist = __compute_TtSigmaInvFnorm__.kwlist(0);
 
   PyBobLearnEMGMMStatsObject* stats = 0;
-  
+
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMStats_Type, &stats))
     return 0;
 
@@ -581,7 +581,7 @@ static PyObject* PyBobLearnEMIVectorMachine_compute_TtSigmaInvFnorm__(PyBobLearn
   blitz::Array<double,1> output(self->cxx->getDimRt());
   self->cxx->computeTtSigmaInvFnorm(*stats->cxx, output);
   return PyBlitzArrayCxx_AsConstNumpy(output);
-  
+
   BOB_CATCH_MEMBER("cannot __compute_TtSigmaInvFnorm__", 0)
 }
 
@@ -613,6 +613,12 @@ static PyMethodDef PyBobLearnEMIVectorMachine_methods[] = {
     METH_VARARGS|METH_KEYWORDS,
     resize.doc()
   },
+  {
+    project.name(),
+    (PyCFunction)PyBobLearnEMIVectorMachine_project,
+    METH_VARARGS|METH_KEYWORDS,
+    project.doc()
+  },
   {
     __compute_Id_TtSigmaInvT__.name(),
     (PyCFunction)PyBobLearnEMIVectorMachine_compute_Id_TtSigmaInvT__,
@@ -655,7 +661,7 @@ bool init_BobLearnEMIVectorMachine(PyObject* module)
   PyBobLearnEMIVectorMachine_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnEMIVectorMachine_RichCompare);
   PyBobLearnEMIVectorMachine_Type.tp_methods     = PyBobLearnEMIVectorMachine_methods;
   PyBobLearnEMIVectorMachine_Type.tp_getset      = PyBobLearnEMIVectorMachine_getseters;
-  PyBobLearnEMIVectorMachine_Type.tp_call        = reinterpret_cast<ternaryfunc>(PyBobLearnEMIVectorMachine_Forward);
+  PyBobLearnEMIVectorMachine_Type.tp_call        = reinterpret_cast<ternaryfunc>(PyBobLearnEMIVectorMachine_project);
 
 
   // check that everything is fine
@@ -665,4 +671,3 @@ bool init_BobLearnEMIVectorMachine(PyObject* module)
   Py_INCREF(&PyBobLearnEMIVectorMachine_Type);
   return PyModule_AddObject(module, "IVectorMachine", (PyObject*)&PyBobLearnEMIVectorMachine_Type) >= 0;
 }
-
diff --git a/bob/learn/em/jfa_machine.cpp b/bob/learn/em/jfa_machine.cpp
index 9f1f7e32fb7cddbc0777c6f54f291ebc44b8f6c0..ca7ce963ada9f16ee5f177679860a69ed26ab15c 100644
--- a/bob/learn/em/jfa_machine.cpp
+++ b/bob/learn/em/jfa_machine.cpp
@@ -69,15 +69,15 @@ static int PyBobLearnEMJFAMachine_init_hdf5(PyBobLearnEMJFAMachineObject* self,
 static int PyBobLearnEMJFAMachine_init_jfabase(PyBobLearnEMJFAMachineObject* self, PyObject* args, PyObject* kwargs) {
 
   char** kwlist = JFAMachine_doc.kwlist(0);
-  
+
   PyBobLearnEMJFABaseObject* jfa_base;
 
-  //Here we have to select which keyword argument to read  
+  //Here we have to select which keyword argument to read
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMJFABase_Type, &jfa_base)){
     JFAMachine_doc.print_usage();
     return -1;
   }
-  
+
   self->cxx.reset(new bob::learn::em::JFAMachine(jfa_base->cxx));
   return 0;
 }
@@ -115,7 +115,7 @@ static int PyBobLearnEMJFAMachine_init(PyBobLearnEMJFAMachineObject* self, PyObj
     JFAMachine_doc.print_usage();
     return -1;
   }
-  
+
   BOB_CATCH_MEMBER("cannot create JFAMachine", 0)
   return 0;
 }
@@ -174,7 +174,7 @@ static auto supervector_length = bob::extension::VariableDoc(
 
   "Returns the supervector length."
   "NGaussians x NInputs: Number of Gaussian components by the feature dimensionality",
-  
+
   "@warning An exception is thrown if no Universal Background Model has been set yet."
 );
 PyObject* PyBobLearnEMJFAMachine_getSupervectorLength(PyBobLearnEMJFAMachineObject* self, void*) {
@@ -204,12 +204,12 @@ int PyBobLearnEMJFAMachine_setY(PyBobLearnEMJFAMachineObject* self, PyObject* va
     return -1;
   }
   auto o_ = make_safe(input);
-  
-  // perform check on the input  
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, Y.name());
     return -1;
-  }  
+  }
 
   if (input->ndim != 1){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, Y.name());
@@ -220,8 +220,8 @@ int PyBobLearnEMJFAMachine_setY(PyBobLearnEMJFAMachineObject* self, PyObject* va
     PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d, elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, (Py_ssize_t)self->cxx->getY().extent(0), (Py_ssize_t)input->shape[0], Y.name());
     return -1;
   }
-  
-  
+
+
   auto b = PyBlitzArrayCxx_AsBlitz<double,1>(input, "y");
   if (!b) return -1;
   self->cxx->setY(*b);
@@ -250,23 +250,23 @@ int PyBobLearnEMJFAMachine_setZ(PyBobLearnEMJFAMachineObject* self, PyObject* va
     return -1;
   }
   auto o_ = make_safe(input);
-  
-  // perform check on the input  
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, Z.name());
     return -1;
-  }  
+  }
 
   if (input->ndim != 1){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, Z.name());
     return -1;
-  }  
+  }
 
   if (input->shape[0] != (Py_ssize_t)self->cxx->getZ().extent(0)) {
     PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d, elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, (Py_ssize_t)self->cxx->getZ().extent(0), (Py_ssize_t)input->shape[0], Z.name());
     return -1;
   }
-  
+
   auto b = PyBlitzArrayCxx_AsBlitz<double,1>(input, "z");
   if (!b) return -1;
   self->cxx->setZ(*b);
@@ -323,13 +323,13 @@ int PyBobLearnEMJFAMachine_setJFABase(PyBobLearnEMJFAMachineObject* self, PyObje
   self->cxx->setJFABase(jfa_base_o->cxx);
 
   return 0;
-  BOB_CATCH_MEMBER("jfa_base could not be set", -1)  
+  BOB_CATCH_MEMBER("jfa_base could not be set", -1)
 }
 
 
 
 
-static PyGetSetDef PyBobLearnEMJFAMachine_getseters[] = { 
+static PyGetSetDef PyBobLearnEMJFAMachine_getseters[] = {
   {
    shape.name(),
    (getter)PyBobLearnEMJFAMachine_getShape,
@@ -337,7 +337,7 @@ static PyGetSetDef PyBobLearnEMJFAMachine_getseters[] = {
    shape.doc(),
    0
   },
-  
+
   {
    supervector_length.name(),
    (getter)PyBobLearnEMJFAMachine_getSupervectorLength,
@@ -345,7 +345,7 @@ static PyGetSetDef PyBobLearnEMJFAMachine_getseters[] = {
    supervector_length.doc(),
    0
   },
-  
+
   {
    jfa_base.name(),
    (getter)PyBobLearnEMJFAMachine_getJFABase,
@@ -398,9 +398,9 @@ static auto save = bob::extension::FunctionDoc(
 static PyObject* PyBobLearnEMJFAMachine_Save(PyBobLearnEMJFAMachineObject* self,  PyObject* args, PyObject* kwargs) {
 
   BOB_TRY
-  
+
   // get list of arguments
-  char** kwlist = save.kwlist(0);  
+  char** kwlist = save.kwlist(0);
   PyBobIoHDF5FileObject* hdf5;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
 
@@ -420,12 +420,12 @@ static auto load = bob::extension::FunctionDoc(
 .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
 static PyObject* PyBobLearnEMJFAMachine_Load(PyBobLearnEMJFAMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
-  char** kwlist = load.kwlist(0);  
+
+  char** kwlist = load.kwlist(0);
   PyBobIoHDF5FileObject* hdf5;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-  
-  auto hdf5_ = make_safe(hdf5);  
+
+  auto hdf5_ = make_safe(hdf5);
   self->cxx->load(*hdf5->f);
 
   BOB_CATCH_MEMBER("cannot load the data", 0)
@@ -436,7 +436,7 @@ static PyObject* PyBobLearnEMJFAMachine_Load(PyBobLearnEMJFAMachineObject* self,
 /*** is_similar_to ***/
 static auto is_similar_to = bob::extension::FunctionDoc(
   "is_similar_to",
-  
+
   "Compares this JFAMachine with the ``other`` one to be approximately the same.",
   "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
   "relative and absolute precision for the ``weights``, ``biases`` "
@@ -461,8 +461,8 @@ static PyObject* PyBobLearnEMJFAMachine_IsSimilarTo(PyBobLearnEMJFAMachineObject
         &PyBobLearnEMJFAMachine_Type, &other,
         &r_epsilon, &a_epsilon)){
 
-        is_similar_to.print_usage(); 
-        return 0;        
+        is_similar_to.print_usage();
+        return 0;
   }
 
   if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
@@ -476,7 +476,7 @@ static PyObject* PyBobLearnEMJFAMachine_IsSimilarTo(PyBobLearnEMJFAMachineObject
 static auto estimate_x = bob::extension::FunctionDoc(
   "estimate_x",
   "Estimates the session offset x (LPT assumption) given GMM statistics.",
-  "Estimates x from the GMM statistics considering the LPT assumption, that is the latent session variable x is approximated using the UBM", 
+  "Estimates x from the GMM statistics considering the LPT assumption, that is the latent session variable x is approximated using the UBM",
   true
 )
 .add_prototype("stats,input")
@@ -490,29 +490,29 @@ static PyObject* PyBobLearnEMJFAMachine_estimateX(PyBobLearnEMJFAMachineObject*
   PyBobLearnEMGMMStatsObject* stats = 0;
   PyBlitzArrayObject* input           = 0;
 
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMGMMStats_Type, &stats, 
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMGMMStats_Type, &stats,
                                                                  &PyBlitzArray_Converter,&input))
     return 0;
 
   //protects acquired resources through this scope
   auto input_ = make_safe(input);
-  
-  // perform check on the input  
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, estimate_x.name());
     return 0;
-  }  
+  }
 
   if (input->ndim != 1){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, estimate_x.name());
     return 0;
-  }  
+  }
 
   if (input->shape[0] != (Py_ssize_t)self->cxx->getNGaussians()) {
     PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d, elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, self->cxx->getNInputs(), (Py_ssize_t)input->shape[0], estimate_x.name());
     return 0;
   }
-  
+
   self->cxx->estimateX(*stats->cxx, *PyBlitzArrayCxx_AsBlitz<double,1>(input));
 
   BOB_CATCH_MEMBER("cannot estimate X", 0)
@@ -524,7 +524,7 @@ static PyObject* PyBobLearnEMJFAMachine_estimateX(PyBobLearnEMJFAMachineObject*
 static auto estimate_ux = bob::extension::FunctionDoc(
   "estimate_ux",
   "Estimates Ux (LPT assumption) given GMM statistics.",
-  "Estimates Ux from the GMM statistics considering the LPT assumption, that is the latent session variable x is approximated using the UBM.", 
+  "Estimates Ux from the GMM statistics considering the LPT assumption, that is the latent session variable x is approximated using the UBM.",
   true
 )
 .add_prototype("stats,input")
@@ -538,29 +538,29 @@ static PyObject* PyBobLearnEMJFAMachine_estimateUx(PyBobLearnEMJFAMachineObject*
   PyBobLearnEMGMMStatsObject* stats = 0;
   PyBlitzArrayObject* input           = 0;
 
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMGMMStats_Type, &stats, 
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMGMMStats_Type, &stats,
                                                                  &PyBlitzArray_Converter,&input))
     return 0;
 
   //protects acquired resources through this scope
   auto input_ = make_safe(input);
-  
-  // perform check on the input  
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, estimate_ux.name());
     return 0;
-  }  
+  }
 
   if (input->ndim != 1){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, estimate_ux.name());
     return 0;
-  }  
+  }
 
   if (input->shape[0] != (Py_ssize_t)self->cxx->getNGaussians()*(Py_ssize_t)self->cxx->getNInputs()) {
     PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d, elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, self->cxx->getNInputs()*(Py_ssize_t)self->cxx->getNGaussians(), (Py_ssize_t)input->shape[0], estimate_ux.name());
     return 0;
   }
-  
+
   self->cxx->estimateUx(*stats->cxx, *PyBlitzArrayCxx_AsBlitz<double,1>(input));
 
   BOB_CATCH_MEMBER("cannot estimate Ux", 0)
@@ -572,7 +572,7 @@ static PyObject* PyBobLearnEMJFAMachine_estimateUx(PyBobLearnEMJFAMachineObject*
 static auto forward_ux = bob::extension::FunctionDoc(
   "forward_ux",
   "Computes a score for the given UBM statistics and given the Ux vector",
-  "", 
+  "",
   true
 )
 .add_prototype("stats,ux")
@@ -586,53 +586,53 @@ static PyObject* PyBobLearnEMJFAMachine_ForwardUx(PyBobLearnEMJFAMachineObject*
   PyBobLearnEMGMMStatsObject* stats = 0;
   PyBlitzArrayObject* ux_input        = 0;
 
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMGMMStats_Type, &stats, 
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMGMMStats_Type, &stats,
                                                                  &PyBlitzArray_Converter,&ux_input))
     return 0;
 
   //protects acquired resources through this scope
   auto ux_input_ = make_safe(ux_input);
 
-  // perform check on the input  
+  // perform check on the input
   if (ux_input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, forward_ux.name());
     return 0;
-  }  
+  }
 
   if (ux_input->ndim != 1){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, forward_ux.name());
     return 0;
-  }  
+  }
 
   if (ux_input->shape[0] != (Py_ssize_t)self->cxx->getNGaussians()*(Py_ssize_t)self->cxx->getNInputs()) {
     PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d, elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, (Py_ssize_t)self->cxx->getNGaussians()*(Py_ssize_t)self->cxx->getNInputs(), (Py_ssize_t)ux_input->shape[0], forward_ux.name());
     return 0;
   }
-  
+
   double score = self->cxx->forward(*stats->cxx, *PyBlitzArrayCxx_AsBlitz<double,1>(ux_input));
-  
+
   return Py_BuildValue("d", score);
   BOB_CATCH_MEMBER("cannot forward_ux", 0)
 
 }
 
 
-/*** forward ***/
-static auto forward = bob::extension::FunctionDoc(
-  "forward",
-  "Execute the machine",
-  "", 
+/*** log_likelihood ***/
+static auto log_likelihood = bob::extension::FunctionDoc(
+  "log_likelihood",
+  "Computes the log-likelihood of the given GMM statistics",
+  ".. note:: The :py:meth:`__call__` function is an alias for this function.",
   true
 )
 .add_prototype("stats")
 .add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "Statistics as input");
-static PyObject* PyBobLearnEMJFAMachine_Forward(PyBobLearnEMJFAMachineObject* self, PyObject* args, PyObject* kwargs) {
+static PyObject* PyBobLearnEMJFAMachine_log_likelihood(PyBobLearnEMJFAMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
 
-  char** kwlist = forward.kwlist(0);
+  char** kwlist = log_likelihood.kwlist(0);
 
   PyBobLearnEMGMMStatsObject* stats = 0;
-  
+
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMStats_Type, &stats))
     return 0;
 
@@ -640,7 +640,7 @@ static PyObject* PyBobLearnEMJFAMachine_Forward(PyBobLearnEMJFAMachineObject* se
   double score = self->cxx->forward(*stats->cxx);
 
   return Py_BuildValue("d", score);
-  BOB_CATCH_MEMBER("cannot forward", 0)
+  BOB_CATCH_MEMBER("cannot log_likelihood", 0)
 
 }
 
@@ -664,14 +664,14 @@ static PyMethodDef PyBobLearnEMJFAMachine_methods[] = {
     METH_VARARGS|METH_KEYWORDS,
     is_similar_to.doc()
   },
-  
+
   {
     estimate_x.name(),
     (PyCFunction)PyBobLearnEMJFAMachine_estimateX,
     METH_VARARGS|METH_KEYWORDS,
     estimate_x.doc()
   },
-  
+
   {
     estimate_ux.name(),
     (PyCFunction)PyBobLearnEMJFAMachine_estimateUx,
@@ -685,13 +685,12 @@ static PyMethodDef PyBobLearnEMJFAMachine_methods[] = {
     METH_VARARGS|METH_KEYWORDS,
     forward_ux.doc()
   },
-/*
   {
-    forward.name(),
-    (PyCFunction)PyBobLearnEMJFAMachine_Forward,
+    log_likelihood.name(),
+    (PyCFunction)PyBobLearnEMJFAMachine_log_likelihood,
     METH_VARARGS|METH_KEYWORDS,
-    forward.doc()
-  },*/
+    log_likelihood.doc()
+  },
 
 
   {0} /* Sentinel */
@@ -723,7 +722,7 @@ bool init_BobLearnEMJFAMachine(PyObject* module)
   PyBobLearnEMJFAMachine_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnEMJFAMachine_RichCompare);
   PyBobLearnEMJFAMachine_Type.tp_methods     = PyBobLearnEMJFAMachine_methods;
   PyBobLearnEMJFAMachine_Type.tp_getset      = PyBobLearnEMJFAMachine_getseters;
-  PyBobLearnEMJFAMachine_Type.tp_call        = reinterpret_cast<ternaryfunc>(PyBobLearnEMJFAMachine_Forward);
+  PyBobLearnEMJFAMachine_Type.tp_call        = reinterpret_cast<ternaryfunc>(PyBobLearnEMJFAMachine_log_likelihood);
 
 
   // check that everything is fine
@@ -733,4 +732,3 @@ bool init_BobLearnEMJFAMachine(PyObject* module)
   Py_INCREF(&PyBobLearnEMJFAMachine_Type);
   return PyModule_AddObject(module, "JFAMachine", (PyObject*)&PyBobLearnEMJFAMachine_Type) >= 0;
 }
-
diff --git a/bob/learn/em/plda_machine.cpp b/bob/learn/em/plda_machine.cpp
index 4174084a90a396f22b86709fb5a5c92e035f66bc..0224d96b35615448481ce8db6a4bdbc54c1528bc 100644
--- a/bob/learn/em/plda_machine.cpp
+++ b/bob/learn/em/plda_machine.cpp
@@ -35,7 +35,7 @@ static auto PLDAMachine_doc = bob::extension::ClassDoc(
   .add_prototype("other","")
   .add_prototype("hdf5,plda_base","")
 
-  .add_parameter("plda_base", ":py:class:`bob.learn.em.PLDABase`", "")  
+  .add_parameter("plda_base", ":py:class:`bob.learn.em.PLDABase`", "")
   .add_parameter("other", ":py:class:`bob.learn.em.PLDAMachine`", "A PLDAMachine object to be copied.")
   .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
 
@@ -76,15 +76,15 @@ static int PyBobLearnEMPLDAMachine_init_hdf5(PyBobLearnEMPLDAMachineObject* self
 
 static int PyBobLearnEMPLDAMachine_init_pldabase(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
 
-  char** kwlist = PLDAMachine_doc.kwlist(0);  
+  char** kwlist = PLDAMachine_doc.kwlist(0);
   PyBobLearnEMPLDABaseObject* plda_base;
-  
-  //Here we have to select which keyword argument to read  
+
+  //Here we have to select which keyword argument to read
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMPLDABase_Type, &plda_base)){
     PLDAMachine_doc.print_usage();
     return -1;
   }
-  
+
   self->cxx.reset(new bob::learn::em::PLDAMachine(plda_base->cxx));
   return 0;
 }
@@ -94,7 +94,7 @@ static int PyBobLearnEMPLDAMachine_init(PyBobLearnEMPLDAMachineObject* self, PyO
 
   // get the number of command line arguments
   int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
- 
+
   if(nargs==1){
     //Reading the input argument
     PyObject* arg = 0;
@@ -265,7 +265,7 @@ int PyBobLearnEMPLDAMachine_setPLDABase(PyBobLearnEMPLDAMachineObject* self, PyO
   self->cxx->setPLDABase(plda_base_o->cxx);
 
   return 0;
-  BOB_CATCH_MEMBER("plda_base could not be set", -1)  
+  BOB_CATCH_MEMBER("plda_base could not be set", -1)
 }
 
 
@@ -323,21 +323,21 @@ int PyBobLearnEMPLDAMachine_setLogLikelihood(PyBobLearnEMPLDAMachineObject* self
 }
 
 
-static PyGetSetDef PyBobLearnEMPLDAMachine_getseters[] = { 
+static PyGetSetDef PyBobLearnEMPLDAMachine_getseters[] = {
   {
    shape.name(),
    (getter)PyBobLearnEMPLDAMachine_getShape,
    0,
    shape.doc(),
    0
-  },  
+  },
   {
    n_samples.name(),
    (getter)PyBobLearnEMPLDAMachine_getNSamples,
    (setter)PyBobLearnEMPLDAMachine_setNSamples,
    n_samples.doc(),
    0
-  },  
+  },
   {
    w_sum_xit_beta_xi.name(),
    (getter)PyBobLearnEMPLDAMachine_getWSumXitBetaXi,
@@ -386,9 +386,9 @@ static auto save = bob::extension::FunctionDoc(
 static PyObject* PyBobLearnEMPLDAMachine_Save(PyBobLearnEMPLDAMachineObject* self,  PyObject* args, PyObject* kwargs) {
 
   BOB_TRY
-  
+
   // get list of arguments
-  char** kwlist = save.kwlist(0);  
+  char** kwlist = save.kwlist(0);
   PyBobIoHDF5FileObject* hdf5;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
 
@@ -408,12 +408,12 @@ static auto load = bob::extension::FunctionDoc(
 .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
 static PyObject* PyBobLearnEMPLDAMachine_Load(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
-  char** kwlist = load.kwlist(0);  
+
+  char** kwlist = load.kwlist(0);
   PyBobIoHDF5FileObject* hdf5;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-  
-  auto hdf5_ = make_safe(hdf5);  
+
+  auto hdf5_ = make_safe(hdf5);
   self->cxx->load(*hdf5->f);
 
   BOB_CATCH_MEMBER("cannot load the data", 0)
@@ -424,7 +424,7 @@ static PyObject* PyBobLearnEMPLDAMachine_Load(PyBobLearnEMPLDAMachineObject* sel
 /*** is_similar_to ***/
 static auto is_similar_to = bob::extension::FunctionDoc(
   "is_similar_to",
-  
+
   "Compares this PLDAMachine with the ``other`` one to be approximately the same.",
   "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
   "relative and absolute precision for the ``weights``, ``biases`` "
@@ -449,8 +449,8 @@ static PyObject* PyBobLearnEMPLDAMachine_IsSimilarTo(PyBobLearnEMPLDAMachineObje
         &PyBobLearnEMPLDAMachine_Type, &other,
         &r_epsilon, &a_epsilon)){
 
-        is_similar_to.print_usage(); 
-        return 0;        
+        is_similar_to.print_usage();
+        return 0;
   }
 
   if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
@@ -473,7 +473,7 @@ static auto get_gamma = bob::extension::FunctionDoc(
 .add_return("output","array_like <float, 2D>","Get the :math:`\\gamma` matrix");
 static PyObject* PyBobLearnEMPLDAMachine_getGamma(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = get_gamma.kwlist(0);
 
   int i = 0;
@@ -497,7 +497,7 @@ static auto has_gamma = bob::extension::FunctionDoc(
 .add_return("output","bool","");
 static PyObject* PyBobLearnEMPLDAMachine_hasGamma(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = has_gamma.kwlist(0);
   int i = 0;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) return 0;
@@ -506,7 +506,7 @@ static PyObject* PyBobLearnEMPLDAMachine_hasGamma(PyBobLearnEMPLDAMachineObject*
     Py_RETURN_TRUE;
   else
     Py_RETURN_FALSE;
- BOB_CATCH_MEMBER("`has_gamma` could not be read", 0)    
+ BOB_CATCH_MEMBER("`has_gamma` could not be read", 0)
 }
 
 
@@ -524,7 +524,7 @@ static auto get_add_gamma = bob::extension::FunctionDoc(
 .add_return("output","array_like <float, 2D>","");
 static PyObject* PyBobLearnEMPLDAMachine_getAddGamma(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = get_add_gamma.kwlist(0);
 
   int i = 0;
@@ -548,7 +548,7 @@ static auto has_log_like_const_term = bob::extension::FunctionDoc(
 .add_return("output","bool","");
 static PyObject* PyBobLearnEMPLDAMachine_hasLogLikeConstTerm(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = has_log_like_const_term.kwlist(0);
   int i = 0;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) return 0;
@@ -557,7 +557,7 @@ static PyObject* PyBobLearnEMPLDAMachine_hasLogLikeConstTerm(PyBobLearnEMPLDAMac
     Py_RETURN_TRUE;
   else
     Py_RETURN_FALSE;
- BOB_CATCH_MEMBER("`has_log_like_const_term` could not be read", 0)    
+ BOB_CATCH_MEMBER("`has_log_like_const_term` could not be read", 0)
 }
 
 
@@ -575,14 +575,14 @@ static auto get_add_log_like_const_term = bob::extension::FunctionDoc(
 .add_return("output","double","");
 static PyObject* PyBobLearnEMPLDAMachine_getAddLogLikeConstTerm(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = get_add_log_like_const_term.kwlist(0);
   int i = 0;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) return 0;
 
   return Py_BuildValue("d",self->cxx->getAddLogLikeConstTerm(i));
 
-  BOB_CATCH_MEMBER("`get_add_log_like_const_term` could not be read", 0)    
+  BOB_CATCH_MEMBER("`get_add_log_like_const_term` could not be read", 0)
 }
 
 
@@ -599,14 +599,14 @@ static auto get_log_like_const_term = bob::extension::FunctionDoc(
 .add_return("output","double","");
 static PyObject* PyBobLearnEMPLDAMachine_getLogLikeConstTerm(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = get_log_like_const_term.kwlist(0);
   int i = 0;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) return 0;
 
   return Py_BuildValue("d",self->cxx->getLogLikeConstTerm(i));
 
-  BOB_CATCH_MEMBER("`get_log_like_const_term` could not be read", 0)    
+  BOB_CATCH_MEMBER("`get_log_like_const_term` could not be read", 0)
 }
 
 /***** clear_maps *****/
@@ -619,11 +619,11 @@ static auto clear_maps = bob::extension::FunctionDoc(
 .add_prototype("","");
 static PyObject* PyBobLearnEMPLDAMachine_clearMaps(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   self->cxx->clearMaps();
   Py_RETURN_NONE;
 
-  BOB_CATCH_MEMBER("`clear_maps` could not be read", 0)    
+  BOB_CATCH_MEMBER("`clear_maps` could not be read", 0)
 }
 
 
@@ -640,30 +640,30 @@ static auto compute_log_likelihood = bob::extension::FunctionDoc(
 .add_return("output","float","The log-likelihood");
 static PyObject* PyBobLearnEMPLDAMachine_computeLogLikelihood(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = compute_log_likelihood.kwlist(0);
-  
+
   PyBlitzArrayObject* samples;
   PyObject* with_enrolled_samples = Py_True;
-   
+
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&|O!", kwlist, &PyBlitzArray_Converter, &samples,
                                                                   &PyBool_Type, &with_enrolled_samples)) return 0;
   auto samples_ = make_safe(samples);
-  
+
   /*Using the proper method according to the dimension*/
   if (samples->ndim==1)
     return Py_BuildValue("d",self->cxx->computeLogLikelihood(*PyBlitzArrayCxx_AsBlitz<double,1>(samples), f(with_enrolled_samples)));
   else
     return Py_BuildValue("d",self->cxx->computeLogLikelihood(*PyBlitzArrayCxx_AsBlitz<double,2>(samples), f(with_enrolled_samples)));
-    
 
-  BOB_CATCH_MEMBER("`compute_log_likelihood` could not be read", 0)    
+
+  BOB_CATCH_MEMBER("`compute_log_likelihood` could not be read", 0)
 }
 
 
-/***** forward *****/
-static auto forward = bob::extension::FunctionDoc(
-  "forward",
+/***** log_likelihood_ratio *****/
+static auto log_likelihood_ratio = bob::extension::FunctionDoc(
+  "log_likelihood_ratio",
   "Computes a log likelihood ratio from a 1D or 2D blitz::Array",
   0,
   true
@@ -671,10 +671,10 @@ static auto forward = bob::extension::FunctionDoc(
 .add_prototype("samples","output")
 .add_parameter("samples", "array_like <float, 1D>,array_like <float, 2D>", "Sample")
 .add_return("output","float","The log-likelihood ratio");
-static PyObject* PyBobLearnEMPLDAMachine_forward(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
+static PyObject* PyBobLearnEMPLDAMachine_log_likelihood_ratio(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
-  char** kwlist = forward.kwlist(0);
+
+  char** kwlist = log_likelihood_ratio.kwlist(0);
 
   PyBlitzArrayObject* samples;
 
@@ -688,7 +688,7 @@ static PyObject* PyBobLearnEMPLDAMachine_forward(PyBobLearnEMPLDAMachineObject*
   else
     return Py_BuildValue("d",self->cxx->forward(*PyBlitzArrayCxx_AsBlitz<double,2>(samples)));
 
-  BOB_CATCH_MEMBER("`forward` could not be read", 0)    
+  BOB_CATCH_MEMBER("log_likelihood_ratio could not be executed", 0)
 }
 
 
@@ -734,7 +734,7 @@ static PyMethodDef PyBobLearnEMPLDAMachine_methods[] = {
     (PyCFunction)PyBobLearnEMPLDAMachine_hasLogLikeConstTerm,
     METH_VARARGS|METH_KEYWORDS,
     has_log_like_const_term.doc()
-  },  
+  },
   {
     get_add_log_like_const_term.name(),
     (PyCFunction)PyBobLearnEMPLDAMachine_getAddLogLikeConstTerm,
@@ -746,7 +746,7 @@ static PyMethodDef PyBobLearnEMPLDAMachine_methods[] = {
     (PyCFunction)PyBobLearnEMPLDAMachine_getLogLikeConstTerm,
     METH_VARARGS|METH_KEYWORDS,
     get_log_like_const_term.doc()
-  },  
+  },
   {
     clear_maps.name(),
     (PyCFunction)PyBobLearnEMPLDAMachine_clearMaps,
@@ -759,6 +759,12 @@ static PyMethodDef PyBobLearnEMPLDAMachine_methods[] = {
     METH_VARARGS|METH_KEYWORDS,
     compute_log_likelihood.doc()
   },
+  {
+    log_likelihood_ratio.name(),
+    (PyCFunction)PyBobLearnEMPLDAMachine_log_likelihood_ratio,
+    METH_VARARGS|METH_KEYWORDS,
+    log_likelihood_ratio.doc()
+  },
   {0} /* Sentinel */
 };
 
@@ -788,7 +794,7 @@ bool init_BobLearnEMPLDAMachine(PyObject* module)
   PyBobLearnEMPLDAMachine_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnEMPLDAMachine_RichCompare);
   PyBobLearnEMPLDAMachine_Type.tp_methods     = PyBobLearnEMPLDAMachine_methods;
   PyBobLearnEMPLDAMachine_Type.tp_getset      = PyBobLearnEMPLDAMachine_getseters;
-  PyBobLearnEMPLDAMachine_Type.tp_call = reinterpret_cast<ternaryfunc>(PyBobLearnEMPLDAMachine_forward);
+  PyBobLearnEMPLDAMachine_Type.tp_call = reinterpret_cast<ternaryfunc>(PyBobLearnEMPLDAMachine_log_likelihood_ratio);
 
 
   // check that everything is fine
@@ -798,4 +804,3 @@ bool init_BobLearnEMPLDAMachine(PyObject* module)
   Py_INCREF(&PyBobLearnEMPLDAMachine_Type);
   return PyModule_AddObject(module, "PLDAMachine", (PyObject*)&PyBobLearnEMPLDAMachine_Type) >= 0;
 }
-
diff --git a/bob/learn/em/test/test_ivector.py b/bob/learn/em/test/test_ivector.py
index 7e1abd6759cb92972fadcc8c3414acb57733806b..6c7e63c98191c8da6fceee6a3626fcafb11d42ab 100644
--- a/bob/learn/em/test/test_ivector.py
+++ b/bob/learn/em/test/test_ivector.py
@@ -106,7 +106,7 @@ class IVectorMachinePy():
 
     return TtSigmaInvNT
 
-  def forward(self, gmmstats):
+  def project(self, gmmstats):
     if self.m_ubm and self.m_t is not None and self.m_sigma is not None:
       N = gmmstats.n
       F = gmmstats.sum_px
@@ -146,7 +146,7 @@ def test_machine():
   m.set_sigma(sigma)
 
   wij_ref = numpy.array([-0.04213415, 0.21463343]) # Reference from original Chris implementation
-  wij = m.forward(gs)
+  wij = m.project(gs)
   assert numpy.allclose(wij_ref, wij, 1e-5)
 
   # IVector (C++)
@@ -155,5 +155,5 @@ def test_machine():
   mc.sigma = sigma
 
   wij_ref = numpy.array([-0.04213415, 0.21463343]) # Reference from original Chris implementation
-  wij = mc(gs)
+  wij = mc.project(gs)
   assert numpy.allclose(wij_ref, wij, 1e-5)
diff --git a/bob/learn/em/test/test_jfa.py b/bob/learn/em/test/test_jfa.py
index ebcd5d1a44f5dcd4c3ca01d787d7f416452d4f63..6d38736350fc5b36b8147926efb8a2d29a890e9c 100644
--- a/bob/learn/em/test/test_jfa.py
+++ b/bob/learn/em/test/test_jfa.py
@@ -64,8 +64,8 @@ def test_JFABase():
   V = numpy.array([[6, 5], [4, 3], [2, 1], [1, 2], [3, 4], [5, 6]], 'float64')
   d = numpy.array([0, 1, 0, 1, 0, 1], 'float64')
   m = JFABase(ubm, ru=1, rv=1)
-  
-  _,_,ru,rv = m.shape 
+
+  _,_,ru,rv = m.shape
   assert ru == 1
   assert rv == 1
 
@@ -73,13 +73,13 @@ def test_JFABase():
   m.resize(2,2)
   m.u = U
   m.v = V
-  m.d = d  
+  m.d = d
   n_gaussians,dim,ru,rv = m.shape
   supervector_length    = m.supervector_length
-  
+
   assert (m.u == U).all()
   assert (m.v == V).all()
-  assert (m.d == d).all()  
+  assert (m.d == d).all()
   assert n_gaussians        == 2
   assert dim                == 3
   assert supervector_length == 6
@@ -144,7 +144,7 @@ def test_ISVBase():
   n_gaussians,dim,ru = m.shape
   supervector_length = m.supervector_length
   assert (m.u == U).all()
-  assert (m.d == d).all()  
+  assert (m.d == d).all()
   assert n_gaussians        == 2
   assert dim                == 3
   assert supervector_length == 6
@@ -207,8 +207,8 @@ def test_JFAMachine():
   m.y = y
   m.z = z
   n_gaussians,dim,ru,rv = m.shape
-  supervector_length    = m.supervector_length  
-  
+  supervector_length    = m.supervector_length
+
   assert n_gaussians        == 2
   assert dim                == 3
   assert supervector_length == 6
@@ -261,7 +261,7 @@ def test_JFAMachine():
   eps = 1e-10
   x_ref = numpy.array([0.291042849767692, 0.310273618998444], 'float64')
   score_ref = -2.111577181208289
-  score = m(gs)
+  score = m.log_likelihood(gs)
   assert numpy.allclose(m.x, x_ref, eps)
   assert abs(score_ref-score) < eps
 
@@ -274,7 +274,7 @@ def test_JFAMachine():
 
   ux = numpy.ndarray((6,), numpy.float64)
   m.estimate_ux(gs, ux)
-  n_gaussians, dim,_,_ = m.shape  
+  n_gaussians, dim,_,_ = m.shape
   ux_py = estimate_ux(n_gaussians, dim, ubm.mean_supervector, ubm.variance_supervector, U, n, sumpx)
   assert numpy.allclose(ux, ux_py, eps)
   assert numpy.allclose(m.x, x, eps)
@@ -310,9 +310,9 @@ def test_ISVMachine():
   z = numpy.array([3,4,1,2,0,1], 'float64')
   m = ISVMachine(base)
   m.z = z
-  
+
   n_gaussians,dim,ru    = m.shape
-  supervector_length    = m.supervector_length  
+  supervector_length    = m.supervector_length
   assert n_gaussians          == 2
   assert dim                  == 3
   assert supervector_length   == 6
@@ -339,7 +339,7 @@ def test_ISVMachine():
   m.z = z
 
   n_gaussians,dim,ru    = m.shape
-  supervector_length    = m.supervector_length  
+  supervector_length    = m.supervector_length
   assert n_gaussians        == 2
   assert dim                == 3
   assert supervector_length == 6
@@ -365,7 +365,7 @@ def test_ISVMachine():
   score_ref = -3.280498193082100
 
   score = m(gs)
-  assert numpy.allclose(m.x, x_ref, eps)  
+  assert numpy.allclose(m.x, x_ref, eps)
   assert abs(score_ref-score) < eps
 
   # Check using alternate forward() method
@@ -378,13 +378,13 @@ def test_ISVMachine():
   # x and Ux
   x = numpy.ndarray((2,), numpy.float64)
   m.estimate_x(gs, x)
-  n_gaussians,dim,_    = m.shape  
+  n_gaussians,dim,_    = m.shape
   x_py = estimate_x(n_gaussians, dim, ubm.mean_supervector, ubm.variance_supervector, U, n, sumpx)
   assert numpy.allclose(x, x_py, eps)
 
   ux = numpy.ndarray((6,), numpy.float64)
   m.estimate_ux(gs, ux)
-  n_gaussians,dim,_    = m.shape  
+  n_gaussians,dim,_    = m.shape
   ux_py = estimate_ux(n_gaussians, dim, ubm.mean_supervector, ubm.variance_supervector, U, n, sumpx)
   assert numpy.allclose(ux, ux_py, eps)
   assert numpy.allclose(m.x, x, eps)