diff --git a/bob/learn/em/empca_trainer.cpp b/bob/learn/em/empca_trainer.cpp
index 2db9bb6f30e2357f095d907f454014e92b8395a5..5af5adcf9b1689072794973cab4d2479a71fecbf 100644
--- a/bob/learn/em/empca_trainer.cpp
+++ b/bob/learn/em/empca_trainer.cpp
@@ -29,7 +29,7 @@ static auto EMPCATrainer_doc = bob::extension::ClassDoc(
   .add_prototype("","")
 
   .add_parameter("other", ":py:class:`bob.learn.em.EMPCATrainer`", "A EMPCATrainer object to be copied.")
-  .add_parameter("convergence_threshold", "double", "")
+  .add_parameter("convergence_threshold", "float", "")
 
 );
 
@@ -227,7 +227,7 @@ static auto m_step = bob::extension::FunctionDoc(
   true
 )
 .add_prototype("linear_machine,data")
-.add_parameter("linear_machine", ":py:class:`bob.learn.em.LinearMachine`", "LinearMachine Object")
+.add_parameter("linear_machine", ":py:class:`bob.learn.linear.Machine`", "LinearMachine Object")
 .add_parameter("data", "array_like <float, 2D>", "Input data");
 static PyObject* PyBobLearnEMEMPCATrainer_m_step(PyBobLearnEMEMPCATrainerObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
@@ -258,7 +258,7 @@ static auto compute_likelihood = bob::extension::FunctionDoc(
   true
 )
 .add_prototype("linear_machine,data")
-.add_parameter("linear_machine", ":py:class:`bob.learn.em.LinearMachine`", "LinearMachine Object");
+.add_parameter("linear_machine", ":py:class:`bob.learn.linear.Machine`", "LinearMachine Object");
 static PyObject* PyBobLearnEMEMPCATrainer_compute_likelihood(PyBobLearnEMEMPCATrainerObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
 
diff --git a/bob/learn/em/gmm_machine.cpp b/bob/learn/em/gmm_machine.cpp
index 6c84d98cf35c65edcec293f339cfbf0fd76d2202..4057bc58bc87426cc0c8551e65828ab1df730c88 100644
--- a/bob/learn/em/gmm_machine.cpp
+++ b/bob/learn/em/gmm_machine.cpp
@@ -98,7 +98,7 @@ static int PyBobLearnEMGMMMachine_init(PyBobLearnEMGMMMachineObject* self, PyObj
 
   // get the number of command line arguments
   int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-  
+
   switch (nargs) {
 
     case 0: //default initializer ()
@@ -204,23 +204,23 @@ int PyBobLearnEMGMMMachine_setMeans(PyBobLearnEMGMMMachineObject* self, PyObject
     return -1;
   }
   auto input_ = make_safe(input);
-  
-  // perform check on the input  
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, means.name());
     return -1;
-  }  
+  }
 
   if (input->ndim != 2){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 2D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, means.name());
     return -1;
-  }  
+  }
 
   if (input->shape[1] != (Py_ssize_t)self->cxx->getNInputs() && input->shape[0] != (Py_ssize_t)self->cxx->getNGaussians()) {
     PyErr_Format(PyExc_TypeError, "`%s' 2D `input` array should have the shape [%" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d] not [%" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d] for `%s`", Py_TYPE(self)->tp_name, self->cxx->getNGaussians(), self->cxx->getNInputs(), input->shape[1], input->shape[0], means.name());
     return -1;
-  }  
-  
+  }
+
   auto b = PyBlitzArrayCxx_AsBlitz<double,2>(input, "means");
   if (!b) return -1;
   self->cxx->setMeans(*b);
@@ -248,23 +248,23 @@ int PyBobLearnEMGMMMachine_setVariances(PyBobLearnEMGMMMachineObject* self, PyOb
     return -1;
   }
   auto input_ = make_safe(input);
-  
-  // perform check on the input  
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, variances.name());
     return -1;
-  }  
+  }
 
   if (input->ndim != 2){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 2D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, variances.name());
     return -1;
-  }  
+  }
 
   if (input->shape[1] != (Py_ssize_t)self->cxx->getNInputs() && input->shape[0] != (Py_ssize_t)self->cxx->getNGaussians()) {
     PyErr_Format(PyExc_TypeError, "`%s' 2D `input` array should have the shape [%" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d] not [%" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d] for `%s`", Py_TYPE(self)->tp_name, self->cxx->getNGaussians(), self->cxx->getNInputs(), input->shape[1], input->shape[0], variances.name());
     return -1;
-  }  
-  
+  }
+
   auto b = PyBlitzArrayCxx_AsBlitz<double,2>(input, "variances");
   if (!b) return -1;
   self->cxx->setVariances(*b);
@@ -292,22 +292,22 @@ int PyBobLearnEMGMMMachine_setWeights(PyBobLearnEMGMMMachineObject* self, PyObje
     return -1;
   }
   auto o_ = make_safe(input);
-  
-  // perform check on the input  
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, weights.name());
     return -1;
-  }  
+  }
 
   if (input->ndim != 1){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, weights.name());
     return -1;
-  }  
+  }
 
   if (input->shape[0] != (Py_ssize_t)self->cxx->getNGaussians()){
     PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, self->cxx->getNGaussians(), input->shape[0], weights.name());
     return -1;
-  }  
+  }
 
   auto b = PyBlitzArrayCxx_AsBlitz<double,1>(input, "weights");
   if (!b) return -1;
@@ -337,23 +337,23 @@ int PyBobLearnEMGMMMachine_setVarianceSupervector(PyBobLearnEMGMMMachineObject*
     return -1;
   }
   auto o_ = make_safe(input);
-  
-  // perform check on the input  
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, variance_supervector.name());
     return -1;
-  }  
+  }
 
   if (input->ndim != 1){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, variance_supervector.name());
     return -1;
-  }  
+  }
 
   if (input->shape[0] != (Py_ssize_t)self->cxx->getNGaussians()*(Py_ssize_t)self->cxx->getNInputs()){
     PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, self->cxx->getNGaussians()*(Py_ssize_t)self->cxx->getNInputs(), input->shape[0], variance_supervector.name());
     return -1;
-  }  
-  
+  }
+
   auto b = PyBlitzArrayCxx_AsBlitz<double,1>(input, "variance_supervector");
   if (!b) return -1;
   self->cxx->setVarianceSupervector(*b);
@@ -381,23 +381,23 @@ int PyBobLearnEMGMMMachine_setMeanSupervector(PyBobLearnEMGMMMachineObject* self
     return -1;
   }
   auto o_ = make_safe(input);
-  
-  // perform check on the input  
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, mean_supervector.name());
     return -1;
-  }  
+  }
 
   if (input->ndim != 1){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, mean_supervector.name());
     return -1;
-  }  
+  }
 
   if (input->shape[0] != (Py_ssize_t)self->cxx->getNGaussians()*(Py_ssize_t)self->cxx->getNInputs()){
     PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, self->cxx->getNGaussians()*(Py_ssize_t)self->cxx->getNInputs(), input->shape[0], mean_supervector.name());
     return -1;
-  }  
-  
+  }
+
   auto b = PyBlitzArrayCxx_AsBlitz<double,1>(input, "mean_supervector");
   if (!b) return -1;
   self->cxx->setMeanSupervector(*b);
@@ -410,7 +410,7 @@ int PyBobLearnEMGMMMachine_setMeanSupervector(PyBobLearnEMGMMMachineObject* self
 /***** variance_thresholds *****/
 static auto variance_thresholds = bob::extension::VariableDoc(
   "variance_thresholds",
-  "array_like <double, 2D>",
+  "array_like <float, 2D>",
   "Set the variance flooring thresholds in each dimension to the same vector for all Gaussian components if the argument is a 1D numpy arrray, and equal for all Gaussian components and dimensions if the parameter is a scalar. ",
   ""
 );
@@ -427,8 +427,8 @@ int PyBobLearnEMGMMMachine_setVarianceThresholds(PyBobLearnEMGMMMachineObject* s
     return -1;
   }
   auto o_ = make_safe(input);
-  
-  // perform check on the input  
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, variance_thresholds.name());
     return -1;
@@ -437,24 +437,24 @@ int PyBobLearnEMGMMMachine_setVarianceThresholds(PyBobLearnEMGMMMachineObject* s
   if (input->ndim != 2){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 2D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, variance_thresholds.name());
     return -1;
-  }  
+  }
 
   if (input->shape[1] != (Py_ssize_t)self->cxx->getNInputs() && input->shape[0] != (Py_ssize_t)self->cxx->getNGaussians()) {
     PyErr_Format(PyExc_TypeError, "`%s' 2D `input` array should have the shape [%" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d] not [%" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d] for `%s`", Py_TYPE(self)->tp_name, self->cxx->getNGaussians(), self->cxx->getNInputs(), input->shape[1], input->shape[0], variance_thresholds.name());
     return -1;
-  }   
-  
+  }
+
   auto b = PyBlitzArrayCxx_AsBlitz<double,2>(input, "variance_thresholds");
   if (!b) return -1;
   self->cxx->setVarianceThresholds(*b);
   return 0;
-  BOB_CATCH_MEMBER("variance_thresholds could not be set", -1)  
+  BOB_CATCH_MEMBER("variance_thresholds could not be set", -1)
 }
 
 
 
 
-static PyGetSetDef PyBobLearnEMGMMMachine_getseters[] = { 
+static PyGetSetDef PyBobLearnEMGMMMachine_getseters[] = {
   {
    shape.name(),
    (getter)PyBobLearnEMGMMMachine_getShape,
@@ -505,7 +505,7 @@ static PyGetSetDef PyBobLearnEMGMMMachine_getseters[] = {
    mean_supervector.doc(),
    0
   },
-  
+
   {0}  // Sentinel
 };
 
@@ -525,9 +525,9 @@ static auto save = bob::extension::FunctionDoc(
 static PyObject* PyBobLearnEMGMMMachine_Save(PyBobLearnEMGMMMachineObject* self,  PyObject* args, PyObject* kwargs) {
 
   BOB_TRY
-  
+
   // get list of arguments
-  char** kwlist = save.kwlist(0);  
+  char** kwlist = save.kwlist(0);
   PyBobIoHDF5FileObject* hdf5;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
 
@@ -547,12 +547,12 @@ static auto load = bob::extension::FunctionDoc(
 .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
 static PyObject* PyBobLearnEMGMMMachine_Load(PyBobLearnEMGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
-  char** kwlist = load.kwlist(0);  
+
+  char** kwlist = load.kwlist(0);
   PyBobIoHDF5FileObject* hdf5;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-  
-  auto hdf5_ = make_safe(hdf5);  
+
+  auto hdf5_ = make_safe(hdf5);
   self->cxx->load(*hdf5->f);
 
   BOB_CATCH_MEMBER("cannot load the data", 0)
@@ -563,7 +563,7 @@ static PyObject* PyBobLearnEMGMMMachine_Load(PyBobLearnEMGMMMachineObject* self,
 /*** is_similar_to ***/
 static auto is_similar_to = bob::extension::FunctionDoc(
   "is_similar_to",
-  
+
   "Compares this GMMMachine with the ``other`` one to be approximately the same.",
   "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
   "relative and absolute precision for the ``weights``, ``biases`` "
@@ -588,8 +588,8 @@ static PyObject* PyBobLearnEMGMMMachine_IsSimilarTo(PyBobLearnEMGMMMachineObject
         &PyBobLearnEMGMMMachine_Type, &other,
         &r_epsilon, &a_epsilon)){
 
-        is_similar_to.print_usage(); 
-        return 0;        
+        is_similar_to.print_usage();
+        return 0;
   }
 
   if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
@@ -643,7 +643,7 @@ static PyObject* PyBobLearnEMGMMMachine_resize(PyBobLearnEMGMMMachineObject* sel
 static auto log_likelihood = bob::extension::FunctionDoc(
   "log_likelihood",
   "Output the log likelihood of the sample, x, i.e. :math:`log(p(x|GMM))`. Inputs are checked.",
-  ".. note:: The :py:meth:`__call__` function is an alias for this.", 
+  ".. note:: The :py:meth:`__call__` function is an alias for this.",
   true
 )
 .add_prototype("input","output")
@@ -651,7 +651,7 @@ static auto log_likelihood = bob::extension::FunctionDoc(
 .add_return("output","float","The log likelihood");
 static PyObject* PyBobLearnEMGMMMachine_loglikelihood(PyBobLearnEMGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = log_likelihood.kwlist(0);
 
   PyBlitzArrayObject* input = 0;
@@ -659,25 +659,25 @@ static PyObject* PyBobLearnEMGMMMachine_loglikelihood(PyBobLearnEMGMMMachineObje
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBlitzArray_Converter, &input)) return 0;
   //protects acquired resources through this scope
   auto input_ = make_safe(input);
-  
+
   // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `input`", Py_TYPE(self)->tp_name);
     log_likelihood.print_usage();
     return 0;
-  }  
+  }
 
   if (input->ndim != 1){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64", Py_TYPE(self)->tp_name);
     log_likelihood.print_usage();
     return 0;
-  }  
+  }
 
   if (input->shape[0] != (Py_ssize_t)self->cxx->getNInputs()){
     PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d elements, not %" PY_FORMAT_SIZE_T "d", Py_TYPE(self)->tp_name, self->cxx->getNInputs(), input->shape[0]);
     log_likelihood.print_usage();
     return 0;
-  }  
+  }
 
   double value = self->cxx->logLikelihood(*PyBlitzArrayCxx_AsBlitz<double,1>(input));
   return Py_BuildValue("d", value);
@@ -690,7 +690,7 @@ static PyObject* PyBobLearnEMGMMMachine_loglikelihood(PyBobLearnEMGMMMachineObje
 static auto log_likelihood_ = bob::extension::FunctionDoc(
   "log_likelihood_",
   "Output the log likelihood of the sample, x, i.e. :math:`log(p(x|GMM))`. Inputs are NOT checked.",
-  "", 
+  "",
   true
 )
 .add_prototype("input","output")
@@ -698,7 +698,7 @@ static auto log_likelihood_ = bob::extension::FunctionDoc(
 .add_return("output","float","The log likelihood");
 static PyObject* PyBobLearnEMGMMMachine_loglikelihood_(PyBobLearnEMGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = log_likelihood_.kwlist(0);
 
   PyBlitzArrayObject* input = 0;
@@ -706,25 +706,25 @@ static PyObject* PyBobLearnEMGMMMachine_loglikelihood_(PyBobLearnEMGMMMachineObj
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBlitzArray_Converter, &input)) return 0;
   //protects acquired resources through this scope
   auto input_ = make_safe(input);
-  
+
   // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `input`", Py_TYPE(self)->tp_name);
     log_likelihood.print_usage();
     return 0;
-  }  
+  }
 
   if (input->ndim != 1){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64", Py_TYPE(self)->tp_name);
     log_likelihood.print_usage();
     return 0;
-  }  
+  }
 
   if (input->shape[0] != (Py_ssize_t)self->cxx->getNInputs()){
     PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d elements, not %" PY_FORMAT_SIZE_T "d", Py_TYPE(self)->tp_name, self->cxx->getNInputs(), input->shape[0]);
     log_likelihood.print_usage();
     return 0;
-  }  
+  }
 
   double value = self->cxx->logLikelihood_(*PyBlitzArrayCxx_AsBlitz<double,1>(input));
   return Py_BuildValue("d", value);
@@ -737,7 +737,7 @@ static PyObject* PyBobLearnEMGMMMachine_loglikelihood_(PyBobLearnEMGMMMachineObj
 static auto acc_statistics = bob::extension::FunctionDoc(
   "acc_statistics",
   "Accumulate the GMM statistics for this sample(s). Inputs are checked.",
-  "", 
+  "",
   true
 )
 .add_prototype("input,stats")
@@ -751,18 +751,18 @@ static PyObject* PyBobLearnEMGMMMachine_accStatistics(PyBobLearnEMGMMMachineObje
   PyBlitzArrayObject* input           = 0;
   PyBobLearnEMGMMStatsObject* stats = 0;
 
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O!", kwlist, &PyBlitzArray_Converter,&input, 
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O!", kwlist, &PyBlitzArray_Converter,&input,
                                                                  &PyBobLearnEMGMMStats_Type, &stats))
     return 0;
 
   //protects acquired resources through this scope
   auto input_ = make_safe(input);
-  
+
   blitz::Array<double,2>  blitz_test  = *PyBlitzArrayCxx_AsBlitz<double,2>(input);
   if (blitz_test.extent(1)==0)
     self->cxx->accStatistics(*PyBlitzArrayCxx_AsBlitz<double,1>(input), *stats->cxx);
   else
-    self->cxx->accStatistics(blitz_test, *stats->cxx);  
+    self->cxx->accStatistics(blitz_test, *stats->cxx);
 
 
   BOB_CATCH_MEMBER("cannot accumulate the statistics", 0)
@@ -774,7 +774,7 @@ static PyObject* PyBobLearnEMGMMMachine_accStatistics(PyBobLearnEMGMMMachineObje
 static auto acc_statistics_ = bob::extension::FunctionDoc(
   "acc_statistics_",
   "Accumulate the GMM statistics for this sample(s). Inputs are NOT checked.",
-  "", 
+  "",
   true
 )
 .add_prototype("input,stats")
@@ -782,24 +782,24 @@ static auto acc_statistics_ = bob::extension::FunctionDoc(
 .add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "Statistics of the GMM");
 static PyObject* PyBobLearnEMGMMMachine_accStatistics_(PyBobLearnEMGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = acc_statistics_.kwlist(0);
 
   PyBlitzArrayObject* input = 0;
   PyBobLearnEMGMMStatsObject* stats = 0;
 
- if(!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O!", kwlist, &PyBlitzArray_Converter,&input, 
+ if(!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O!", kwlist, &PyBlitzArray_Converter,&input,
                                                                  &PyBobLearnEMGMMStats_Type, &stats))
     return 0;
 
   //protects acquired resources through this scope
   auto input_ = make_safe(input);
-  
+
   blitz::Array<double,2>  blitz_test  = *PyBlitzArrayCxx_AsBlitz<double,2>(input);
   if (blitz_test.extent(1)==0)
     self->cxx->accStatistics_(*PyBlitzArrayCxx_AsBlitz<double,1>(input), *stats->cxx);
   else
-    self->cxx->accStatistics_(blitz_test, *stats->cxx);  
+    self->cxx->accStatistics_(blitz_test, *stats->cxx);
 
   BOB_CATCH_MEMBER("cannot accumulate the statistics", 0)
   Py_RETURN_NONE;
@@ -846,7 +846,7 @@ static PyObject* PyBobLearnEMGMMMachine_setVarianceThresholds_method(PyBobLearnE
 static auto get_gaussian = bob::extension::FunctionDoc(
   "get_gaussian",
   "Get the specified Gaussian component.",
-  ".. note:: An exception is thrown if i is out of range.", 
+  ".. note:: An exception is thrown if i is out of range.",
   true
 )
 .add_prototype("i","gaussian")
@@ -854,13 +854,13 @@ static auto get_gaussian = bob::extension::FunctionDoc(
 .add_return("gaussian",":py:class:`bob.learn.em.Gaussian`","Gaussian object");
 static PyObject* PyBobLearnEMGMMMachine_get_gaussian(PyBobLearnEMGMMMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = get_gaussian.kwlist(0);
 
   int i = 0;
 
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) return 0;
- 
+
   //Allocating the correspondent python object
   PyBobLearnEMGaussianObject* retval =
     (PyBobLearnEMGaussianObject*)PyBobLearnEMGaussian_Type.tp_alloc(&PyBobLearnEMGaussian_Type, 0);
@@ -923,7 +923,7 @@ static PyMethodDef PyBobLearnEMGMMMachine_methods[] = {
     METH_VARARGS|METH_KEYWORDS,
     acc_statistics_.doc()
   },
- 
+
   {
     get_gaussian.name(),
     (PyCFunction)PyBobLearnEMGMMMachine_get_gaussian,
@@ -937,7 +937,7 @@ static PyMethodDef PyBobLearnEMGMMMachine_methods[] = {
     METH_VARARGS|METH_KEYWORDS,
     set_variance_thresholds.doc()
   },
-  
+
   {0} /* Sentinel */
 };
 
@@ -977,4 +977,3 @@ bool init_BobLearnEMGMMMachine(PyObject* module)
   Py_INCREF(&PyBobLearnEMGMMMachine_Type);
   return PyModule_AddObject(module, "GMMMachine", (PyObject*)&PyBobLearnEMGMMMachine_Type) >= 0;
 }
-
diff --git a/bob/learn/em/gmm_stats.cpp b/bob/learn/em/gmm_stats.cpp
index 5befeb8e98743e236c37e0d77892ed5204b031f3..15a932f4684ced6e7be83348a9f9d17ee81456d3 100644
--- a/bob/learn/em/gmm_stats.cpp
+++ b/bob/learn/em/gmm_stats.cpp
@@ -120,7 +120,7 @@ static int PyBobLearnEMGMMStats_init(PyBobLearnEMGMMStatsObject* self, PyObject*
         arg = PyList_GET_ITEM(tmp, 0);
       }
 
-      /**If the constructor input is Gaussian object**/	
+      /**If the constructor input is Gaussian object**/
      if (PyBobLearnEMGMMStats_Check(arg))
        return PyBobLearnEMGMMStats_init_copy(self, args, kwargs);
       /**If the constructor input is a HDF5**/
@@ -193,28 +193,28 @@ int PyBobLearnEMGMMStats_setN(PyBobLearnEMGMMStatsObject* self, PyObject* value,
     return -1;
   }
   auto o_ = make_safe(input);
-  
-  // perform check on the input  
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, n.name());
     return -1;
-  }  
+  }
 
   if (input->ndim != 1){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, n.name());
     return -1;
-  }  
+  }
 
   if (input->shape[0] != (Py_ssize_t)self->cxx->n.extent(0)){
     PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, (Py_ssize_t)self->cxx->n.extent(0), input->shape[0], n.name());
     return -1;
   }
-  
+
   auto b = PyBlitzArrayCxx_AsBlitz<double,1>(input, "n");
   if (!b) return -1;
   self->cxx->n = *b;
   return 0;
-  BOB_CATCH_MEMBER("n could not be set", -1)  
+  BOB_CATCH_MEMBER("n could not be set", -1)
 }
 
 
@@ -237,28 +237,28 @@ int PyBobLearnEMGMMStats_setSum_px(PyBobLearnEMGMMStatsObject* self, PyObject* v
     return -1;
   }
   auto o_ = make_safe(input);
-  
-  // perform check on the input  
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, sum_px.name());
     return -1;
-  }  
+  }
 
   if (input->ndim != 2){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 2D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, sum_px.name());
     return -1;
-  }  
+  }
 
   if (input->shape[1] != (Py_ssize_t)self->cxx->sumPx.extent(1) && input->shape[0] != (Py_ssize_t)self->cxx->sumPx.extent(0)) {
     PyErr_Format(PyExc_TypeError, "`%s' 2D `input` array should have the shape [%" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d] not [%" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d] for `%s`", Py_TYPE(self)->tp_name, (Py_ssize_t)self->cxx->sumPx.extent(1), (Py_ssize_t)self->cxx->sumPx.extent(0), (Py_ssize_t)input->shape[1], (Py_ssize_t)input->shape[0], sum_px.name());
     return -1;
   }
-  
+
   auto b = PyBlitzArrayCxx_AsBlitz<double,2>(input, "sum_px");
   if (!b) return -1;
   self->cxx->sumPx = *b;
   return 0;
-  BOB_CATCH_MEMBER("sum_px could not be set", -1)  
+  BOB_CATCH_MEMBER("sum_px could not be set", -1)
 }
 
 
@@ -281,28 +281,28 @@ int PyBobLearnEMGMMStats_setSum_pxx(PyBobLearnEMGMMStatsObject* self, PyObject*
     return -1;
   }
   auto o_ = make_safe(input);
-  
-  // perform check on the input  
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, sum_pxx.name());
     return -1;
-  }  
+  }
 
   if (input->ndim != 2){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 2D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, sum_pxx.name());
     return -1;
-  }  
+  }
 
   if (input->shape[1] != (Py_ssize_t)self->cxx->sumPxx.extent(1) && input->shape[0] != (Py_ssize_t)self->cxx->sumPxx.extent(0)) {
     PyErr_Format(PyExc_TypeError, "`%s' 2D `input` array should have the shape [%" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d] not [%" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d] for `%s`", Py_TYPE(self)->tp_name, (Py_ssize_t)self->cxx->sumPxx.extent(1), (Py_ssize_t)self->cxx->sumPxx.extent(0), (Py_ssize_t)input->shape[1], (Py_ssize_t)input->shape[0], sum_pxx.name());
     return -1;
   }
-  
+
   auto b = PyBlitzArrayCxx_AsBlitz<double,2>(input, "sum_pxx");
   if (!b) return -1;
   self->cxx->sumPxx = *b;
   return 0;
-  BOB_CATCH_MEMBER("sum_pxx could not be set", -1)  
+  BOB_CATCH_MEMBER("sum_pxx could not be set", -1)
 }
 
 
@@ -339,7 +339,7 @@ int PyBobLearnEMGMMStats_setT(PyBobLearnEMGMMStatsObject* self, PyObject* value,
 /***** log_likelihood *****/
 static auto log_likelihood = bob::extension::VariableDoc(
   "log_likelihood",
-  "double",
+  "float",
   "The accumulated log likelihood of all samples"
 );
 PyObject* PyBobLearnEMGMMStats_getLog_likelihood(PyBobLearnEMGMMStatsObject* self, void*){
@@ -411,7 +411,7 @@ static PyGetSetDef PyBobLearnEMGMMStats_getseters[] = {
     (setter)PyBobLearnEMGMMStats_setLog_likelihood,
     log_likelihood.doc(),
     0
-  },  
+  },
   {
    shape.name(),
    (getter)PyBobLearnEMGMMStats_getShape,
@@ -440,9 +440,9 @@ static auto save = bob::extension::FunctionDoc(
 static PyObject* PyBobLearnEMGMMStats_Save(PyBobLearnEMGMMStatsObject* self,  PyObject* args, PyObject* kwargs) {
 
   BOB_TRY
-  
+
   // get list of arguments
-  char** kwlist = save.kwlist(0);  
+  char** kwlist = save.kwlist(0);
   PyBobIoHDF5FileObject* hdf5;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
 
@@ -462,12 +462,12 @@ static auto load = bob::extension::FunctionDoc(
 .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
 static PyObject* PyBobLearnEMGMMStats_Load(PyBobLearnEMGMMStatsObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
-  char** kwlist = load.kwlist(0);  
+
+  char** kwlist = load.kwlist(0);
   PyBobIoHDF5FileObject* hdf5;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-  
-  auto hdf5_ = make_safe(hdf5);  
+
+  auto hdf5_ = make_safe(hdf5);
   self->cxx->load(*hdf5->f);
 
   BOB_CATCH_MEMBER("cannot load the data", 0)
@@ -478,7 +478,7 @@ static PyObject* PyBobLearnEMGMMStats_Load(PyBobLearnEMGMMStatsObject* self, PyO
 /*** is_similar_to ***/
 static auto is_similar_to = bob::extension::FunctionDoc(
   "is_similar_to",
-  
+
   "Compares this GMMStats with the ``other`` one to be approximately the same.",
   "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
   "relative and absolute precision for the ``weights``, ``biases`` "
@@ -503,8 +503,8 @@ static PyObject* PyBobLearnEMGMMStats_IsSimilarTo(PyBobLearnEMGMMStatsObject* se
         &PyBobLearnEMGMMStats_Type, &other,
         &r_epsilon, &a_epsilon)){
 
-        is_similar_to.print_usage(); 
-        return 0;        
+        is_similar_to.print_usage();
+        return 0;
   }
 
   if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
@@ -671,4 +671,3 @@ bool init_BobLearnEMGMMStats(PyObject* module)
   Py_INCREF(&PyBobLearnEMGMMStats_Type);
   return PyModule_AddObject(module, "GMMStats", (PyObject*)&PyBobLearnEMGMMStats_Type) >= 0;
 }
-
diff --git a/bob/learn/em/isv_trainer.cpp b/bob/learn/em/isv_trainer.cpp
index ae50c97fc45e61588feaf72d89abb2b13e1a61f7..ea98e2eea6e5a8045310e2b6ab16fa198f877a53 100644
--- a/bob/learn/em/isv_trainer.cpp
+++ b/bob/learn/em/isv_trainer.cpp
@@ -98,7 +98,7 @@ static auto ISVTrainer_doc = bob::extension::ClassDoc(
   .add_prototype("other","")
   .add_prototype("","")
   .add_parameter("other", ":py:class:`bob.learn.em.ISVTrainer`", "A ISVTrainer object to be copied.")
-  .add_parameter("relevance_factor", "double", "")
+  .add_parameter("relevance_factor", "float", "")
 );
 
 
@@ -406,7 +406,7 @@ static auto initialize = bob::extension::FunctionDoc(
   "",
   true
 )
-.add_prototype("isv_base,stats,rng")
+.add_prototype("isv_base, stats, rng")
 .add_parameter("isv_base", ":py:class:`bob.learn.em.ISVBase`", "ISVBase Object")
 .add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object")
 .add_parameter("rng", ":py:class:`bob.core.random.mt19937`", "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.");
@@ -446,7 +446,7 @@ static auto e_step = bob::extension::FunctionDoc(
   "",
   true
 )
-.add_prototype("isv_base,stats")
+.add_prototype("isv_base, stats")
 .add_parameter("isv_base", ":py:class:`bob.learn.em.ISVBase`", "ISVBase Object")
 .add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object");
 static PyObject* PyBobLearnEMISVTrainer_e_step(PyBobLearnEMISVTrainerObject* self, PyObject* args, PyObject* kwargs) {
@@ -509,7 +509,7 @@ static auto enroll = bob::extension::FunctionDoc(
   "",
   true
 )
-.add_prototype("isv_machine,features,n_iter","")
+.add_prototype("isv_machine, features, n_iter")
 .add_parameter("isv_machine", ":py:class:`bob.learn.em.ISVMachine`", "ISVMachine Object")
-.add_parameter("features", "list(:py:class:`bob.learn.em.GMMStats`)`", "")
+.add_parameter("features", "[:py:class:`bob.learn.em.GMMStats`]", "")
 .add_parameter("n_iter", "int", "Number of iterations");
diff --git a/bob/learn/em/ivector_machine.cpp b/bob/learn/em/ivector_machine.cpp
index db6e55d516d6a5fba4986a0473fa73dc7ac8ce57..1af477f2c1687a3e95b5862dc785cb27c410b04a 100644
--- a/bob/learn/em/ivector_machine.cpp
+++ b/bob/learn/em/ivector_machine.cpp
@@ -31,7 +31,7 @@ static auto IVectorMachine_doc = bob::extension::ClassDoc(
 
   .add_parameter("ubm", ":py:class:`bob.learn.em.GMMMachine`", "The Universal Background Model.")
   .add_parameter("rt", "int", "Size of the Total Variability matrix (CD x rt).")
-  .add_parameter("variance_threshold", "double", "Variance flooring threshold for the :math:`\\Sigma` (diagonal) matrix")
+  .add_parameter("variance_threshold", "float", "Variance flooring threshold for the :math:`\\Sigma` (diagonal) matrix")
 
   .add_parameter("other", ":py:class:`bob.learn.em.IVectorMachine`", "A IVectorMachine object to be copied.")
   .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
@@ -258,7 +258,7 @@ int PyBobLearnEMIVectorMachine_setSigma(PyBobLearnEMIVectorMachineObject* self,
 /***** variance_threshold *****/
 static auto variance_threshold = bob::extension::VariableDoc(
   "variance_threshold",
-  "double",
+  "float",
   "Threshold for the variance contained in sigma",
   ""
 );
@@ -271,7 +271,7 @@ int PyBobLearnEMIVectorMachine_setVarianceThreshold(PyBobLearnEMIVectorMachineOb
   BOB_TRY
 
   if (!PyBob_NumberCheck(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects an double", Py_TYPE(self)->tp_name, variance_threshold.name());
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a float", Py_TYPE(self)->tp_name, variance_threshold.name());
     return -1;
   }
 
diff --git a/bob/learn/em/jfa_trainer.cpp b/bob/learn/em/jfa_trainer.cpp
index 02ea874d39c00016c4fa5b1f343485b318193ec3..38a82a813718ebb40ab570377e29369988827fec 100644
--- a/bob/learn/em/jfa_trainer.cpp
+++ b/bob/learn/em/jfa_trainer.cpp
@@ -946,9 +946,9 @@ static auto enroll = bob::extension::FunctionDoc(
   "",
   true
 )
-.add_prototype("jfa_machine,features,n_iter","")
+.add_prototype("jfa_machine,features,n_iter")
 .add_parameter("jfa_machine", ":py:class:`bob.learn.em.JFAMachine`", "JFAMachine Object")
-.add_parameter("features", "list(:py:class:`bob.learn.em.GMMStats`)`", "")
+.add_parameter("features", "[:py:class:`bob.learn.em.GMMStats`]", "")
 .add_parameter("n_iter", "int", "Number of iterations");
 static PyObject* PyBobLearnEMJFATrainer_enroll(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
diff --git a/bob/learn/em/kmeans_machine.cpp b/bob/learn/em/kmeans_machine.cpp
index e00d40a7941599093f22bad60a7e96010ad6d141..958e439728f3f8ea3d7c106aa43ccd4437800529 100644
--- a/bob/learn/em/kmeans_machine.cpp
+++ b/bob/learn/em/kmeans_machine.cpp
@@ -98,7 +98,7 @@ static int PyBobLearnEMKMeansMachine_init(PyBobLearnEMKMeansMachineObject* self,
 
   // get the number of command line arguments
   int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-  
+
   switch (nargs) {
 
     case 0: //default initializer ()
@@ -204,23 +204,23 @@ int PyBobLearnEMKMeansMachine_setMeans(PyBobLearnEMKMeansMachineObject* self, Py
     return -1;
   }
   auto o_ = make_safe(input);
-  
-  // perform check on the input  
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, means.name());
     return 0;
-  }  
+  }
 
   if (input->ndim != 2){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 2D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, means.name());
     return 0;
-  }  
+  }
 
   if (input->shape[1] != (Py_ssize_t)self->cxx->getNInputs()) {
     PyErr_Format(PyExc_TypeError, "`%s' 2D `input` array should have the shape [N, %" PY_FORMAT_SIZE_T "d] not [N, %" PY_FORMAT_SIZE_T "d] for `%s`", Py_TYPE(self)->tp_name, self->cxx->getNInputs(), input->shape[0], means.name());
     return 0;
-  }  
-  
+  }
+
   auto b = PyBlitzArrayCxx_AsBlitz<double,2>(input, "means");
   if (!b) return -1;
   self->cxx->setMeans(*b);
@@ -229,7 +229,7 @@ int PyBobLearnEMKMeansMachine_setMeans(PyBobLearnEMKMeansMachineObject* self, Py
 }
 
 
-static PyGetSetDef PyBobLearnEMKMeansMachine_getseters[] = { 
+static PyGetSetDef PyBobLearnEMKMeansMachine_getseters[] = {
   {
    shape.name(),
    (getter)PyBobLearnEMKMeansMachine_getShape,
@@ -263,9 +263,9 @@ static auto save = bob::extension::FunctionDoc(
 static PyObject* PyBobLearnEMKMeansMachine_Save(PyBobLearnEMKMeansMachineObject* self,  PyObject* args, PyObject* kwargs) {
 
   BOB_TRY
-  
+
   // get list of arguments
-  char** kwlist = save.kwlist(0);  
+  char** kwlist = save.kwlist(0);
   PyBobIoHDF5FileObject* hdf5;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
 
@@ -285,12 +285,12 @@ static auto load = bob::extension::FunctionDoc(
 .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
 static PyObject* PyBobLearnEMKMeansMachine_Load(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
-  char** kwlist = load.kwlist(0);  
+
+  char** kwlist = load.kwlist(0);
   PyBobIoHDF5FileObject* hdf5;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-  
-  auto hdf5_ = make_safe(hdf5);  
+
+  auto hdf5_ = make_safe(hdf5);
   self->cxx->load(*hdf5->f);
 
   BOB_CATCH_MEMBER("cannot load the data", 0)
@@ -301,7 +301,7 @@ static PyObject* PyBobLearnEMKMeansMachine_Load(PyBobLearnEMKMeansMachineObject*
 /*** is_similar_to ***/
 static auto is_similar_to = bob::extension::FunctionDoc(
   "is_similar_to",
-  
+
   "Compares this KMeansMachine with the ``other`` one to be approximately the same.",
   "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
   "relative and absolute precision for the ``weights``, ``biases`` "
@@ -326,8 +326,8 @@ static PyObject* PyBobLearnEMKMeansMachine_IsSimilarTo(PyBobLearnEMKMeansMachine
         &PyBobLearnEMKMeansMachine_Type, &other,
         &r_epsilon, &a_epsilon)){
 
-        is_similar_to.print_usage(); 
-        return 0;        
+        is_similar_to.print_usage();
+        return 0;
   }
 
   if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
@@ -380,7 +380,7 @@ static PyObject* PyBobLearnEMKMeansMachine_resize(PyBobLearnEMKMeansMachineObjec
 static auto get_mean = bob::extension::FunctionDoc(
   "get_mean",
   "Get the i'th mean.",
-  ".. note:: An exception is thrown if i is out of range.", 
+  ".. note:: An exception is thrown if i is out of range.",
   true
 )
 .add_prototype("i","mean")
@@ -388,12 +388,12 @@ static auto get_mean = bob::extension::FunctionDoc(
 .add_return("mean","array_like <float, 1D>","Mean array");
 static PyObject* PyBobLearnEMKMeansMachine_get_mean(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = get_mean.kwlist(0);
 
   int i = 0;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) return 0;
- 
+
   return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getMean(i));
 
   BOB_CATCH_MEMBER("cannot get the mean", 0)
@@ -404,7 +404,7 @@ static PyObject* PyBobLearnEMKMeansMachine_get_mean(PyBobLearnEMKMeansMachineObj
 static auto set_mean = bob::extension::FunctionDoc(
   "set_mean",
   "Set the i'th mean.",
-  ".. note:: An exception is thrown if i is out of range.", 
+  ".. note:: An exception is thrown if i is out of range.",
   true
 )
 .add_prototype("i,mean")
@@ -412,37 +412,37 @@ static auto set_mean = bob::extension::FunctionDoc(
 .add_parameter("mean", "array_like <float, 1D>", "Mean array");
 static PyObject* PyBobLearnEMKMeansMachine_set_mean(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = set_mean.kwlist(0);
 
   int i = 0;
   PyBlitzArrayObject* mean = 0;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "iO&", kwlist, &i, &PyBlitzArray_Converter, &mean)) return 0;
-  
+
   //protects acquired resources through this scope
   auto mean_ = make_safe(mean);
 
-  // perform check on the input  
+  // perform check on the input
   if (mean->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, set_mean.name());
     return 0;
-  }  
+  }
 
   if (mean->ndim != 1){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, set_mean.name());
     return 0;
-  }  
+  }
 
   if (mean->shape[0] != (Py_ssize_t)self->cxx->getNInputs()){
     PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, self->cxx->getNInputs(), mean->shape[0], set_mean.name());
     return 0;
-  }  
+  }
 
   //setting the mean
   self->cxx->setMean(i, *PyBlitzArrayCxx_AsBlitz<double,1>(mean));
 
   BOB_CATCH_MEMBER("cannot set the mean", 0)
-  
+
   Py_RETURN_NONE;
 }
 
@@ -452,7 +452,7 @@ static PyObject* PyBobLearnEMKMeansMachine_set_mean(PyBobLearnEMKMeansMachineObj
 static auto get_distance_from_mean = bob::extension::FunctionDoc(
   "get_distance_from_mean",
   "Return the power of two of the square Euclidean distance of the sample, x, to the i'th mean.",
-  ".. note:: An exception is thrown if i is out of range.", 
+  ".. note:: An exception is thrown if i is out of range.",
   true
 )
 .add_prototype("input,i","output")
@@ -461,34 +461,34 @@ static auto get_distance_from_mean = bob::extension::FunctionDoc(
 .add_return("output","float","Square Euclidean distance of the sample, x, to the i'th mean");
 static PyObject* PyBobLearnEMKMeansMachine_get_distance_from_mean(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = get_distance_from_mean.kwlist(0);
 
   PyBlitzArrayObject* input = 0;
   int i = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&i", kwlist, &PyBlitzArray_Converter, &input, &i)){ 
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&i", kwlist, &PyBlitzArray_Converter, &input, &i)){
     return 0;
   }
 
   //protects acquired resources through this scope
   auto input_ = make_safe(input);
-  
-  // perform check on the input  
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, get_distance_from_mean.name());
     return 0;
-  }  
+  }
 
   if (input->ndim != 1){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, get_distance_from_mean.name());
     return 0;
-  }  
+  }
 
   if (input->shape[0] != (Py_ssize_t)self->cxx->getNInputs()){
     PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, self->cxx->getNInputs(), input->shape[0], get_distance_from_mean.name());
     return 0;
-  }  
-  
+  }
+
   double output = self->cxx->getDistanceFromMean(*PyBlitzArrayCxx_AsBlitz<double,1>(input),i);
   return Py_BuildValue("d", output);
 
@@ -508,7 +508,7 @@ static auto get_closest_mean = bob::extension::FunctionDoc(
 .add_return("output", "(int, int)", "Tuple containing the closest mean and the minimum distance from the input");
 static PyObject* PyBobLearnEMKMeansMachine_get_closest_mean(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = get_closest_mean.kwlist(0);
 
   PyBlitzArrayObject* input = 0;
@@ -518,26 +518,26 @@ static PyObject* PyBobLearnEMKMeansMachine_get_closest_mean(PyBobLearnEMKMeansMa
   auto input_ = make_safe(input);
 
   size_t closest_mean = 0;
-  double min_distance = -1;   
-  
-  // perform check on the input  
+  double min_distance = -1;
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, get_closest_mean.name());
     return 0;
-  }  
+  }
 
   if (input->ndim != 1){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, get_closest_mean.name());
     return 0;
-  }  
+  }
 
   if (input->shape[0] != (Py_ssize_t)self->cxx->getNInputs()){
     PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, self->cxx->getNInputs(), input->shape[0], get_closest_mean.name());
     return 0;
-  }    
-  
+  }
+
   self->cxx->getClosestMean(*PyBlitzArrayCxx_AsBlitz<double,1>(input), closest_mean, min_distance);
-    
+
   return Py_BuildValue("(i,d)", closest_mean, min_distance);
 
   BOB_CATCH_MEMBER("cannot compute the closest mean", 0)
@@ -553,10 +553,10 @@ static auto get_min_distance = bob::extension::FunctionDoc(
 )
 .add_prototype("input","output")
 .add_parameter("input", "array_like <float, 1D>", "The data sample (feature vector)")
-.add_return("output", "double", "The minimum distance");
+.add_return("output", "float", "The minimum distance");
 static PyObject* PyBobLearnEMKMeansMachine_get_min_distance(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = get_min_distance.kwlist(0);
 
   PyBlitzArrayObject* input = 0;
@@ -565,23 +565,23 @@ static PyObject* PyBobLearnEMKMeansMachine_get_min_distance(PyBobLearnEMKMeansMa
   //protects acquired resources through this scope
   auto input_ = make_safe(input);
   double min_distance = 0;
-  
-  // perform check on the input  
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, get_min_distance.name());
     return 0;
-  }  
+  }
 
   if (input->ndim != 1){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, get_min_distance.name());
     return 0;
-  }  
+  }
 
   if (input->shape[0] != (Py_ssize_t)self->cxx->getNInputs()){
     PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, self->cxx->getNInputs(), input->shape[0], get_min_distance.name());
     return 0;
-  }    
-  
+  }
+
   min_distance = self->cxx->getMinDistance(*PyBlitzArrayCxx_AsBlitz<double,1>(input));
 
   return Py_BuildValue("d", min_distance);
@@ -593,7 +593,7 @@ static PyObject* PyBobLearnEMKMeansMachine_get_min_distance(PyBobLearnEMKMeansMa
 static auto get_variances_and_weights_for_each_cluster = bob::extension::FunctionDoc(
   "get_variances_and_weights_for_each_cluster",
   "For each mean, find the subset of the samples that is closest to that mean, and calculate"
-  " 1) the variance of that subset (the cluster variance)" 
+  " 1) the variance of that subset (the cluster variance)"
   " 2) the proportion of the samples represented by that subset (the cluster weight)",
   "",
   true
@@ -603,7 +603,7 @@ static auto get_variances_and_weights_for_each_cluster = bob::extension::Functio
 .add_return("output", "(array_like <float, 2D>, array_like <float, 1D>)", "A tuple with the variances and the weights respectively");
 static PyObject* PyBobLearnEMKMeansMachine_get_variances_and_weights_for_each_cluster(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist =  get_variances_and_weights_for_each_cluster.kwlist(0);
 
   PyBlitzArrayObject* input = 0;
@@ -612,16 +612,16 @@ static PyObject* PyBobLearnEMKMeansMachine_get_variances_and_weights_for_each_cl
   //protects acquired resources through this scope
   auto input_ = make_safe(input);
 
-  // perform check on the input  
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, get_variances_and_weights_for_each_cluster.name());
     return 0;
-  }  
+  }
 
   if (input->ndim != 2){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 2D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, get_variances_and_weights_for_each_cluster.name());
     return 0;
-  }  
+  }
 
   if (input->shape[1] != (Py_ssize_t)self->cxx->getNInputs() ) {
     PyErr_Format(PyExc_TypeError, "`%s' 2D `input` array should have the shape [N, %" PY_FORMAT_SIZE_T "d] not [N, %" PY_FORMAT_SIZE_T "d] for `%s`", Py_TYPE(self)->tp_name, self->cxx->getNInputs(), input->shape[1], get_variances_and_weights_for_each_cluster.name());
@@ -630,7 +630,7 @@ static PyObject* PyBobLearnEMKMeansMachine_get_variances_and_weights_for_each_cl
 
   blitz::Array<double,2> variances(self->cxx->getNMeans(),self->cxx->getNInputs());
   blitz::Array<double,1> weights(self->cxx->getNMeans());
-  
+
   self->cxx->getVariancesAndWeightsForEachCluster(*PyBlitzArrayCxx_AsBlitz<double,2>(input),variances,weights);
 
   return Py_BuildValue("(N,N)",PyBlitzArrayCxx_AsConstNumpy(variances), PyBlitzArrayCxx_AsConstNumpy(weights));
@@ -655,7 +655,7 @@ static auto __get_variances_and_weights_for_each_cluster_init__ = bob::extension
 .add_parameter("weights", "array_like <float, 1D>", "Weight array");
 static PyObject* PyBobLearnEMKMeansMachine_get_variances_and_weights_for_each_cluster_init(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist =  __get_variances_and_weights_for_each_cluster_init__.kwlist(0);
 
   PyBlitzArrayObject* variances = 0;
@@ -690,7 +690,7 @@ static auto __get_variances_and_weights_for_each_cluster_acc__ = bob::extension:
 .add_parameter("weights", "array_like <float, 1D>", "Weight array");
 static PyObject* PyBobLearnEMKMeansMachine_get_variances_and_weights_for_each_cluster_acc(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist =  __get_variances_and_weights_for_each_cluster_acc__.kwlist(0);
 
   PyBlitzArrayObject* data      = 0;
@@ -726,7 +726,7 @@ static auto __get_variances_and_weights_for_each_cluster_fin__ = bob::extension:
 .add_parameter("weights", "array_like <float, 1D>", "Weight array");
 static PyObject* PyBobLearnEMKMeansMachine_get_variances_and_weights_for_each_cluster_fin(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist =  __get_variances_and_weights_for_each_cluster_fin__.kwlist(0);
 
   PyBlitzArrayObject* variances = 0;
@@ -768,61 +768,61 @@ static PyMethodDef PyBobLearnEMKMeansMachine_methods[] = {
     (PyCFunction)PyBobLearnEMKMeansMachine_resize,
     METH_VARARGS|METH_KEYWORDS,
     resize.doc()
-  },  
+  },
   {
     get_mean.name(),
     (PyCFunction)PyBobLearnEMKMeansMachine_get_mean,
     METH_VARARGS|METH_KEYWORDS,
     get_mean.doc()
-  },  
+  },
   {
     set_mean.name(),
     (PyCFunction)PyBobLearnEMKMeansMachine_set_mean,
     METH_VARARGS|METH_KEYWORDS,
     set_mean.doc()
-  },  
+  },
   {
     get_distance_from_mean.name(),
     (PyCFunction)PyBobLearnEMKMeansMachine_get_distance_from_mean,
     METH_VARARGS|METH_KEYWORDS,
     get_distance_from_mean.doc()
-  },  
+  },
   {
     get_closest_mean.name(),
     (PyCFunction)PyBobLearnEMKMeansMachine_get_closest_mean,
     METH_VARARGS|METH_KEYWORDS,
     get_closest_mean.doc()
-  },  
+  },
   {
     get_min_distance.name(),
     (PyCFunction)PyBobLearnEMKMeansMachine_get_min_distance,
     METH_VARARGS|METH_KEYWORDS,
     get_min_distance.doc()
-  },  
+  },
   {
     get_variances_and_weights_for_each_cluster.name(),
     (PyCFunction)PyBobLearnEMKMeansMachine_get_variances_and_weights_for_each_cluster,
     METH_VARARGS|METH_KEYWORDS,
     get_variances_and_weights_for_each_cluster.doc()
-  },  
+  },
   {
     __get_variances_and_weights_for_each_cluster_init__.name(),
     (PyCFunction)PyBobLearnEMKMeansMachine_get_variances_and_weights_for_each_cluster_init,
     METH_VARARGS|METH_KEYWORDS,
     __get_variances_and_weights_for_each_cluster_init__.doc()
-  },  
+  },
   {
     __get_variances_and_weights_for_each_cluster_acc__.name(),
     (PyCFunction)PyBobLearnEMKMeansMachine_get_variances_and_weights_for_each_cluster_acc,
     METH_VARARGS|METH_KEYWORDS,
     __get_variances_and_weights_for_each_cluster_acc__.doc()
-  },  
+  },
   {
     __get_variances_and_weights_for_each_cluster_fin__.name(),
     (PyCFunction)PyBobLearnEMKMeansMachine_get_variances_and_weights_for_each_cluster_fin,
     METH_VARARGS|METH_KEYWORDS,
     __get_variances_and_weights_for_each_cluster_fin__.doc()
-  },  
+  },
 
   {0} /* Sentinel */
 };
@@ -863,4 +863,3 @@ bool init_BobLearnEMKMeansMachine(PyObject* module)
   Py_INCREF(&PyBobLearnEMKMeansMachine_Type);
   return PyModule_AddObject(module, "KMeansMachine", (PyObject*)&PyBobLearnEMKMeansMachine_Type) >= 0;
 }
-
diff --git a/bob/learn/em/linear_scoring.cpp b/bob/learn/em/linear_scoring.cpp
index 091b3f72bd2d0b45c42c5ff4ee75dc77e9ab3d6f..c475ff0cd9e0128450bf859ff461cb88b679e24e 100644
--- a/bob/learn/em/linear_scoring.cpp
+++ b/bob/learn/em/linear_scoring.cpp
@@ -15,7 +15,7 @@ static int extract_gmmstats_list(PyObject *list,
                              std::vector<boost::shared_ptr<const bob::learn::em::GMMStats> >& training_data)
 {
   for (int i=0; i<PyList_GET_SIZE(list); i++){
-  
+
     PyBobLearnEMGMMStatsObject* stats;
     if (!PyArg_Parse(PyList_GetItem(list, i), "O!", &PyBobLearnEMGMMStats_Type, &stats)){
       PyErr_Format(PyExc_RuntimeError, "Expected GMMStats objects");
@@ -30,7 +30,7 @@ static int extract_gmmmachine_list(PyObject *list,
                              std::vector<boost::shared_ptr<const bob::learn::em::GMMMachine> >& training_data)
 {
   for (int i=0; i<PyList_GET_SIZE(list); i++){
-  
+
     PyBobLearnEMGMMMachineObject* stats;
     if (!PyArg_Parse(PyList_GetItem(list, i), "O!", &PyBobLearnEMGMMMachine_Type, &stats)){
       PyErr_Format(PyExc_RuntimeError, "Expected GMMMachine objects");
@@ -53,7 +53,7 @@ int extract_array_list(PyObject* list, std::vector<blitz::Array<double,N> >& vec
 
   for (int i=0; i<PyList_GET_SIZE(list); i++)
   {
-    PyBlitzArrayObject* blitz_object; 
+    PyBlitzArrayObject* blitz_object;
     if (!PyArg_Parse(PyList_GetItem(list, i), "O&", &PyBlitzArray_Converter, &blitz_object)){
       PyErr_Format(PyExc_RuntimeError, "Expected numpy array object");
       return -1;
@@ -76,10 +76,10 @@ bob::extension::FunctionDoc linear_scoring1 = bob::extension::FunctionDoc(
   true
 )
 .add_prototype("models, ubm, test_stats, test_channelOffset, frame_length_normalisation", "output")
-.add_parameter("models", "list(:py:class:`bob.learn.em.GMMMachine`)", "")
+.add_parameter("models", "[:py:class:`bob.learn.em.GMMMachine`]", "")
 .add_parameter("ubm", ":py:class:`bob.learn.em.GMMMachine`", "")
-.add_parameter("test_stats", "list(:py:class:`bob.learn.em.GMMStats`)", "")
-.add_parameter("test_channelOffset", "list(array_like<float,1>)", "")
+.add_parameter("test_stats", "[:py:class:`bob.learn.em.GMMStats`]", "")
+.add_parameter("test_channelOffset", "[array_like<float,1>]", "")
 .add_parameter("frame_length_normalisation", "bool", "")
 .add_return("output","array_like<float,1>","Score");
 
@@ -117,10 +117,10 @@ bob::extension::FunctionDoc linear_scoring3 = bob::extension::FunctionDoc(
 .add_return("output","array_like<float,1>","Score");
 
 PyObject* PyBobLearnEM_linear_scoring(PyObject*, PyObject* args, PyObject* kwargs) {
-    
+
   //Cheking the number of arguments
   int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-    
+
   //Reading the first input argument
   PyObject* arg = 0;
   if (PyTuple_Size(args))
@@ -130,10 +130,10 @@ PyObject* PyBobLearnEM_linear_scoring(PyObject*, PyObject* args, PyObject* kwarg
     auto tmp_ = make_safe(tmp);
     arg = PyList_GET_ITEM(tmp, 0);
   }
-  
+
   //Checking the signature of the method (list of GMMMachine as input)
   if ((PyList_Check(arg)) && PyBobLearnEMGMMMachine_Check(PyList_GetItem(arg, 0)) && (nargs >= 3) && (nargs<=5) ){
-  
+
     char** kwlist = linear_scoring1.kwlist(0);
 
     PyObject* gmm_list_o                 = 0;
@@ -174,7 +174,7 @@ PyObject* PyBobLearnEM_linear_scoring(PyObject*, PyObject* args, PyObject* kwarg
 
   //Checking the signature of the method (list of arrays as input
   else if ((PyList_Check(arg)) && PyArray_Check(PyList_GetItem(arg, 0)) && (nargs >= 4) && (nargs<=6) ){
-  
+
     char** kwlist = linear_scoring2.kwlist(0);
 
     PyObject* model_supervector_list_o        = 0;
@@ -190,13 +190,13 @@ PyObject* PyBobLearnEM_linear_scoring(PyObject*, PyObject* args, PyObject* kwarg
                                                                        &PyList_Type, &stats_list_o,
                                                                        &PyList_Type, &channel_offset_list_o,
                                                                        &PyBool_Type, &frame_length_normalisation)){
-      linear_scoring2.print_usage(); 
+      linear_scoring2.print_usage();
       return 0;
     }
-    
+
     //protects acquired resources through this scope
     auto ubm_means_ = make_safe(ubm_means);
-    auto ubm_variances_ = make_safe(ubm_variances);    
+    auto ubm_variances_ = make_safe(ubm_variances);
 
     std::vector<blitz::Array<double,1> > model_supervector_list;
     if(extract_array_list(model_supervector_list_o ,model_supervector_list)!=0)
@@ -217,12 +217,12 @@ PyObject* PyBobLearnEM_linear_scoring(PyObject*, PyObject* args, PyObject* kwarg
       bob::learn::em::linearScoring(model_supervector_list, *PyBlitzArrayCxx_AsBlitz<double,1>(ubm_means),*PyBlitzArrayCxx_AsBlitz<double,1>(ubm_variances), stats_list, channel_offset_list, f(frame_length_normalisation),scores);
 
     return PyBlitzArrayCxx_AsConstNumpy(scores);
-  
+
   }
-  
+
   //Checking the signature of the method (list of arrays as input
   else if (PyArray_Check(arg) && (nargs >= 5) && (nargs<=6) ){
-  
+
     char** kwlist = linear_scoring3.kwlist(0);
 
     PyBlitzArrayObject* model                 = 0;
@@ -238,10 +238,10 @@ PyObject* PyBobLearnEM_linear_scoring(PyObject*, PyObject* args, PyObject* kwarg
                                                                        &PyBobLearnEMGMMStats_Type, &stats,
                                                                        &PyBlitzArray_Converter, &channel_offset,
                                                                        &PyBool_Type, &frame_length_normalisation)){
-      linear_scoring3.print_usage(); 
+      linear_scoring3.print_usage();
       return 0;
     }
-    
+
     //protects acquired resources through this scope
     auto model_ = make_safe(model);
     auto ubm_means_ = make_safe(ubm_means);
@@ -253,7 +253,7 @@ PyObject* PyBobLearnEM_linear_scoring(PyObject*, PyObject* args, PyObject* kwarg
     return Py_BuildValue("d",score);
   }
 
-  
+
   else{
     PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - linear_scoring requires 5 or 6 arguments, but you provided %d (see help)", nargs);
     linear_scoring1.print_usage();
@@ -263,4 +263,3 @@ PyObject* PyBobLearnEM_linear_scoring(PyObject*, PyObject* args, PyObject* kwarg
   }
 
 }
-
diff --git a/bob/learn/em/map_gmm_trainer.cpp b/bob/learn/em/map_gmm_trainer.cpp
index 37a220dc6f94ae48fdd2e37c310a6b06048581d1..eae15d4f232c55c3f8d3c573550ba0b1d4e940c3 100644
--- a/bob/learn/em/map_gmm_trainer.cpp
+++ b/bob/learn/em/map_gmm_trainer.cpp
@@ -195,7 +195,7 @@ static PyObject* PyBobLearnEMMAPGMMTrainer_RichCompare(PyBobLearnEMMAPGMMTrainer
 /***** relevance_factor *****/
 static auto relevance_factor = bob::extension::VariableDoc(
   "relevance_factor",
-  "double",
+  "float",
   "If set the reynolds_adaptation parameters, will apply the Reynolds Adaptation Factor. See Eq (14) from [Reynolds2000]_",
   ""
 );
@@ -208,7 +208,7 @@ int PyBobLearnEMMAPGMMTrainer_setRelevanceFactor(PyBobLearnEMMAPGMMTrainerObject
   BOB_TRY
 
   if(!PyBob_NumberCheck(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a double", Py_TYPE(self)->tp_name, relevance_factor.name());
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a float", Py_TYPE(self)->tp_name, relevance_factor.name());
     return -1;
   }
 
@@ -221,7 +221,7 @@ int PyBobLearnEMMAPGMMTrainer_setRelevanceFactor(PyBobLearnEMMAPGMMTrainerObject
 /***** alpha *****/
 static auto alpha = bob::extension::VariableDoc(
   "alpha",
-  "double",
+  "float",
   "Set directly the alpha parameter (Eq (14) from [Reynolds2000]_), ignoring zeroth order statistics as a weighting factor.",
   ""
 );
@@ -234,7 +234,7 @@ int PyBobLearnEMMAPGMMTrainer_setAlpha(PyBobLearnEMMAPGMMTrainerObject* self, Py
   BOB_TRY
 
   if(!PyBob_NumberCheck(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a double", Py_TYPE(self)->tp_name, alpha.name());
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a float", Py_TYPE(self)->tp_name, alpha.name());
     return -1;
   }
 
@@ -333,11 +333,11 @@ static PyObject* PyBobLearnEMMAPGMMTrainer_initialize(PyBobLearnEMMAPGMMTrainerO
 /*** e_step ***/
 static auto e_step = bob::extension::FunctionDoc(
   "e_step",
-  "Calculates and saves statistics across the dataset and saves these as :py:attr`gmm_statistics`. ",
+  "Calculates and saves statistics across the dataset and saves these as :py:attr:`gmm_statistics`. ",
 
   "Calculates the average log likelihood of the observations given the GMM,"
   "and returns this in average_log_likelihood."
-  "The statistics, :py:attr`gmm_statistics`, will be used in the :py:meth:`m_step` that follows.",
+  "The statistics, :py:attr:`gmm_statistics`, will be used in the :py:meth:`m_step` that follows.",
 
   true
 )
diff --git a/bob/learn/em/plda_base.cpp b/bob/learn/em/plda_base.cpp
index 5cfb508c1ec72117e2f31c41a6312fefaa7c3cab..bc9e5c3b341a759b9472c1ce68da3f45824b3139 100644
--- a/bob/learn/em/plda_base.cpp
+++ b/bob/learn/em/plda_base.cpp
@@ -39,8 +39,8 @@ static auto PLDABase_doc = bob::extension::ClassDoc(
   .add_parameter("dim_d", "int", "Dimensionality of the feature vector.")
   .add_parameter("dim_f", "int", "Size of :math:`F` (between class variantion matrix).")
   .add_parameter("dim_g", "int", "Size of :math:`G` (within class variantion matrix).")
-  .add_parameter("variance_threshold", "double", "The smallest possible value of the variance (Ignored if set to 0.)")
-  
+  .add_parameter("variance_threshold", "float", "The smallest possible value of the variance (Ignored if set to 0.)")
+
   .add_parameter("other", ":py:class:`bob.learn.em.PLDABase`", "A PLDABase object to be copied.")
   .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading")
 
@@ -80,21 +80,21 @@ static int PyBobLearnEMPLDABase_init_hdf5(PyBobLearnEMPLDABaseObject* self, PyOb
 static int PyBobLearnEMPLDABase_init_dim(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
 
   char** kwlist = PLDABase_doc.kwlist(0);
-  
+
   int dim_D, dim_F, dim_G = 1;
   double variance_threshold = 0.0;
 
-  //Here we have to select which keyword argument to read  
+  //Here we have to select which keyword argument to read
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "iii|d", kwlist, &dim_D, &dim_F, &dim_G, &variance_threshold)){
     PLDABase_doc.print_usage();
     return -1;
   }
-  
+
   if(dim_D <= 0){
     PyErr_Format(PyExc_TypeError, "dim_D argument must be greater than or equal to one");
     return -1;
   }
-  
+
   if(dim_F <= 0){
     PyErr_Format(PyExc_TypeError, "dim_F argument must be greater than or equal to one");
     return -1;
@@ -110,7 +110,7 @@ static int PyBobLearnEMPLDABase_init_dim(PyBobLearnEMPLDABaseObject* self, PyObj
     return -1;
   }
 
-  
+
   self->cxx.reset(new bob::learn::em::PLDABase(dim_D, dim_F, dim_G, variance_threshold));
   return 0;
 }
@@ -358,7 +358,7 @@ static PyObject* PyBobLearnEMPLDABase_getGtISigma(PyBobLearnEMPLDABaseObject* se
 /***** __logdet_alpha__ *****/
 static auto __logdet_alpha__ = bob::extension::VariableDoc(
   "__logdet_alpha__",
-  "double",
+  "float",
   "Gets :math:`\\log(\\det(\\alpha))`",
   ""
 );
@@ -371,7 +371,7 @@ static PyObject* PyBobLearnEMPLDABase_getLogDetAlpha(PyBobLearnEMPLDABaseObject*
 /***** __logdet_sigma__ *****/
 static auto __logdet_sigma__ = bob::extension::VariableDoc(
   "__logdet_sigma__",
-  "double",
+  "float",
   "Gets :math:`\\log(\\det(\\Sigma))`",
   ""
 );
@@ -385,7 +385,7 @@ static PyObject* PyBobLearnEMPLDABase_getLogDetSigma(PyBobLearnEMPLDABaseObject*
 /***** variance_threshold *****/
 static auto variance_threshold = bob::extension::VariableDoc(
   "variance_threshold",
-  "double",
+  "float",
   "",
   ""
 );
@@ -438,14 +438,14 @@ int PyBobLearnEMPLDABase_setSigma(PyBobLearnEMPLDABaseObject* self, PyObject* va
 }
 
 
-static PyGetSetDef PyBobLearnEMPLDABase_getseters[] = { 
+static PyGetSetDef PyBobLearnEMPLDABase_getseters[] = {
   {
    shape.name(),
    (getter)PyBobLearnEMPLDABase_getShape,
    0,
    shape.doc(),
    0
-  },  
+  },
   {
    F.name(),
    (getter)PyBobLearnEMPLDABase_getF,
@@ -549,9 +549,9 @@ static auto save = bob::extension::FunctionDoc(
 static PyObject* PyBobLearnEMPLDABase_Save(PyBobLearnEMPLDABaseObject* self,  PyObject* args, PyObject* kwargs) {
 
   BOB_TRY
-  
+
   // get list of arguments
-  char** kwlist = save.kwlist(0);  
+  char** kwlist = save.kwlist(0);
   PyBobIoHDF5FileObject* hdf5;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
 
@@ -571,12 +571,12 @@ static auto load = bob::extension::FunctionDoc(
 .add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for reading");
 static PyObject* PyBobLearnEMPLDABase_Load(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
-  char** kwlist = load.kwlist(0);  
+
+  char** kwlist = load.kwlist(0);
   PyBobIoHDF5FileObject* hdf5;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, PyBobIoHDF5File_Converter, &hdf5)) return 0;
-  
-  auto hdf5_ = make_safe(hdf5);  
+
+  auto hdf5_ = make_safe(hdf5);
   self->cxx->load(*hdf5->f);
 
   BOB_CATCH_MEMBER("cannot load the data", 0)
@@ -587,7 +587,7 @@ static PyObject* PyBobLearnEMPLDABase_Load(PyBobLearnEMPLDABaseObject* self, PyO
 /*** is_similar_to ***/
 static auto is_similar_to = bob::extension::FunctionDoc(
   "is_similar_to",
-  
+
   "Compares this PLDABase with the ``other`` one to be approximately the same.",
   "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
   "relative and absolute precision for the ``weights``, ``biases`` "
@@ -612,8 +612,8 @@ static PyObject* PyBobLearnEMPLDABase_IsSimilarTo(PyBobLearnEMPLDABaseObject* se
         &PyBobLearnEMPLDABase_Type, &other,
         &r_epsilon, &a_epsilon)){
 
-        is_similar_to.print_usage(); 
-        return 0;        
+        is_similar_to.print_usage();
+        return 0;
   }
 
   if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
@@ -648,7 +648,7 @@ static PyObject* PyBobLearnEMPLDABase_resize(PyBobLearnEMPLDABaseObject* self, P
     PyErr_Format(PyExc_TypeError, "dim_d argument must be greater than or equal to one");
     Py_RETURN_NONE;
   }
-  
+
   if(dim_F <= 0){
     PyErr_Format(PyExc_TypeError, "dim_f argument must be greater than or equal to one");
     Py_RETURN_NONE;
@@ -680,7 +680,7 @@ static auto get_gamma = bob::extension::FunctionDoc(
 .add_return("output","array_like <float, 2D>","Get the :math:`\\gamma` matrix");
 static PyObject* PyBobLearnEMPLDABase_getGamma(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = get_gamma.kwlist(0);
 
   int i = 0;
@@ -704,7 +704,7 @@ static auto has_gamma = bob::extension::FunctionDoc(
 .add_return("output","bool","");
 static PyObject* PyBobLearnEMPLDABase_hasGamma(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = has_gamma.kwlist(0);
   int i = 0;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) return 0;
@@ -713,7 +713,7 @@ static PyObject* PyBobLearnEMPLDABase_hasGamma(PyBobLearnEMPLDABaseObject* self,
     Py_RETURN_TRUE;
   else
     Py_RETURN_FALSE;
- BOB_CATCH_MEMBER("`has_gamma` could not be read", 0)    
+ BOB_CATCH_MEMBER("`has_gamma` could not be read", 0)
 }
 
 
@@ -725,22 +725,22 @@ static auto compute_gamma = bob::extension::FunctionDoc(
   0,
   true
 )
-.add_prototype("a,res","")
+.add_prototype("a,res")
 .add_parameter("a", "int", "Index")
 .add_parameter("res", "array_like <float, 2D>", "Input data");
 static PyObject* PyBobLearnEMPLDABase_computeGamma(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = compute_gamma.kwlist(0);
   int i = 0;
-  PyBlitzArrayObject* res = 0;  
+  PyBlitzArrayObject* res = 0;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "iO&", kwlist, &i, &PyBlitzArray_Converter, &res)) return 0;
 
-  auto res_ = make_safe(res);  
+  auto res_ = make_safe(res);
 
   self->cxx->computeGamma(i,*PyBlitzArrayCxx_AsBlitz<double,2>(res));
   Py_RETURN_NONE;
-  BOB_CATCH_MEMBER("`compute_gamma` could not be read", 0)    
+  BOB_CATCH_MEMBER("`compute_gamma` could not be read", 0)
 }
 
 /***** get_add_gamma *****/
@@ -757,7 +757,7 @@ static auto get_add_gamma = bob::extension::FunctionDoc(
 .add_return("output","array_like <float, 2D>","");
 static PyObject* PyBobLearnEMPLDABase_getAddGamma(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = get_add_gamma.kwlist(0);
 
   int i = 0;
@@ -781,7 +781,7 @@ static auto has_log_like_const_term = bob::extension::FunctionDoc(
 .add_return("output","bool","");
 static PyObject* PyBobLearnEMPLDABase_hasLogLikeConstTerm(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = has_log_like_const_term.kwlist(0);
   int i = 0;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) return 0;
@@ -790,7 +790,7 @@ static PyObject* PyBobLearnEMPLDABase_hasLogLikeConstTerm(PyBobLearnEMPLDABaseOb
     Py_RETURN_TRUE;
   else
     Py_RETURN_FALSE;
- BOB_CATCH_MEMBER("`has_log_like_const_term` could not be read", 0)    
+ BOB_CATCH_MEMBER("`has_log_like_const_term` could not be read", 0)
 }
 
 
@@ -803,22 +803,22 @@ static auto compute_log_like_const_term = bob::extension::FunctionDoc(
   0,
   true
 )
-.add_prototype("a,res","")
+.add_prototype("a,res")
 .add_parameter("a", "int", "Index")
 .add_parameter("res", "array_like <float, 2D>", "Input data");
 static PyObject* PyBobLearnEMPLDABase_computeLogLikeConstTerm(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = compute_log_like_const_term.kwlist(0);
   int i = 0;
-  PyBlitzArrayObject* res = 0;  
+  PyBlitzArrayObject* res = 0;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "iO&", kwlist, &i, &PyBlitzArray_Converter, &res)) return 0;
 
-  auto res_ = make_safe(res);  
+  auto res_ = make_safe(res);
 
   self->cxx->computeLogLikeConstTerm(i,*PyBlitzArrayCxx_AsBlitz<double,2>(res));
   Py_RETURN_NONE;
-  BOB_CATCH_MEMBER("`compute_gamma` could not be read", 0)    
+  BOB_CATCH_MEMBER("`compute_log_like_const_term` could not be read", 0)
 }
 
 
@@ -833,17 +833,17 @@ static auto get_add_log_like_const_term = bob::extension::FunctionDoc(
 )
 .add_prototype("a","output")
 .add_parameter("a", "int", "Index")
-.add_return("output","double","");
+.add_return("output","float","");
 static PyObject* PyBobLearnEMPLDABase_getAddLogLikeConstTerm(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = get_add_log_like_const_term.kwlist(0);
   int i = 0;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) return 0;
 
   return Py_BuildValue("d",self->cxx->getAddLogLikeConstTerm(i));
 
-  BOB_CATCH_MEMBER("`get_add_log_like_const_term` could not be read", 0)    
+  BOB_CATCH_MEMBER("`get_add_log_like_const_term` could not be read", 0)
 }
 
 
@@ -857,17 +857,17 @@ static auto get_log_like_const_term = bob::extension::FunctionDoc(
 )
 .add_prototype("a","output")
 .add_parameter("a", "int", "Index")
-.add_return("output","double","");
+.add_return("output","float","");
 static PyObject* PyBobLearnEMPLDABase_getLogLikeConstTerm(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = get_log_like_const_term.kwlist(0);
   int i = 0;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "i", kwlist, &i)) return 0;
 
   return Py_BuildValue("d",self->cxx->getLogLikeConstTerm(i));
 
-  BOB_CATCH_MEMBER("`get_log_like_const_term` could not be read", 0)    
+  BOB_CATCH_MEMBER("`get_log_like_const_term` could not be read", 0)
 }
 
 /***** clear_maps *****/
@@ -877,14 +877,14 @@ static auto clear_maps = bob::extension::FunctionDoc(
   0,
   true
 )
-.add_prototype("","");
+.add_prototype("");
 static PyObject* PyBobLearnEMPLDABase_clearMaps(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   self->cxx->clearMaps();
   Py_RETURN_NONE;
 
-  BOB_CATCH_MEMBER("`clear_maps` could not be read", 0)    
+  BOB_CATCH_MEMBER("`clear_maps` could not be read", 0)
 }
 
 
@@ -902,23 +902,23 @@ static auto compute_log_likelihood_point_estimate = bob::extension::FunctionDoc(
 .add_parameter("xij", "array_like <float, 1D>", "")
 .add_parameter("hi", "array_like <float, 1D>", "")
 .add_parameter("wij", "array_like <float, 1D>", "")
-.add_return("output", "double", "");
+.add_return("output", "float", "");
 static PyObject* PyBobLearnEMPLDABase_computeLogLikelihoodPointEstimate(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   char** kwlist = compute_log_likelihood_point_estimate.kwlist(0);
-  PyBlitzArrayObject* xij, *hi, *wij;  
+  PyBlitzArrayObject* xij, *hi, *wij;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&O&", kwlist, &PyBlitzArray_Converter, &xij,
                                                                &PyBlitzArray_Converter, &hi,
                                                                &PyBlitzArray_Converter, &wij)) return 0;
 
   auto xij_ = make_safe(xij);
   auto hi_ = make_safe(hi);
-  auto wij_ = make_safe(wij);  
+  auto wij_ = make_safe(wij);
 
   return Py_BuildValue("d", self->cxx->computeLogLikelihoodPointEstimate(*PyBlitzArrayCxx_AsBlitz<double,1>(xij), *PyBlitzArrayCxx_AsBlitz<double,1>(hi), *PyBlitzArrayCxx_AsBlitz<double,1>(wij)));
-  
-  BOB_CATCH_MEMBER("`compute_log_likelihood_point_estimate` could not be read", 0)    
+
+  BOB_CATCH_MEMBER("`compute_log_likelihood_point_estimate` could not be read", 0)
 }
 
 /***** __precompute__ *****/
@@ -931,11 +931,11 @@ static auto __precompute__ = bob::extension::FunctionDoc(
 );
 static PyObject* PyBobLearnEMPLDABase_precompute(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   self->cxx->precompute();
   Py_RETURN_NONE;
 
-  BOB_CATCH_MEMBER("`precompute` could not be read", 0)    
+  BOB_CATCH_MEMBER("`precompute` could not be read", 0)
 }
 
 
@@ -951,11 +951,11 @@ static auto __precompute_log_like__ = bob::extension::FunctionDoc(
 );
 static PyObject* PyBobLearnEMPLDABase_precomputeLogLike(PyBobLearnEMPLDABaseObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
-  
+
   self->cxx->precomputeLogLike();
   Py_RETURN_NONE;
 
-  BOB_CATCH_MEMBER("`__precompute_log_like__` could not be read", 0)    
+  BOB_CATCH_MEMBER("`__precompute_log_like__` could not be read", 0)
 }
 
 
@@ -1013,25 +1013,25 @@ static PyMethodDef PyBobLearnEMPLDABase_methods[] = {
     (PyCFunction)PyBobLearnEMPLDABase_hasLogLikeConstTerm,
     METH_VARARGS|METH_KEYWORDS,
     has_log_like_const_term.doc()
-  },  
+  },
   {
     compute_log_like_const_term.name(),
     (PyCFunction)PyBobLearnEMPLDABase_computeLogLikeConstTerm,
     METH_VARARGS|METH_KEYWORDS,
     compute_log_like_const_term.doc()
-  },  
+  },
   {
     get_add_log_like_const_term.name(),
     (PyCFunction)PyBobLearnEMPLDABase_getAddLogLikeConstTerm,
     METH_VARARGS|METH_KEYWORDS,
     get_add_log_like_const_term.doc()
-  },  
+  },
   {
     get_log_like_const_term.name(),
     (PyCFunction)PyBobLearnEMPLDABase_getLogLikeConstTerm,
     METH_VARARGS|METH_KEYWORDS,
     get_log_like_const_term.doc()
-  },  
+  },
   {
     clear_maps.name(),
     (PyCFunction)PyBobLearnEMPLDABase_clearMaps,
@@ -1049,13 +1049,13 @@ static PyMethodDef PyBobLearnEMPLDABase_methods[] = {
     (PyCFunction)PyBobLearnEMPLDABase_precompute,
     METH_NOARGS,
     __precompute__.doc()
-  },   
+  },
   {
     __precompute_log_like__.name(),
     (PyCFunction)PyBobLearnEMPLDABase_precomputeLogLike,
     METH_NOARGS,
     __precompute_log_like__.doc()
-  },     
+  },
   {0} /* Sentinel */
 };
 
@@ -1095,4 +1095,3 @@ bool init_BobLearnEMPLDABase(PyObject* module)
   Py_INCREF(&PyBobLearnEMPLDABase_Type);
   return PyModule_AddObject(module, "PLDABase", (PyObject*)&PyBobLearnEMPLDABase_Type) >= 0;
 }
-
diff --git a/bob/learn/em/plda_machine.cpp b/bob/learn/em/plda_machine.cpp
index 0224d96b35615448481ce8db6a4bdbc54c1528bc..68df48851f9d8fad75e67790dad9bc127639e436 100644
--- a/bob/learn/em/plda_machine.cpp
+++ b/bob/learn/em/plda_machine.cpp
@@ -208,7 +208,7 @@ int PyBobLearnEMPLDAMachine_setNSamples(PyBobLearnEMPLDAMachineObject* self, PyO
 /***** w_sum_xit_beta_xi *****/
 static auto w_sum_xit_beta_xi = bob::extension::VariableDoc(
   "w_sum_xit_beta_xi",
-  "double",
+  "float",
   "Gets the :math:`A = -0.5 \\sum_{i} x_{i}^T \\beta x_{i}` value",
   ""
 );
@@ -300,7 +300,7 @@ int PyBobLearnEMPLDAMachine_setWeightedSum(PyBobLearnEMPLDAMachineObject* self,
 /***** log_likelihood *****/
 static auto log_likelihood = bob::extension::VariableDoc(
   "log_likelihood",
-  "double",
+  "float",
   "",
   ""
 );
@@ -313,7 +313,7 @@ int PyBobLearnEMPLDAMachine_setLogLikelihood(PyBobLearnEMPLDAMachineObject* self
   BOB_TRY
 
   if (!PyBob_NumberCheck(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects an double", Py_TYPE(self)->tp_name, log_likelihood.name());
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a float", Py_TYPE(self)->tp_name, log_likelihood.name());
     return -1;
   }
 
@@ -572,7 +572,7 @@ static auto get_add_log_like_const_term = bob::extension::FunctionDoc(
 )
 .add_prototype("a","output")
 .add_parameter("a", "int", "Index")
-.add_return("output","double","");
+.add_return("output","float","");
 static PyObject* PyBobLearnEMPLDAMachine_getAddLogLikeConstTerm(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
 
@@ -596,7 +596,7 @@ static auto get_log_like_const_term = bob::extension::FunctionDoc(
 )
 .add_prototype("a","output")
 .add_parameter("a", "int", "Index")
-.add_return("output","double","");
+.add_return("output","float","");
 static PyObject* PyBobLearnEMPLDAMachine_getLogLikeConstTerm(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
 
@@ -616,7 +616,7 @@ static auto clear_maps = bob::extension::FunctionDoc(
   0,
   true
 )
-.add_prototype("","");
+.add_prototype("");
 static PyObject* PyBobLearnEMPLDAMachine_clearMaps(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
 
diff --git a/bob/learn/em/train.py b/bob/learn/em/train.py
index 517e94104251d9eaf64dcc2c205f2055ae4c7be9..ee2ecd44cfbb3b394d23565ec703b50d6a742705 100644
--- a/bob/learn/em/train.py
+++ b/bob/learn/em/train.py
@@ -59,14 +59,14 @@ def train(trainer, machine, data, max_iterations = 50, convergence_threshold=Non
 
 def train_jfa(trainer, jfa_base, data, max_iterations=10, initialize=True, rng=None):
   """
-  Trains a :py:class`bob.learn.em.JFABase` given a :py:class`bob.learn.em.JFATrainer` and the proper data
+  Trains a :py:class:`bob.learn.em.JFABase` given a :py:class:`bob.learn.em.JFATrainer` and the proper data
 
   **Parameters**:
-    trainer : :py:class`bob.learn.em.JFATrainer`
+    trainer : :py:class:`bob.learn.em.JFATrainer`
       A JFA trainer mechanism
-    jfa_base : :py:class`bob.learn.em.JFABase`
+    jfa_base : :py:class:`bob.learn.em.JFABase`
       A container machine
-    data : [[:py:class`bob.learn.em.GMMStats`]]
+    data : [[:py:class:`bob.learn.em.GMMStats`]]
       The data to be trained
     max_iterations : int
       The maximum number of iterations to train a machine