From bd27441b4c14a1900a5d4e7cdf21e695316f1dbd Mon Sep 17 00:00:00 2001
From: Manuel Guenther <manuel.guenther@idiap.ch>
Date: Thu, 5 Mar 2015 19:45:10 +0100
Subject: [PATCH] Change 'enrol' (BE) to 'enroll' (AE)

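Rename the public 'enrol' methods of ISVTrainer, JFATrainer and
PLDATrainer, along with the matching C++ API, member variables, tests
and documentation, to the American English spelling 'enroll'. No
backward-compatible alias is kept, so calling code has to be updated.
A minimal sketch of the required change, using illustrative variable
names that mirror the updated tests:

    # before this patch
    isv_trainer.enrol(isv_machine, gmm_stats, n_iter)
    # after this patch
    isv_trainer.enroll(isv_machine, gmm_stats, n_iter)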
---
 bob/learn/em/cpp/ISVTrainer.cpp               |   9 +-
 bob/learn/em/cpp/JFATrainer.cpp               |   3 +-
 bob/learn/em/cpp/PLDAMachine.cpp              |  16 +--
 bob/learn/em/cpp/PLDATrainer.cpp              |   2 +-
 bob/learn/em/cpp/ZTNorm.cpp                   |  15 ++-
 .../em/include/bob.learn.em/ISVMachine.h      |   6 +-
 .../em/include/bob.learn.em/ISVTrainer.h      |   6 +-
 .../em/include/bob.learn.em/JFAMachine.h      |   2 +-
 .../em/include/bob.learn.em/JFATrainer.h      |   4 +-
 .../em/include/bob.learn.em/PLDATrainer.h     |  12 +-
 bob/learn/em/isv_trainer.cpp                  |  69 ++++++-----
 bob/learn/em/jfa_trainer.cpp                  | 115 +++++++++---------
 bob/learn/em/plda_trainer.cpp                 |  34 +++---
 bob/learn/em/test/test_jfa_trainer.py         |  12 +-
 bob/learn/em/test/test_plda_trainer.py        |  15 ++-
 doc/guide.rst                                 |  27 ++--
 16 files changed, 169 insertions(+), 178 deletions(-)

diff --git a/bob/learn/em/cpp/ISVTrainer.cpp b/bob/learn/em/cpp/ISVTrainer.cpp
index cd3caa4..cb00321 100644
--- a/bob/learn/em/cpp/ISVTrainer.cpp
+++ b/bob/learn/em/cpp/ISVTrainer.cpp
@@ -46,7 +46,7 @@ bob::learn::em::ISVTrainer& bob::learn::em::ISVTrainer::operator=
 
 bool bob::learn::em::ISVTrainer::operator==(const bob::learn::em::ISVTrainer& b) const
 {
-  return m_rng == b.m_rng && 
+  return m_rng == b.m_rng &&
          m_relevance_factor == b.m_relevance_factor;
 }
 
@@ -58,7 +58,7 @@ bool bob::learn::em::ISVTrainer::operator!=(const bob::learn::em::ISVTrainer& b)
 bool bob::learn::em::ISVTrainer::is_similar_to(const bob::learn::em::ISVTrainer& b,
   const double r_epsilon, const double a_epsilon) const
 {
-  return  m_rng == b.m_rng && 
+  return  m_rng == b.m_rng &&
           m_relevance_factor == b.m_relevance_factor;
 }
 
@@ -105,7 +105,7 @@ double bob::learn::em::ISVTrainer::computeLikelihood(bob::learn::em::ISVBase& ma
   return 0;
 }
 
-void bob::learn::em::ISVTrainer::enrol(bob::learn::em::ISVMachine& machine,
+void bob::learn::em::ISVTrainer::enroll(bob::learn::em::ISVMachine& machine,
   const std::vector<boost::shared_ptr<bob::learn::em::GMMStats> >& ar,
   const size_t n_iter)
 {
@@ -125,6 +125,3 @@ void bob::learn::em::ISVTrainer::enrol(bob::learn::em::ISVMachine& machine,
   const blitz::Array<double,1> z(m_base_trainer.getZ()[0]);
   machine.setZ(z);
 }
-
-
-
diff --git a/bob/learn/em/cpp/JFATrainer.cpp b/bob/learn/em/cpp/JFATrainer.cpp
index 9feaea7..23591c8 100644
--- a/bob/learn/em/cpp/JFATrainer.cpp
+++ b/bob/learn/em/cpp/JFATrainer.cpp
@@ -174,7 +174,7 @@ void bob::learn::em::JFATrainer::train(bob::learn::em::JFABase& machine,
 }
 */
 
-void bob::learn::em::JFATrainer::enrol(bob::learn::em::JFAMachine& machine,
+void bob::learn::em::JFATrainer::enroll(bob::learn::em::JFAMachine& machine,
   const std::vector<boost::shared_ptr<bob::learn::em::GMMStats> >& ar,
   const size_t n_iter)
 {
@@ -197,4 +197,3 @@ void bob::learn::em::JFATrainer::enrol(bob::learn::em::JFAMachine& machine,
   machine.setY(y);
   machine.setZ(z);
 }
-
diff --git a/bob/learn/em/cpp/PLDAMachine.cpp b/bob/learn/em/cpp/PLDAMachine.cpp
index a390cb1..fe60630 100644
--- a/bob/learn/em/cpp/PLDAMachine.cpp
+++ b/bob/learn/em/cpp/PLDAMachine.cpp
@@ -827,13 +827,13 @@ double bob::learn::em::PLDAMachine::forward(const blitz::Array<double,2>& sample
 }
 
 double bob::learn::em::PLDAMachine::computeLogLikelihood(const blitz::Array<double,1>& sample,
-  bool enrol) const
+  bool enroll) const
 {
   if (!m_plda_base) throw std::runtime_error("No PLDABase set to this machine");
   // Check dimensionality
   bob::core::array::assertSameDimensionLength(sample.extent(0), getDimD());
 
-  int n_samples = 1 + (enrol?m_n_samples:0);
+  int n_samples = 1 + (enroll?m_n_samples:0);
 
   // 3/ Third term of the likelihood: -1/2*X^T*(SIGMA+A.A^T)^-1*X
   //    Efficient way: -1/2*sum_i(xi^T.sigma^-1.xi - xi^T.sigma^-1*G*(I+G^T.sigma^-1.G)^-1*G^T*sigma^-1.xi
@@ -842,9 +842,9 @@ double bob::learn::em::PLDAMachine::computeLogLikelihood(const blitz::Array<doub
   const blitz::Array<double,2>& beta = getPLDABase()->getBeta();
   const blitz::Array<double,2>& Ft_beta = getPLDABase()->getFtBeta();
   const blitz::Array<double,1>& mu = getPLDABase()->getMu();
-  double terma = (enrol?m_nh_sum_xit_beta_xi:0.);
+  double terma = (enroll?m_nh_sum_xit_beta_xi:0.);
   // sumWeighted
-  if (enrol && m_n_samples > 0) m_tmp_nf_1 = m_weighted_sum;
+  if (enroll && m_n_samples > 0) m_tmp_nf_1 = m_weighted_sum;
   else m_tmp_nf_1 = 0;
 
   // terma += -1 / 2. * (xi^t*beta*xi)
@@ -882,13 +882,13 @@ double bob::learn::em::PLDAMachine::computeLogLikelihood(const blitz::Array<doub
 }
 
 double bob::learn::em::PLDAMachine::computeLogLikelihood(const blitz::Array<double,2>& samples,
-  bool enrol) const
+  bool enroll) const
 {
   if (!m_plda_base) throw std::runtime_error("No PLDABase set to this machine");
   // Check dimensionality
   bob::core::array::assertSameDimensionLength(samples.extent(1), getDimD());
 
-  int n_samples = samples.extent(0) + (enrol?m_n_samples:0);
+  int n_samples = samples.extent(0) + (enroll?m_n_samples:0);
   // 3/ Third term of the likelihood: -1/2*X^T*(SIGMA+A.A^T)^-1*X
   //    Efficient way: -1/2*sum_i(xi^T.sigma^-1.xi - xi^T.sigma^-1*G*(I+G^T.sigma^-1.G)^-1*G^T*sigma^-1.xi
   //      -1/2*sumWeighted^T*(I+aF^T.(sigma^-1-sigma^-1*G*(I+G^T.sigma^-1.G)^-1*G^T*sigma^-1).F)^-1*sumWeighted
@@ -896,9 +896,9 @@ double bob::learn::em::PLDAMachine::computeLogLikelihood(const blitz::Array<doub
   const blitz::Array<double,2>& beta = getPLDABase()->getBeta();
   const blitz::Array<double,2>& Ft_beta = getPLDABase()->getFtBeta();
   const blitz::Array<double,1>& mu = getPLDABase()->getMu();
-  double terma = (enrol?m_nh_sum_xit_beta_xi:0.);
+  double terma = (enroll?m_nh_sum_xit_beta_xi:0.);
   // sumWeighted
-  if (enrol && m_n_samples > 0) m_tmp_nf_1 = m_weighted_sum;
+  if (enroll && m_n_samples > 0) m_tmp_nf_1 = m_weighted_sum;
   else m_tmp_nf_1 = 0;
   for (int k=0; k<samples.extent(0); ++k)
   {
diff --git a/bob/learn/em/cpp/PLDATrainer.cpp b/bob/learn/em/cpp/PLDATrainer.cpp
index f04e1ea..323ea6c 100644
--- a/bob/learn/em/cpp/PLDATrainer.cpp
+++ b/bob/learn/em/cpp/PLDATrainer.cpp
@@ -747,7 +747,7 @@ void bob::learn::em::PLDATrainer::updateSigma(bob::learn::em::PLDABase& machine,
 }
 
 
-void bob::learn::em::PLDATrainer::enrol(bob::learn::em::PLDAMachine& plda_machine,
+void bob::learn::em::PLDATrainer::enroll(bob::learn::em::PLDAMachine& plda_machine,
   const blitz::Array<double,2>& ar) const
 {
   // Gets dimension
diff --git a/bob/learn/em/cpp/ZTNorm.cpp b/bob/learn/em/cpp/ZTNorm.cpp
index 707b9a1..0319d04 100644
--- a/bob/learn/em/cpp/ZTNorm.cpp
+++ b/bob/learn/em/cpp/ZTNorm.cpp
@@ -26,13 +26,13 @@ static void _ztNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
 
   // Compute the sizes
   int size_eval  = A.extent(0);
-  int size_enrol = A.extent(1);
+  int size_enroll = A.extent(1);
   int size_tnorm = (C ? C->extent(0) : 0);
   int size_znorm = (B ? B->extent(1) : 0);
 
   // Check the inputs
   bob::core::array::assertSameDimensionLength(A.extent(0), size_eval);
-  bob::core::array::assertSameDimensionLength(A.extent(1), size_enrol);
+  bob::core::array::assertSameDimensionLength(A.extent(1), size_enroll);
 
   if (B) {
     bob::core::array::assertSameDimensionLength(B->extent(1), size_znorm);
@@ -43,7 +43,7 @@ static void _ztNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
   if (C) {
     bob::core::array::assertSameDimensionLength(C->extent(0), size_tnorm);
     if (size_tnorm > 0)
-      bob::core::array::assertSameDimensionLength(C->extent(1), size_enrol);
+      bob::core::array::assertSameDimensionLength(C->extent(1), size_enroll);
   }
 
   if (D && size_znorm > 0 && size_tnorm > 0) {
@@ -57,7 +57,7 @@ static void _ztNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
   }
 
   bob::core::array::assertSameDimensionLength(scores.extent(0), size_eval);
-  bob::core::array::assertSameDimensionLength(scores.extent(1), size_enrol);
+  bob::core::array::assertSameDimensionLength(scores.extent(1), size_enroll);
 
   // Declare needed IndexPlaceholder
   blitz::firstIndex ii;
@@ -87,7 +87,7 @@ static void _ztNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
   else
     zA = A;
 
-  blitz::Array<double,2> zC(size_tnorm, size_enrol);
+  blitz::Array<double,2> zC(size_tnorm, size_enroll);
   if (D && size_tnorm > 0 && size_znorm > 0) {
     blitz::Array<double,1> mean_Dimp(size_tnorm);
     blitz::Array<double,1> std_Dimp(size_tnorm);
@@ -125,8 +125,8 @@ static void _ztNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
 
   if (C && size_tnorm > 0)
   {
-    blitz::Array<double,1> mean_zC(size_enrol);
-    blitz::Array<double,1> std_zC(size_enrol);
+    blitz::Array<double,1> mean_zC(size_enroll);
+    blitz::Array<double,1> std_zC(size_enroll);
 
     // ztA = (zA - mean(zC)) / std(zC)  [ztnorm on eval scores]
     mean_zC = blitz::mean(zC(jj, ii), jj);
@@ -179,4 +179,3 @@ void bob::learn::em::zNorm(const blitz::Array<double,2>& rawscores_probes_vs_mod
   _ztNorm(rawscores_probes_vs_models, &rawscores_zprobes_vs_models, NULL,
                  NULL, NULL, scores);
 }
-
diff --git a/bob/learn/em/include/bob.learn.em/ISVMachine.h b/bob/learn/em/include/bob.learn.em/ISVMachine.h
index 8b941bc..e4a5cc9 100644
--- a/bob/learn/em/include/bob.learn.em/ISVMachine.h
+++ b/bob/learn/em/include/bob.learn.em/ISVMachine.h
@@ -178,7 +178,7 @@ class ISVMachine
     *
     * @param input input data used by the machine
     * @warning Inputs are checked
-    * @return score value computed by the machine    
+    * @return score value computed by the machine
     */
     double forward(const bob::learn::em::GMMStats& input);
     /**
@@ -193,7 +193,7 @@ class ISVMachine
      *
      * @param input input data used by the machine
      * @warning Inputs are NOT checked
-     * @return score value computed by the machine     
+     * @return score value computed by the machine
      */
     double forward_(const bob::learn::em::GMMStats& input);
 
@@ -214,7 +214,7 @@ class ISVMachine
     // UBM
     boost::shared_ptr<bob::learn::em::ISVBase> m_isv_base;
 
-    // y and z vectors/factors learned during the enrolment procedure
+    // y and z vectors/factors learned during the enrollment procedure
     blitz::Array<double,1> m_z;
 
     // cache
diff --git a/bob/learn/em/include/bob.learn.em/ISVTrainer.h b/bob/learn/em/include/bob.learn.em/ISVTrainer.h
index f0177ec..ae8f008 100644
--- a/bob/learn/em/include/bob.learn.em/ISVTrainer.h
+++ b/bob/learn/em/include/bob.learn.em/ISVTrainer.h
@@ -92,7 +92,7 @@ class ISVTrainer
     /**
-     * @brief Enrol a client
+     * @brief Enroll a client
      */
-    void enrol(bob::learn::em::ISVMachine& machine,
+    void enroll(bob::learn::em::ISVMachine& machine,
       const std::vector<boost::shared_ptr<bob::learn::em::GMMStats> >& features,
       const size_t n_iter);
 
@@ -133,7 +133,7 @@ class ISVTrainer
     { m_base_trainer.setAccUA1(acc); }
     void setAccUA2(const blitz::Array<double,2>& acc)
     { m_base_trainer.setAccUA2(acc); }
-    
+
     /**
      * @brief Sets the Random Number Generator
      */
diff --git a/bob/learn/em/include/bob.learn.em/JFAMachine.h b/bob/learn/em/include/bob.learn.em/JFAMachine.h
index 6569b85..51649fe 100644
--- a/bob/learn/em/include/bob.learn.em/JFAMachine.h
+++ b/bob/learn/em/include/bob.learn.em/JFAMachine.h
@@ -237,7 +237,7 @@ class JFAMachine
     // UBM
     boost::shared_ptr<bob::learn::em::JFABase> m_jfa_base;
 
-    // y and z vectors/factors learned during the enrolment procedure
+    // y and z vectors/factors learned during the enrollment procedure
     blitz::Array<double,1> m_y;
     blitz::Array<double,1> m_z;
 
diff --git a/bob/learn/em/include/bob.learn.em/JFATrainer.h b/bob/learn/em/include/bob.learn.em/JFATrainer.h
index 07c0646..d697442 100644
--- a/bob/learn/em/include/bob.learn.em/JFATrainer.h
+++ b/bob/learn/em/include/bob.learn.em/JFATrainer.h
@@ -145,7 +145,7 @@ class JFATrainer
     /**
-     * @brief Enrol a client
+     * @brief Enroll a client
      */
-    void enrol(bob::learn::em::JFAMachine& machine,
+    void enroll(bob::learn::em::JFAMachine& machine,
       const std::vector<boost::shared_ptr<bob::learn::em::GMMStats> >& features,
       const size_t n_iter);
 
diff --git a/bob/learn/em/include/bob.learn.em/PLDATrainer.h b/bob/learn/em/include/bob.learn.em/PLDATrainer.h
index 3fae06f..f5bc402 100644
--- a/bob/learn/em/include/bob.learn.em/PLDATrainer.h
+++ b/bob/learn/em/include/bob.learn.em/PLDATrainer.h
@@ -215,10 +215,10 @@ class PLDATrainer
     /**
      * @brief Main procedure for enrolling a PLDAMachine
      */
-    void enrol(bob::learn::em::PLDAMachine& plda_machine,
+    void enroll(bob::learn::em::PLDAMachine& plda_machine,
       const blitz::Array<double,2>& ar) const;
-      
-      
+
+
     /**
      * @brief Sets the Random Number Generator
      */
@@ -229,12 +229,12 @@ class PLDATrainer
      * @brief Gets the Random Number Generator
      */
     boost::shared_ptr<boost::mt19937> getRng() const
-    { return m_rng; }      
+    { return m_rng; }
 
   private:
-  
+
 	    boost::shared_ptr<boost::mt19937> m_rng;
-	  
+
 	    //representation
 	    size_t m_dim_d; ///< Dimensionality of the input features
 	    size_t m_dim_f; ///< Size/rank of the \f$F\f$ subspace
diff --git a/bob/learn/em/isv_trainer.cpp b/bob/learn/em/isv_trainer.cpp
index 2bafaf8..468d49f 100644
--- a/bob/learn/em/isv_trainer.cpp
+++ b/bob/learn/em/isv_trainer.cpp
@@ -18,7 +18,7 @@ static int extract_GMMStats_1d(PyObject *list,
                              std::vector<boost::shared_ptr<bob::learn::em::GMMStats> >& training_data)
 {
   for (int i=0; i<PyList_GET_SIZE(list); i++){
-  
+
     PyBobLearnEMGMMStatsObject* stats;
     if (!PyArg_Parse(PyList_GetItem(list, i), "O!", &PyBobLearnEMGMMStats_Type, &stats)){
       PyErr_Format(PyExc_RuntimeError, "Expected GMMStats objects");
@@ -69,7 +69,7 @@ int list_as_vector(PyObject* list, std::vector<blitz::Array<double,N> >& vec)
 {
   for (int i=0; i<PyList_GET_SIZE(list); i++)
   {
-    PyBlitzArrayObject* blitz_object; 
+    PyBlitzArrayObject* blitz_object;
     if (!PyArg_Parse(PyList_GetItem(list, i), "O&", &PyBlitzArray_Converter, &blitz_object)){
       PyErr_Format(PyExc_RuntimeError, "Expected numpy array object");
       return -1;
@@ -156,7 +156,7 @@ static int PyBobLearnEMISVTrainer_init(PyBobLearnEMISVTrainerObject* self, PyObj
         auto tmp_ = make_safe(tmp);
         arg = PyList_GET_ITEM(tmp, 0);
       }
-      
+
       if(PyBobLearnEMISVTrainer_Check(arg))
         // If the constructor input is ISVTrainer object
         return PyBobLearnEMISVTrainer_init_copy(self, args, kwargs);
@@ -230,24 +230,24 @@ int PyBobLearnEMISVTrainer_set_acc_u_a1(PyBobLearnEMISVTrainerObject* self, PyOb
     return -1;
   }
   auto o_ = make_safe(input);
-  
-  // perform check on the input  
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, acc_u_a1.name());
     return -1;
-  }  
+  }
 
   if (input->ndim != 3){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 3D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, acc_u_a1.name());
     return -1;
-  }  
+  }
 
   if (input->shape[0] != (Py_ssize_t)self->cxx->getAccUA1().extent(0) && input->shape[1] != (Py_ssize_t)self->cxx->getAccUA1().extent(1) && input->shape[2] != (Py_ssize_t)self->cxx->getAccUA1().extent(2)) {
     PyErr_Format(PyExc_TypeError, "`%s' 3D `input` array should have the shape [%" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d] not [%" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d] for `%s`", Py_TYPE(self)->tp_name, (Py_ssize_t)self->cxx->getAccUA1().extent(0), (Py_ssize_t)self->cxx->getAccUA1().extent(1), (Py_ssize_t)self->cxx->getAccUA1().extent(2), (Py_ssize_t)input->shape[0], (Py_ssize_t)input->shape[1], (Py_ssize_t)input->shape[2], acc_u_a1.name());
     return -1;
-  }  
+  }
+
 
-  
   auto b = PyBlitzArrayCxx_AsBlitz<double,3>(input, "acc_u_a1");
   if (!b) return -1;
   self->cxx->setAccUA1(*b);
@@ -275,23 +275,23 @@ int PyBobLearnEMISVTrainer_set_acc_u_a2(PyBobLearnEMISVTrainerObject* self, PyOb
     return -1;
   }
   auto o_ = make_safe(input);
-  
-  // perform check on the input  
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, acc_u_a2.name());
     return -1;
-  }  
+  }
 
   if (input->ndim != 2){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 2D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, acc_u_a2.name());
     return -1;
-  }  
+  }
 
   if (input->shape[0] != (Py_ssize_t)self->cxx->getAccUA2().extent(0) && input->shape[1] != (Py_ssize_t)self->cxx->getAccUA2().extent(1)) {
     PyErr_Format(PyExc_TypeError, "`%s' 3D `input` array should have the shape [%" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d] not [%" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d] for `%s`", Py_TYPE(self)->tp_name, (Py_ssize_t)self->cxx->getAccUA2().extent(0), (Py_ssize_t)self->cxx->getAccUA2().extent(1), input->shape[0], input->shape[1], acc_u_a2.name());
     return -1;
-  }  
-  
+  }
+
   auto b = PyBlitzArrayCxx_AsBlitz<double,2>(input, "acc_u_a2");
   if (!b) return -1;
   self->cxx->setAccUA2(*b);
@@ -319,12 +319,12 @@ int PyBobLearnEMISVTrainer_set_X(PyBobLearnEMISVTrainerObject* self, PyObject* v
     PyErr_Format(PyExc_TypeError, "Expected a list in `%s'", __X__.name());
     return -1;
   }
-    
+
   std::vector<blitz::Array<double,2> > data;
   if(list_as_vector(value ,data)==0){
     self->cxx->setX(data);
   }
-    
+
   return 0;
   BOB_CATCH_MEMBER("__X__ could not be written", 0)
 }
@@ -349,18 +349,18 @@ int PyBobLearnEMISVTrainer_set_Z(PyBobLearnEMISVTrainerObject* self, PyObject* v
     PyErr_Format(PyExc_TypeError, "Expected a list in `%s'", __Z__.name());
     return -1;
   }
-    
+
   std::vector<blitz::Array<double,1> > data;
   if(list_as_vector(value ,data)==0){
     self->cxx->setZ(data);
   }
-    
+
   return 0;
   BOB_CATCH_MEMBER("__Z__ could not be written", 0)
 }
 
 
-static PyGetSetDef PyBobLearnEMISVTrainer_getseters[] = { 
+static PyGetSetDef PyBobLearnEMISVTrainer_getseters[] = {
   {
    acc_u_a1.name(),
    (getter)PyBobLearnEMISVTrainer_get_acc_u_a1,
@@ -389,7 +389,7 @@ static PyGetSetDef PyBobLearnEMISVTrainer_getseters[] = {
    __Z__.doc(),
    0
   },
-  
+
 
   {0}  // Sentinel
 };
@@ -418,7 +418,7 @@ static PyObject* PyBobLearnEMISVTrainer_initialize(PyBobLearnEMISVTrainerObject*
 
   PyBobLearnEMISVBaseObject* isv_base = 0;
   PyObject* stats = 0;
-  PyBoostMt19937Object* rng = 0;  
+  PyBoostMt19937Object* rng = 0;
 
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!|O!", kwlist, &PyBobLearnEMISVBase_Type, &isv_base,
                                                                  &PyList_Type, &stats,
@@ -484,11 +484,11 @@ static auto m_step = bob::extension::FunctionDoc(
 static PyObject* PyBobLearnEMISVTrainer_m_step(PyBobLearnEMISVTrainerObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
 
-  // Parses input arguments in a single shot 
+  // Parses input arguments in a single shot
   char** kwlist = m_step.kwlist(0);
 
   PyBobLearnEMISVBaseObject* isv_base = 0;
-  PyObject* stats = 0;  
+  PyObject* stats = 0;
 
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!|O!", kwlist, &PyBobLearnEMISVBase_Type, &isv_base,
                                                                  &PyList_Type, &stats)) return 0;
@@ -502,9 +502,9 @@ static PyObject* PyBobLearnEMISVTrainer_m_step(PyBobLearnEMISVTrainerObject* sel
 
 
 
-/*** enrol ***/
-static auto enrol = bob::extension::FunctionDoc(
-  "enrol",
+/*** enroll ***/
+static auto enroll = bob::extension::FunctionDoc(
+  "enroll",
   "",
   "",
   true
@@ -513,11 +513,11 @@ static auto enrol = bob::extension::FunctionDoc(
 .add_parameter("isv_machine", ":py:class:`bob.learn.em.ISVMachine`", "ISVMachine Object")
 .add_parameter("features", "list(:py:class:`bob.learn.em.GMMStats`)`", "")
 .add_parameter("n_iter", "int", "Number of iterations");
-static PyObject* PyBobLearnEMISVTrainer_enrol(PyBobLearnEMISVTrainerObject* self, PyObject* args, PyObject* kwargs) {
+static PyObject* PyBobLearnEMISVTrainer_enroll(PyBobLearnEMISVTrainerObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
 
   // Parses input arguments in a single shot
-  char** kwlist = enrol.kwlist(0);
+  char** kwlist = enroll.kwlist(0);
 
   PyBobLearnEMISVMachineObject* isv_machine = 0;
   PyObject* stats = 0;
@@ -529,9 +529,9 @@ static PyObject* PyBobLearnEMISVTrainer_enrol(PyBobLearnEMISVTrainerObject* self
 
   std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > training_data;
   if(extract_GMMStats_1d(stats ,training_data)==0)
-    self->cxx->enrol(*isv_machine->cxx, training_data, n_iter);
+    self->cxx->enroll(*isv_machine->cxx, training_data, n_iter);
 
-  BOB_CATCH_MEMBER("cannot perform the enrol method", 0)
+  BOB_CATCH_MEMBER("cannot perform the enroll method", 0)
 
   Py_RETURN_NONE;
 }
@@ -558,10 +558,10 @@ static PyMethodDef PyBobLearnEMISVTrainer_methods[] = {
     m_step.doc()
   },
   {
-    enrol.name(),
-    (PyCFunction)PyBobLearnEMISVTrainer_enrol,
+    enroll.name(),
+    (PyCFunction)PyBobLearnEMISVTrainer_enroll,
     METH_VARARGS|METH_KEYWORDS,
-    enrol.doc()
+    enroll.doc()
   },
   {0} /* Sentinel */
 };
@@ -602,4 +602,3 @@ bool init_BobLearnEMISVTrainer(PyObject* module)
   Py_INCREF(&PyBobLearnEMISVTrainer_Type);
   return PyModule_AddObject(module, "ISVTrainer", (PyObject*)&PyBobLearnEMISVTrainer_Type) >= 0;
 }
-
diff --git a/bob/learn/em/jfa_trainer.cpp b/bob/learn/em/jfa_trainer.cpp
index 60c7010..19b5e15 100644
--- a/bob/learn/em/jfa_trainer.cpp
+++ b/bob/learn/em/jfa_trainer.cpp
@@ -18,7 +18,7 @@ static int extract_GMMStats_1d(PyObject *list,
                              std::vector<boost::shared_ptr<bob::learn::em::GMMStats> >& training_data)
 {
   for (int i=0; i<PyList_GET_SIZE(list); i++){
-  
+
     PyBobLearnEMGMMStatsObject* stats;
     if (!PyArg_Parse(PyList_GetItem(list, i), "O!", &PyBobLearnEMGMMStats_Type, &stats)){
       PyErr_Format(PyExc_RuntimeError, "Expected GMMStats objects");
@@ -69,7 +69,7 @@ int list_as_vector(PyObject* list, std::vector<blitz::Array<double,N> >& vec)
 {
   for (int i=0; i<PyList_GET_SIZE(list); i++)
   {
-    PyBlitzArrayObject* blitz_object; 
+    PyBlitzArrayObject* blitz_object;
     if (!PyArg_Parse(PyList_GetItem(list, i), "O&", &PyBlitzArray_Converter, &blitz_object)){
       PyErr_Format(PyExc_RuntimeError, "Expected numpy array object");
       return -1;
@@ -195,22 +195,22 @@ int PyBobLearnEMJFATrainer_set_acc_v_a1(PyBobLearnEMJFATrainerObject* self, PyOb
     return -1;
   }
   auto o_ = make_safe(input);
-  
-  // perform check on the input  
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, acc_v_a1.name());
     return -1;
-  }  
+  }
 
   if (input->ndim != 3){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 3D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, acc_v_a1.name());
     return -1;
-  }  
+  }
 
   if (input->shape[0] != (Py_ssize_t)self->cxx->getAccVA1().extent(0) && input->shape[1] != (Py_ssize_t)self->cxx->getAccVA1().extent(1) && input->shape[2] != (Py_ssize_t)self->cxx->getAccVA1().extent(2)) {
     PyErr_Format(PyExc_TypeError, "`%s' 3D `input` array should have the shape [%" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d] not [%" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d] for `%s`", Py_TYPE(self)->tp_name, (Py_ssize_t)self->cxx->getAccVA1().extent(0), (Py_ssize_t)self->cxx->getAccVA1().extent(1), (Py_ssize_t)self->cxx->getAccVA1().extent(2), (Py_ssize_t)input->shape[0], (Py_ssize_t)input->shape[1], (Py_ssize_t)input->shape[2], acc_v_a1.name());
     return -1;
-  }  
+  }
 
   auto b = PyBlitzArrayCxx_AsBlitz<double,3>(input, "acc_v_a1");
   if (!b) return -1;
@@ -239,24 +239,24 @@ int PyBobLearnEMJFATrainer_set_acc_v_a2(PyBobLearnEMJFATrainerObject* self, PyOb
     return -1;
   }
   auto o_ = make_safe(input);
-  
-  // perform check on the input  
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, acc_v_a2.name());
     return -1;
-  }  
+  }
 
   if (input->ndim != 2){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 2D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, acc_v_a2.name());
     return -1;
-  }  
+  }
 
   if (input->shape[0] != (Py_ssize_t)self->cxx->getAccVA2().extent(0) && input->shape[1] != (Py_ssize_t)self->cxx->getAccVA2().extent(1)) {
     PyErr_Format(PyExc_TypeError, "`%s' 2D `input` array should have the shape [%" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d] not [%" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d] for `%s`", Py_TYPE(self)->tp_name, (Py_ssize_t)self->cxx->getAccVA2().extent(0), (Py_ssize_t)self->cxx->getAccVA2().extent(1), input->shape[0], input->shape[1], acc_v_a2.name());
     return -1;
-  }  
-  
-  
+  }
+
+
   auto b = PyBlitzArrayCxx_AsBlitz<double,2>(input, "acc_v_a2");
   if (!b) return -1;
   self->cxx->setAccVA2(*b);
@@ -284,23 +284,23 @@ int PyBobLearnEMJFATrainer_set_acc_u_a1(PyBobLearnEMJFATrainerObject* self, PyOb
     return -1;
   }
   auto o_ = make_safe(input);
-  
-  // perform check on the input  
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, acc_u_a1.name());
     return -1;
-  }  
+  }
 
   if (input->ndim != 3){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 3D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, acc_u_a1.name());
     return -1;
-  }  
+  }
 
   if (input->shape[0] != (Py_ssize_t)self->cxx->getAccUA1().extent(0) && input->shape[1] != (Py_ssize_t)self->cxx->getAccUA1().extent(1) && input->shape[2] != (Py_ssize_t)self->cxx->getAccUA1().extent(2)) {
     PyErr_Format(PyExc_TypeError, "`%s' 3D `input` array should have the shape [%" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d] not [%" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d] for `%s`", Py_TYPE(self)->tp_name, (Py_ssize_t)self->cxx->getAccUA1().extent(0), (Py_ssize_t)self->cxx->getAccUA1().extent(1), (Py_ssize_t)self->cxx->getAccUA1().extent(2), (Py_ssize_t)input->shape[0], (Py_ssize_t)input->shape[1], (Py_ssize_t)input->shape[2], acc_u_a1.name());
     return -1;
-  }  
-  
+  }
+
   auto b = PyBlitzArrayCxx_AsBlitz<double,3>(input, "acc_u_a1");
   if (!b) return -1;
   self->cxx->setAccUA1(*b);
@@ -328,23 +328,23 @@ int PyBobLearnEMJFATrainer_set_acc_u_a2(PyBobLearnEMJFATrainerObject* self, PyOb
     return -1;
   }
   auto o_ = make_safe(input);
-  
-  // perform check on the input  
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, acc_u_a2.name());
     return -1;
-  }  
+  }
 
   if (input->ndim != 2){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 2D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, acc_u_a2.name());
     return -1;
-  }  
+  }
 
   if (input->shape[0] != (Py_ssize_t)self->cxx->getAccUA2().extent(0) && input->shape[1] != (Py_ssize_t)self->cxx->getAccUA2().extent(1)) {
     PyErr_Format(PyExc_TypeError, "`%s' 3D `input` array should have the shape [%" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d] not [%" PY_FORMAT_SIZE_T "d, %" PY_FORMAT_SIZE_T "d] for `%s`", Py_TYPE(self)->tp_name, (Py_ssize_t)self->cxx->getAccUA2().extent(0), (Py_ssize_t)self->cxx->getAccUA2().extent(1), input->shape[0], input->shape[1], acc_u_a2.name());
     return -1;
-  }  
-  
+  }
+
   auto b = PyBlitzArrayCxx_AsBlitz<double,2>(input, "acc_u_a2");
   if (!b) return -1;
   self->cxx->setAccUA2(*b);
@@ -372,24 +372,24 @@ int PyBobLearnEMJFATrainer_set_acc_d_a1(PyBobLearnEMJFATrainerObject* self, PyOb
     return -1;
   }
   auto o_ = make_safe(input);
-  
-  // perform check on the input  
+
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, acc_d_a1.name());
     return -1;
-  }  
+  }
 
   if (input->ndim != 1){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, acc_d_a1.name());
     return -1;
-  }  
+  }
 
   if (input->shape[0] != (Py_ssize_t)self->cxx->getAccDA1().extent(0)) {
     PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, (Py_ssize_t)self->cxx->getAccDA1().extent(0), input->shape[0], acc_d_a1.name());
     return -1;
-  }  
-  
-  
+  }
+
+
   auto b = PyBlitzArrayCxx_AsBlitz<double,1>(input, "acc_d_a1");
   if (!b) return -1;
   self->cxx->setAccDA1(*b);
@@ -418,16 +418,16 @@ int PyBobLearnEMJFATrainer_set_acc_d_a2(PyBobLearnEMJFATrainerObject* self, PyOb
   }
   auto o_ = make_safe(input);
 
-  // perform check on the input  
+  // perform check on the input
   if (input->type_num != NPY_FLOAT64){
     PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, acc_d_a2.name());
     return -1;
-  }  
+  }
 
   if (input->ndim != 1){
     PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, acc_d_a2.name());
     return -1;
-  }  
+  }
 
   if (input->shape[0] != (Py_ssize_t)self->cxx->getAccDA2().extent(0)) {
     PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, (Py_ssize_t)self->cxx->getAccDA2().extent(0), input->shape[0], acc_d_a2.name());
@@ -461,12 +461,12 @@ int PyBobLearnEMJFATrainer_set_X(PyBobLearnEMJFATrainerObject* self, PyObject* v
     PyErr_Format(PyExc_TypeError, "Expected a list in `%s'", __X__.name());
     return -1;
   }
-    
+
   std::vector<blitz::Array<double,2> > data;
   if(list_as_vector(value ,data)==0){
     self->cxx->setX(data);
   }
-    
+
   return 0;
   BOB_CATCH_MEMBER("__X__ could not be written", 0)
 }
@@ -492,12 +492,12 @@ int PyBobLearnEMJFATrainer_set_Y(PyBobLearnEMJFATrainerObject* self, PyObject* v
     PyErr_Format(PyExc_TypeError, "Expected a list in `%s'", __Y__.name());
     return -1;
   }
-    
+
   std::vector<blitz::Array<double,1> > data;
   if(list_as_vector(value ,data)==0){
     self->cxx->setY(data);
   }
-    
+
   return 0;
   BOB_CATCH_MEMBER("__Y__ could not be written", 0)
 }
@@ -523,19 +523,19 @@ int PyBobLearnEMJFATrainer_set_Z(PyBobLearnEMJFATrainerObject* self, PyObject* v
     PyErr_Format(PyExc_TypeError, "Expected a list in `%s'", __Z__.name());
     return -1;
   }
-    
+
   std::vector<blitz::Array<double,1> > data;
   if(list_as_vector(value ,data)==0){
     self->cxx->setZ(data);
   }
-    
+
   return 0;
   BOB_CATCH_MEMBER("__Z__ could not be written", 0)
 }
 
 
 
-static PyGetSetDef PyBobLearnEMJFATrainer_getseters[] = { 
+static PyGetSetDef PyBobLearnEMJFATrainer_getseters[] = {
   {
    acc_v_a1.name(),
    (getter)PyBobLearnEMJFATrainer_get_acc_v_a1,
@@ -599,8 +599,8 @@ static PyGetSetDef PyBobLearnEMJFATrainer_getseters[] = {
    __Z__.doc(),
    0
   },
-  
-  
+
+
 
   {0}  // Sentinel
 };
@@ -629,7 +629,7 @@ static PyObject* PyBobLearnEMJFATrainer_initialize(PyBobLearnEMJFATrainerObject*
 
   PyBobLearnEMJFABaseObject* jfa_base = 0;
   PyObject* stats = 0;
-  PyBoostMt19937Object* rng = 0;  
+  PyBoostMt19937Object* rng = 0;
 
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!|O!", kwlist, &PyBobLearnEMJFABase_Type, &jfa_base,
                                                                  &PyList_Type, &stats,
@@ -792,7 +792,7 @@ static auto m_step2 = bob::extension::FunctionDoc(
 static PyObject* PyBobLearnEMJFATrainer_m_step2(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
 
-  // Parses input arguments in a single shot 
+  // Parses input arguments in a single shot
   char** kwlist = m_step2.kwlist(0);
 
   PyBobLearnEMJFABaseObject* jfa_base = 0;
@@ -939,9 +939,9 @@ static PyObject* PyBobLearnEMJFATrainer_finalize3(PyBobLearnEMJFATrainerObject*
 }
 
 
-/*** enrol ***/
-static auto enrol = bob::extension::FunctionDoc(
-  "enrol",
+/*** enroll ***/
+static auto enroll = bob::extension::FunctionDoc(
+  "enroll",
   "",
   "",
   true
@@ -950,11 +950,11 @@ static auto enrol = bob::extension::FunctionDoc(
 .add_parameter("jfa_machine", ":py:class:`bob.learn.em.JFAMachine`", "JFAMachine Object")
 .add_parameter("features", "list(:py:class:`bob.learn.em.GMMStats`)`", "")
 .add_parameter("n_iter", "int", "Number of iterations");
-static PyObject* PyBobLearnEMJFATrainer_enrol(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+static PyObject* PyBobLearnEMJFATrainer_enroll(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
 
   // Parses input arguments in a single shot
-  char** kwlist = enrol.kwlist(0);
+  char** kwlist = enroll.kwlist(0);
 
   PyBobLearnEMJFAMachineObject* jfa_machine = 0;
   PyObject* stats = 0;
@@ -966,9 +966,9 @@ static PyObject* PyBobLearnEMJFATrainer_enrol(PyBobLearnEMJFATrainerObject* self
 
   std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > training_data;
   if(extract_GMMStats_1d(stats ,training_data)==0)
-    self->cxx->enrol(*jfa_machine->cxx, training_data, n_iter);
+    self->cxx->enroll(*jfa_machine->cxx, training_data, n_iter);
 
-  BOB_CATCH_MEMBER("cannot perform the enrol method", 0)
+  BOB_CATCH_MEMBER("cannot perform the enroll method", 0)
 
   Py_RETURN_NONE;
 }
@@ -1037,10 +1037,10 @@ static PyMethodDef PyBobLearnEMJFATrainer_methods[] = {
     finalize3.doc()
   },
   {
-    enrol.name(),
-    (PyCFunction)PyBobLearnEMJFATrainer_enrol,
+    enroll.name(),
+    (PyCFunction)PyBobLearnEMJFATrainer_enroll,
     METH_VARARGS|METH_KEYWORDS,
-    enrol.doc()
+    enroll.doc()
   },
   {0} /* Sentinel */
 };
@@ -1081,4 +1081,3 @@ bool init_BobLearnEMJFATrainer(PyObject* module)
   Py_INCREF(&PyBobLearnEMJFATrainer_Type);
   return PyModule_AddObject(module, "JFATrainer", (PyObject*)&PyBobLearnEMJFATrainer_Type) >= 0;
 }
-
diff --git a/bob/learn/em/plda_trainer.cpp b/bob/learn/em/plda_trainer.cpp
index cfc25d6..c0b179c 100644
--- a/bob/learn/em/plda_trainer.cpp
+++ b/bob/learn/em/plda_trainer.cpp
@@ -62,7 +62,7 @@ int list_as_vector(PyObject* list, std::vector<blitz::Array<double,N> >& vec)
 {
   for (int i=0; i<PyList_GET_SIZE(list); i++)
   {
-    PyBlitzArrayObject* blitz_object; 
+    PyBlitzArrayObject* blitz_object;
     if (!PyArg_Parse(PyList_GetItem(list, i), "O&", &PyBlitzArray_Converter, &blitz_object)){
       PyErr_Format(PyExc_RuntimeError, "Expected numpy array object");
       return -1;
@@ -161,7 +161,7 @@ static int PyBobLearnEMPLDATrainer_init(PyBobLearnEMPLDATrainerObject* self, PyO
       auto tmp_ = make_safe(tmp);
       arg = PyList_GET_ITEM(tmp, 0);
     }
-      
+
     if(PyBobLearnEMPLDATrainer_Check(arg))
       // If the constructor input is PLDATrainer object
       return PyBobLearnEMPLDATrainer_init_copy(self, args, kwargs);
@@ -359,7 +359,7 @@ int PyBobLearnEMPLDATrainer_setUseSumSecondOrder(PyBobLearnEMPLDATrainerObject*
 
 
 
-static PyGetSetDef PyBobLearnEMPLDATrainer_getseters[] = { 
+static PyGetSetDef PyBobLearnEMPLDATrainer_getseters[] = {
   {
    z_first_order.name(),
    (getter)PyBobLearnEMPLDATrainer_get_z_first_order,
@@ -555,9 +555,9 @@ static PyObject* PyBobLearnEMPLDATrainer_finalize(PyBobLearnEMPLDATrainerObject*
 
 
 
-/*** enrol ***/
-static auto enrol = bob::extension::FunctionDoc(
-  "enrol",
+/*** enroll ***/
+static auto enroll = bob::extension::FunctionDoc(
+  "enroll",
   "Main procedure for enrolling a PLDAMachine",
   "",
   true
@@ -565,11 +565,11 @@ static auto enrol = bob::extension::FunctionDoc(
 .add_prototype("plda_machine,data")
 .add_parameter("plda_machine", ":py:class:`bob.learn.em.PLDAMachine`", "PLDAMachine Object")
 .add_parameter("data", "list", "");
-static PyObject* PyBobLearnEMPLDATrainer_enrol(PyBobLearnEMPLDATrainerObject* self, PyObject* args, PyObject* kwargs) {
+static PyObject* PyBobLearnEMPLDATrainer_enroll(PyBobLearnEMPLDATrainerObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
 
   /* Parses input arguments in a single shot */
-  char** kwlist = enrol.kwlist(0);
+  char** kwlist = enroll.kwlist(0);
 
   PyBobLearnEMPLDAMachineObject* plda_machine = 0;
   PyBlitzArrayObject* data = 0;
@@ -578,9 +578,9 @@ static PyObject* PyBobLearnEMPLDATrainer_enrol(PyBobLearnEMPLDATrainerObject* se
                                                                  &PyBlitzArray_Converter, &data)) return 0;
 
   auto data_ = make_safe(data);
-  self->cxx->enrol(*plda_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
+  self->cxx->enroll(*plda_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
 
-  BOB_CATCH_MEMBER("cannot perform the enrol method", 0)
+  BOB_CATCH_MEMBER("cannot perform the enroll method", 0)
 
   Py_RETURN_NONE;
 }
@@ -589,7 +589,7 @@ static PyObject* PyBobLearnEMPLDATrainer_enrol(PyBobLearnEMPLDATrainerObject* se
 /*** is_similar_to ***/
 static auto is_similar_to = bob::extension::FunctionDoc(
   "is_similar_to",
-  
+
   "Compares this PLDATrainer with the ``other`` one to be approximately the same.",
   "The optional values ``r_epsilon`` and ``a_epsilon`` refer to the "
   "relative and absolute precision for the ``weights``, ``biases`` "
@@ -614,8 +614,8 @@ static PyObject* PyBobLearnEMPLDATrainer_IsSimilarTo(PyBobLearnEMPLDATrainerObje
         &PyBobLearnEMPLDATrainer_Type, &other,
         &r_epsilon, &a_epsilon)){
 
-        is_similar_to.print_usage(); 
-        return 0;        
+        is_similar_to.print_usage();
+        return 0;
   }
 
   if (self->cxx->is_similar_to(*other->cxx, r_epsilon, a_epsilon))
@@ -650,12 +650,12 @@ static PyMethodDef PyBobLearnEMPLDATrainer_methods[] = {
     (PyCFunction)PyBobLearnEMPLDATrainer_finalize,
     METH_VARARGS|METH_KEYWORDS,
     finalize.doc()
-  },  
+  },
   {
-    enrol.name(),
-    (PyCFunction)PyBobLearnEMPLDATrainer_enrol,
+    enroll.name(),
+    (PyCFunction)PyBobLearnEMPLDATrainer_enroll,
     METH_VARARGS|METH_KEYWORDS,
-    enrol.doc()
+    enroll.doc()
   },
   {
     is_similar_to.name(),
diff --git a/bob/learn/em/test/test_jfa_trainer.py b/bob/learn/em/test/test_jfa_trainer.py
index afb4613..a479831 100644
--- a/bob/learn/em/test/test_jfa_trainer.py
+++ b/bob/learn/em/test/test_jfa_trainer.py
@@ -166,7 +166,7 @@ def test_JFATrainer_updateZandD():
 
 
 def test_JFATrainAndEnrol():
-  # Train and enrol a JFAMachine
+  # Train and enroll a JFAMachine
 
   # Calls the train function
   ubm = GMMMachine(2,3)
@@ -191,7 +191,7 @@ def test_JFATrainAndEnrol():
   assert numpy.allclose(mb.u, u_ref, eps)
   assert numpy.allclose(mb.d, d_ref, eps)
 
-  # Calls the enrol function
+  # Calls the enroll function
   m = JFAMachine(mb)
 
   Ne = numpy.array([0.1579, 0.9245, 0.1323, 0.2458]).reshape((2,2))
@@ -204,7 +204,7 @@ def test_JFATrainAndEnrol():
   gse2.sum_px = Fe[:,1].reshape(2,3)
 
   gse = [gse1, gse2]
-  t.enrol(m, gse, 5)
+  t.enroll(m, gse, 5)
 
   y_ref = numpy.array([0.555991469319657, 0.002773650670010], 'float64')
   z_ref = numpy.array([8.2228e-20, 3.15216909492e-13, -1.48616735364395e-10, 1.0625905e-17, 3.7150503117895e-11, 1.71104e-19], 'float64')
@@ -213,7 +213,7 @@ def test_JFATrainAndEnrol():
 
 
 def test_ISVTrainAndEnrol():
-  # Train and enrol an 'ISVMachine'
+  # Train and enroll an 'ISVMachine'
 
   eps = 1e-10
   d_ref = numpy.array([0.39601136, 0.07348469, 0.47712682, 0.44738127, 0.43179856, 0.45086029], 'float64')
@@ -236,7 +236,7 @@ def test_ISVTrainAndEnrol():
   assert numpy.allclose(mb.d, d_ref, eps)
   assert numpy.allclose(mb.u, u_ref, eps)
 
-  # Calls the enrol function
+  # Calls the enroll function
   m = ISVMachine(mb)
 
   Ne = numpy.array([0.1579, 0.9245, 0.1323, 0.2458]).reshape((2,2))
@@ -249,7 +249,7 @@ def test_ISVTrainAndEnrol():
   gse2.sum_px = Fe[:,1].reshape(2,3)
 
   gse = [gse1, gse2]
-  t.enrol(m, gse, 5)
+  t.enroll(m, gse, 5)
   assert numpy.allclose(m.z, z_ref, eps)
 
 
diff --git a/bob/learn/em/test/test_plda_trainer.py b/bob/learn/em/test/test_plda_trainer.py
index b28ea45..edc26b1 100644
--- a/bob/learn/em/test/test_plda_trainer.py
+++ b/bob/learn/em/test/test_plda_trainer.py
@@ -376,7 +376,7 @@ def test_plda_EM_vs_Python():
   #t.train(m, l)
   bob.learn.em.train(t, m, l, max_iterations=10)
   t_py.train(m_py, l)
-  
+
   assert numpy.allclose(m.mu, m_py.mu)
   assert numpy.allclose(m.f, m_py.f)
   assert numpy.allclose(m.g, m_py.g)
@@ -677,10 +677,10 @@ def test_plda_enrollment():
   x1 = numpy.array([0.8032, 0.3503, 0.4587, 0.9511, 0.1330, 0.0703, 0.7061])
   x2 = numpy.array([0.9317, 0.1089, 0.6517, 0.1461, 0.6940, 0.6256, 0.0437])
   x3 = numpy.array([0.7979, 0.9862, 0.4367, 0.3447, 0.0488, 0.2252, 0.5810])
-  a_enrol = []
-  a_enrol.append(x1)
-  a_enrol.append(x2)
-  a_enrol = numpy.array(a_enrol)
+  a_enroll = []
+  a_enroll.append(x1)
+  a_enroll.append(x2)
+  a_enroll = numpy.array(a_enroll)
 
   # reference likelihood from Prince implementation
   ll_ref = -182.8880743535197
@@ -689,9 +689,9 @@ def test_plda_enrollment():
   # and x3 as a probe sample
   m = PLDAMachine(mb)
   t = PLDATrainer()
-  t.enrol(m, a_enrol)
+  t.enroll(m, a_enroll)
   ll = m.compute_log_likelihood(x3)
-  
+
   assert abs(ll - ll_ref) < 1e-10
 
   # reference obtained by computing the likelihood of [x1,x2,x3], [x1,x2]
@@ -741,4 +741,3 @@ def test_plda_comparisons():
   assert (t1 == t2 ) is False
   assert t1 != t2
   assert (t1.is_similar_to(t2) ) is False
-
diff --git a/doc/guide.rst b/doc/guide.rst
index 635791d..be565f4 100644
--- a/doc/guide.rst
+++ b/doc/guide.rst
@@ -178,7 +178,7 @@ portion of between-class variation.
 
 An instance of :py:class:`bob.learn.em.JFABase` carries information about
 the matrices :math:`U`, :math:`V` and :math:`D`, which can be shared between
-several classes.  In contrast, after the enrolment phase, an instance of
+several classes.  In contrast, after the enrollment phase, an instance of
 :py:class:`bob.learn.em.JFAMachine` carries class-specific information about
 the latent variables :math:`y` and :math:`z`.
 
@@ -389,7 +389,7 @@ Furthermore, to make the things even simpler, it is possible to train the K-Mean
 
    >>> data           = numpy.array([[3,-3,100], [4,-4,98], [3.5,-3.5,99], [-7,7,-100], [-5,5,-101]], dtype='float64') #Data
    >>> kmeans_machine = bob.learn.em.KMeansMachine(2, 3) # Create a machine with k=2 clusters with a dimensionality equal to 3
-   >>> kmeans_trainer = bob.learn.em.KMeansTrainer() #Creating the k-means machine   
+   >>> kmeans_trainer = bob.learn.em.KMeansTrainer() #Creating the k-means trainer
    >>> max_iterations = 10
    >>> bob.learn.em.train(kmeans_trainer, kmeans_machine, data, max_iterations = 10) #wrapper for the em trainer
    >>> print(kmeans_machine.means)
@@ -431,7 +431,7 @@ be called.
 
    >>> kmeansTrainer = bob.learn.em.KMeansTrainer()
 
-   >>> bob.learn.em.train(kmeansTrainer, kmeans, data, max_iterations = 200, convergence_threshold = 1e-5) # Train the KMeansMachine   
+   >>> bob.learn.em.train(kmeansTrainer, kmeans, data, max_iterations = 200, convergence_threshold = 1e-5) # Train the KMeansMachine
    >>> print(kmeans.means)
    [[ -6.   6.  -100.5]
     [  3.5 -3.5   99. ]]
@@ -600,7 +600,7 @@ the class-specific latent variables :math:`y` and :math:`z`:
    :options: +NORMALIZE_WHITESPACE
 
    >>> m = bob.learn.em.JFAMachine(jfa_base)
-   >>> jfa_trainer.enrol(m, gse, 5) # where 5 is the number of enrollment iterations
+   >>> jfa_trainer.enroll(m, gse, 5) # where 5 is the number of enrollment iterations
 
 More information about the training process can be found in [12]_ and [13]_.
 
@@ -645,7 +645,7 @@ estimate the class-specific latent variable :math:`z`:
    :options: +NORMALIZE_WHITESPACE
 
    >>> m = bob.learn.em.ISVMachine(isv_base)
-   >>> isv_trainer.enrol(m, gse, 5) # where 5 is the number of iterations
+   >>> isv_trainer.enroll(m, gse, 5) # where 5 is the number of iterations
 
 More information about the training process can be found in [14]_ and [13]_.
 
@@ -746,17 +746,17 @@ obtained by calling the
 
 If separate models for different classes need to be enrolled, each of them with
-a set of enrolment samples, then, several instances of
+a set of enrollment samples, then, several instances of
-:py:class:`bob.learn.em.PLDAMachine` need to be created and enroled using
-the :py:meth:`bob.learn.em.PLDATrainer.enrol()` method as follows.
+:py:class:`bob.learn.em.PLDAMachine` need to be created and enrolled using
+the :py:meth:`bob.learn.em.PLDATrainer.enroll()` method as follows.
 
 .. doctest::
 
    >>> plda1 = bob.learn.em.PLDAMachine(pldabase)
    >>> samples1 = numpy.array([[3.5,-3.4,102], [4.5,-4.3,56]], dtype=numpy.float64)
-   >>> trainer.enrol(plda1, samples1)
+   >>> trainer.enroll(plda1, samples1)
    >>> plda2 = bob.learn.em.PLDAMachine(pldabase)
    >>> samples2 = numpy.array([[3.5,7,-49], [4.5,8.9,-99]], dtype=numpy.float64)
-   >>> trainer.enrol(plda2, samples2)
+   >>> trainer.enroll(plda2, samples2)
 
 Afterwards, the joint log-likelihood of the enrollment samples and of one or
 several test samples can be computed as previously described, and this
@@ -769,12 +769,12 @@ separately for each model.
    >>> l2 = plda2.compute_log_likelihood(sample)
 
 In a verification scenario, there are two possible hypotheses: 1.
-:math:`x_{test}` and :math:`x_{enrol}` share the same class.  2.
-:math:`x_{test}` and :math:`x_{enrol}` are from different classes.  Using the
+:math:`x_{test}` and :math:`x_{enroll}` share the same class.  2.
+:math:`x_{test}` and :math:`x_{enroll}` are from different classes.  Using the
 methods :py:meth:`bob.learn.em.PLDAMachine.forward` or
 :py:meth:`bob.learn.em.PLDAMachine.__call__` function, the corresponding
 log-likelihood ratio will be computed, which is defined in more formal way by:
-:math:`s = \ln(P(x_{test},x_{enrol})) - \ln(P(x_{test})P(x_{enrol}))`
+:math:`s = \ln(P(x_{test},x_{enroll})) - \ln(P(x_{test})P(x_{enroll}))`
 
 .. doctest::
 
@@ -807,4 +807,3 @@ log-likelihood ratio will be computed, which is defined in more formal way by:
 .. [15] http://dx.doi.org/10.1109/TASL.2010.2064307
 .. [16] http://dx.doi.org/10.1109/ICCV.2007.4409052
 .. [17] http://doi.ieeecomputersociety.org/10.1109/TPAMI.2013.38
-
-- 
GitLab