diff --git a/bob/learn/em/cpp/IVectorMachine.cpp b/bob/learn/em/cpp/IVectorMachine.cpp
index f9f4683ac769212648df5c331287ba751591d9bf..f00d938e07848f62f94c06b5e679004fc6f05881 100644
--- a/bob/learn/em/cpp/IVectorMachine.cpp
+++ b/bob/learn/em/cpp/IVectorMachine.cpp
@@ -18,9 +18,10 @@ bob::learn::em::IVectorMachine::IVectorMachine()
 bob::learn::em::IVectorMachine::IVectorMachine(const boost::shared_ptr<bob::learn::em::GMMMachine> ubm,
     const size_t rt, const double variance_threshold):
   m_ubm(ubm), m_rt(rt),
-  m_T(getSupervectorLength(),rt), m_sigma(getSupervectorLength()),
+  m_T(getSupervectorLength(),rt), m_sigma(getSupervectorLength()),
   m_variance_threshold(variance_threshold)
 {
+  m_sigma = 0.0;
   resizePrecompute();
 }
 
@@ -153,6 +154,7 @@ void bob::learn::em::IVectorMachine::precompute()
     blitz::Range rall = blitz::Range::all();
     const int C = (int)m_ubm->getNGaussians();
     const int D = (int)m_ubm->getNInputs();
+
     // T_{c}^{T}.sigma_{c}^{-1}
     for (int c=0; c<C; ++c)
     {
diff --git a/bob/learn/em/empca_trainer.cpp b/bob/learn/em/empca_trainer.cpp
index f197713852fe39c13691867f2fa5445d82b62755..a24af2f4314761366c4b3192d9d3867d6228818d 100644
--- a/bob/learn/em/empca_trainer.cpp
+++ b/bob/learn/em/empca_trainer.cpp
@@ -139,51 +139,7 @@ static PyObject* PyBobLearnEMEMPCATrainer_RichCompare(PyBobLearnEMEMPCATrainerOb
 /************ Variables Section ***********************************/
 /******************************************************************/
 
-
-/***** rng *****/
-static auto rng = bob::extension::VariableDoc(
-  "rng",
-  "str",
-  "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.",
-  ""
-);
-PyObject* PyBobLearnEMEMPCATrainer_getRng(PyBobLearnEMEMPCATrainerObject* self, void*) {
-  BOB_TRY
-  //Allocating the correspondent python object
-  
-  PyBoostMt19937Object* retval =
-    (PyBoostMt19937Object*)PyBoostMt19937_Type.tp_alloc(&PyBoostMt19937_Type, 0);
-
-  retval->rng = self->cxx->getRng().get();
-  return Py_BuildValue("N", retval);
-  BOB_CATCH_MEMBER("Rng method could not be read", 0)
-}
-int PyBobLearnEMEMPCATrainer_setRng(PyBobLearnEMEMPCATrainerObject* self, PyObject* value, void*) {
-  BOB_TRY
-
-  if (!PyBoostMt19937_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects an PyBoostMt19937_Check", Py_TYPE(self)->tp_name, rng.name());
-    return -1;
-  }
-
-  PyBoostMt19937Object* boostObject = 0;
-  PyBoostMt19937_Converter(value, &boostObject);
-  self->cxx->setRng((boost::shared_ptr<boost::mt19937>)boostObject->rng);
-
-  return 0;
-  BOB_CATCH_MEMBER("Rng could not be set", 0)
-}
-
-
-
 static PyGetSetDef PyBobLearnEMEMPCATrainer_getseters[] = { 
-  {
-   rng.name(),
-   (getter)PyBobLearnEMEMPCATrainer_getRng,
-   (setter)PyBobLearnEMEMPCATrainer_setRng,
-   rng.doc(),
-   0
-  },
   {0}  // Sentinel
 };
 
@@ -201,7 +157,8 @@ static auto initialize = bob::extension::FunctionDoc(
 )
-.add_prototype("linear_machine,data")
+.add_prototype("linear_machine,data,rng")
 .add_parameter("linear_machine", ":py:class:`bob.learn.linear.Machine`", "LinearMachine Object")
-.add_parameter("data", "array_like <float, 2D>", "Input data");
+.add_parameter("data", "array_like <float, 2D>", "Input data")
+.add_parameter("rng", ":py:class:`bob.core.random.mt19937`", "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.");
 static PyObject* PyBobLearnEMEMPCATrainer_initialize(PyBobLearnEMEMPCATrainerObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
 
@@ -209,11 +166,19 @@ static PyObject* PyBobLearnEMEMPCATrainer_initialize(PyBobLearnEMEMPCATrainerObj
   char** kwlist = initialize.kwlist(0);
 
   PyBobLearnLinearMachineObject* linear_machine = 0;
-  PyBlitzArrayObject* data                          = 0;
+  PyBlitzArrayObject* data                      = 0;
+  PyBoostMt19937Object* rng = 0;
 
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnLinearMachine_Type, &linear_machine,
-                                                                 &PyBlitzArray_Converter, &data)) return 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&|O!", kwlist, &PyBobLearnLinearMachine_Type, &linear_machine,
+                                                                 &PyBlitzArray_Converter, &data,
+                                                                 &PyBoostMt19937_Type, &rng)) return 0;
   auto data_ = make_safe(data);
+  
+  if(rng){
+    boost::shared_ptr<boost::mt19937> rng_cpy = (boost::shared_ptr<boost::mt19937>)new boost::mt19937(*rng->rng);
+    self->cxx->setRng(rng_cpy);
+  }
+  
 
   self->cxx->initialize(*linear_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
 
diff --git a/bob/learn/em/gaussian.cpp b/bob/learn/em/gaussian.cpp
index 267c656f00334cc88178be173da4d080a35cc787..01327ea08e249c25bde199147a44d946f44f3638 100644
--- a/bob/learn/em/gaussian.cpp
+++ b/bob/learn/em/gaussian.cpp
@@ -75,18 +75,9 @@ static int PyBobLearnEMGaussian_init_hdf5(PyBobLearnEMGaussianObject* self, PyOb
     Gaussian_doc.print_usage();
     return -1;
   }
+  auto config_ = make_safe(config);
   
-  try {
-    self->cxx.reset(new bob::learn::em::Gaussian(*(config->f)));
-  }
-  catch (std::exception& ex) {
-    PyErr_SetString(PyExc_RuntimeError, ex.what());
-    return -1;
-  }
-  catch (...) {
-    PyErr_Format(PyExc_RuntimeError, "cannot create new object of type `%s' - unknown exception thrown", Py_TYPE(self)->tp_name);
-    return -1;
-  }
+  self->cxx.reset(new bob::learn::em::Gaussian(*(config->f)));
 
   return 0;
 }
@@ -170,7 +161,7 @@ int PyBobLearnEMGaussian_Check(PyObject* o) {
 /***** MEAN *****/
 static auto mean = bob::extension::VariableDoc(
   "mean",
-  "array_like <double, 1D>",
+  "array_like <float, 1D>",
   "Mean of the Gaussian",
   ""
 );
@@ -181,13 +172,30 @@ PyObject* PyBobLearnEMGaussian_getMean(PyBobLearnEMGaussianObject* self, void*){
 }
 int PyBobLearnEMGaussian_setMean(PyBobLearnEMGaussianObject* self, PyObject* value, void*){
   BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
+  PyBlitzArrayObject* input;
+  if (!PyBlitzArray_Converter(value, &input)){
     PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, mean.name());
     return -1;
   }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "mean");
+  
+  // perform check on the input  
+  if (input->type_num != NPY_FLOAT64){
+    PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, mean.name());
+    return -1;
+  }  
+
+  if (input->ndim != 1){
+    PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, mean.name());
+    return -1;
+  }  
+
+  if (input->shape[0] != (Py_ssize_t)self->cxx->getNInputs()){
+    PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, self->cxx->getNInputs(), input->shape[0], mean.name());
+    return -1;
+  }  
+
+  auto o_ = make_safe(input);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(input, "mean");
   if (!b) return -1;
   self->cxx->setMean(*b);
   return 0;
@@ -197,7 +205,7 @@ int PyBobLearnEMGaussian_setMean(PyBobLearnEMGaussianObject* self, PyObject* val
 /***** Variance *****/
 static auto variance = bob::extension::VariableDoc(
   "variance",
-  "array_like <double, 1D>",
+  "array_like <float, 1D>",
   "Variance of the Gaussian",
   ""
 );
@@ -208,13 +216,30 @@ PyObject* PyBobLearnEMGaussian_getVariance(PyBobLearnEMGaussianObject* self, voi
 }
 int PyBobLearnEMGaussian_setVariance(PyBobLearnEMGaussianObject* self, PyObject* value, void*){
   BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
+  PyBlitzArrayObject* input;
+  if (!PyBlitzArray_Converter(value, &input)){
     PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, variance.name());
     return -1;
   }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "variance");
+  auto input_ = make_safe(input);
+  
+  // perform check on the input
+  if (input->type_num != NPY_FLOAT64){
+    PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, variance.name());
+    return -1;
+  }  
+
+  if (input->ndim != 1){
+    PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, variance.name());
+    return -1;
+  }  
+
+  if (input->shape[0] != (Py_ssize_t)self->cxx->getNInputs()){
+    PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, self->cxx->getNInputs(), input->shape[0], variance.name());
+    return -1;
+  }
+
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(input, "variance");
   if (!b) return -1;
   self->cxx->setVariance(*b);
   return 0;
@@ -225,7 +250,7 @@ int PyBobLearnEMGaussian_setVariance(PyBobLearnEMGaussianObject* self, PyObject*
 /***** variance_thresholds *****/
 static auto variance_thresholds = bob::extension::VariableDoc(
   "variance_thresholds",
-  "array_like <double, 1D>",
+  "array_like <float, 1D>",
   "The variance flooring thresholds, i.e. the minimum allowed value of variance in each dimension. ",
   "The variance will be set to this value if an attempt is made to set it to a smaller value."
 );
@@ -236,13 +261,31 @@ PyObject* PyBobLearnEMGaussian_getVarianceThresholds(PyBobLearnEMGaussianObject*
 }
 int PyBobLearnEMGaussian_setVarianceThresholds(PyBobLearnEMGaussianObject* self, PyObject* value, void*){
   BOB_TRY
-  PyBlitzArrayObject* o;
-  if (!PyBlitzArray_Converter(value, &o)){
+  PyBlitzArrayObject* input;
+  if (!PyBlitzArray_Converter(value, &input)){
     PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, variance_thresholds.name());
     return -1;
+  }      
+
+  auto input_ = make_safe(input);
+
+  // perform check on the input
+  if (input->type_num != NPY_FLOAT64){
+    PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `%s`", Py_TYPE(self)->tp_name, variance_thresholds.name());
+    return -1;
+  }  
+
+  if (input->ndim != 1){
+    PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64 for `%s`", Py_TYPE(self)->tp_name, variance_thresholds.name());
+    return -1;
+  }  
+
+  if (input->shape[0] != (Py_ssize_t)self->cxx->getNInputs()){
+    PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d elements, not %" PY_FORMAT_SIZE_T "d for `%s`", Py_TYPE(self)->tp_name, self->cxx->getNInputs(), input->shape[0], variance_thresholds.name());
+    return -1;
   }
-  auto o_ = make_safe(o);
-  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "variance_thresholds");
+  
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(input, "variance_thresholds");
   if (!b) return -1;
   self->cxx->setVarianceThresholds(*b);
   return 0;
@@ -336,7 +379,7 @@ static auto log_likelihood = bob::extension::FunctionDoc(
   true
 )
 .add_prototype("input","output")
-.add_parameter("input", "array_like <double, 1D>", "Input vector")
+.add_parameter("input", "array_like <float, 1D>", "Input vector")
 .add_return("output","float","The log likelihood");
 static PyObject* PyBobLearnEMGaussian_loglikelihood(PyBobLearnEMGaussianObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
@@ -349,6 +392,25 @@ static PyObject* PyBobLearnEMGaussian_loglikelihood(PyBobLearnEMGaussianObject*
   //protects acquired resources through this scope
   auto input_ = make_safe(input);
 
+  // perform check on the input
+  if (input->type_num != NPY_FLOAT64){
+    PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `input`", Py_TYPE(self)->tp_name);
+    log_likelihood.print_usage();
+    return 0;
+  }  
+
+  if (input->ndim != 1){
+    PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64", Py_TYPE(self)->tp_name);
+    log_likelihood.print_usage();
+    return 0;
+  }  
+
+  if (input->shape[0] != (Py_ssize_t)self->cxx->getNInputs()){
+    PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d elements, not %" PY_FORMAT_SIZE_T "d", Py_TYPE(self)->tp_name, self->cxx->getNInputs(), input->shape[0]);
+    log_likelihood.print_usage();
+    return 0;
+  }  
+
   double value = self->cxx->logLikelihood(*PyBlitzArrayCxx_AsBlitz<double,1>(input));
   return Py_BuildValue("d", value);
 
@@ -362,8 +424,8 @@ static auto log_likelihood_ = bob::extension::FunctionDoc(
   "Output the log likelihood given a sample. The input size is NOT checked."
 )
 .add_prototype("input","output")
-.add_parameter("input", "array_like <double, 1D>", "Input vector")
-.add_return("output","double","The log likelihood");
+.add_parameter("input", "array_like <float, 1D>", "Input vector")
+.add_return("output","float","The log likelihood");
 static PyObject* PyBobLearnEMGaussian_loglikelihood_(PyBobLearnEMGaussianObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
   char** kwlist = log_likelihood_.kwlist(0);
@@ -373,6 +435,25 @@ static PyObject* PyBobLearnEMGaussian_loglikelihood_(PyBobLearnEMGaussianObject*
   //protects acquired resources through this scope
   auto input_ = make_safe(input);
 
+  // perform check on the input
+  if (input->type_num != NPY_FLOAT64){
+    PyErr_Format(PyExc_TypeError, "`%s' only supports 64-bit float arrays for input array `input`", Py_TYPE(self)->tp_name);
+    log_likelihood_.print_usage();
+    return 0;
+  }  
+
+  if (input->ndim != 1){
+    PyErr_Format(PyExc_TypeError, "`%s' only processes 1D arrays of float64", Py_TYPE(self)->tp_name);
+    log_likelihood_.print_usage();
+    return 0;
+  }  
+
+  if (input->shape[0] != (Py_ssize_t)self->cxx->getNInputs()){
+    PyErr_Format(PyExc_TypeError, "`%s' 1D `input` array should have %" PY_FORMAT_SIZE_T "d elements, not %" PY_FORMAT_SIZE_T "d", Py_TYPE(self)->tp_name, self->cxx->getNInputs(), input->shape[0]);
+    log_likelihood_.print_usage();
+    return 0;
+  }  
+
   double value = self->cxx->logLikelihood_(*PyBlitzArrayCxx_AsBlitz<double,1>(input));
   return Py_BuildValue("d", value);
 
@@ -386,8 +467,7 @@ static auto save = bob::extension::FunctionDoc(
   "Save the configuration of the Gassian Machine to a given HDF5 file"
 )
 .add_prototype("hdf5")
-.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing")
-;
+.add_parameter("hdf5", ":py:class:`bob.io.base.HDF5File`", "An HDF5 file open for writing");
 static PyObject* PyBobLearnEMGaussian_Save(PyBobLearnEMGaussianObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
   
diff --git a/bob/learn/em/gmm_stats.cpp b/bob/learn/em/gmm_stats.cpp
index 9bf07bdf981e4cb696c31fc803e295344c1ce980..dd05a86d41b05d8be1fae63a46973b857578c28e 100644
--- a/bob/learn/em/gmm_stats.cpp
+++ b/bob/learn/em/gmm_stats.cpp
@@ -84,23 +84,13 @@ static int PyBobLearnEMGMMStats_init_hdf5(PyBobLearnEMGMMStatsObject* self, PyOb
 
   char** kwlist = GMMStats_doc.kwlist(2);
 
-  /*
   PyBobIoHDF5FileObject* config = 0;
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBobIoHDF5File_Converter, &config)){
     GMMStats_doc.print_usage();
     return -1;
   }
-  */
-
-  PyObject* config = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobIoHDF5File_Type, &config)){
-    GMMStats_doc.print_usage();
-    return -1;
-  }
-  
-  auto h5f = reinterpret_cast<PyBobIoHDF5FileObject*>(config);  
-  
-  self->cxx.reset(new bob::learn::em::GMMStats(*(h5f->f)));
+  auto config_ = make_safe(config);
+  self->cxx.reset(new bob::learn::em::GMMStats(*(config->f)));
 
   return 0;
 }
diff --git a/bob/learn/em/include/bob.learn.em/PLDATrainer.h b/bob/learn/em/include/bob.learn.em/PLDATrainer.h
index 7ea35089ad4f321b35142ca8e9519592a1a892af..3fae06ff140b611c83993d57f0976bd0212d44bc 100644
--- a/bob/learn/em/include/bob.learn.em/PLDATrainer.h
+++ b/bob/learn/em/include/bob.learn.em/PLDATrainer.h
@@ -222,43 +222,43 @@ class PLDATrainer
     /**
      * @brief Sets the Random Number Generator
      */
-    void setRng(const boost::shared_ptr<boost::mt19937> rng)
+    void setRng(boost::shared_ptr<boost::mt19937> rng)
     { m_rng = rng; }
 
     /**
      * @brief Gets the Random Number Generator
      */
-    const boost::shared_ptr<boost::mt19937> getRng() const
+    boost::shared_ptr<boost::mt19937> getRng() const
     { return m_rng; }      
 
   private:
   
-    boost::shared_ptr<boost::mt19937> m_rng;
-  
-    //representation
-    size_t m_dim_d; ///< Dimensionality of the input features
-    size_t m_dim_f; ///< Size/rank of the \f$F\f$ subspace
-    size_t m_dim_g; ///< Size/rank of the \f$G\f$ subspace
-    bool m_use_sum_second_order; ///< If set, only the sum of the second order statistics is stored/allocated
-    InitFMethod m_initF_method; ///< Initialization method for \f$F\f$
-    double m_initF_ratio; ///< Ratio/factor used for the initialization of \f$F\f$
-    InitGMethod m_initG_method; ///< Initialization method for \f$G\f$
-    double m_initG_ratio; ///< Ratio/factor used for the initialization of \f$G\f$
-    InitSigmaMethod m_initSigma_method; ///< Initialization method for \f$\Sigma\f$
-    double m_initSigma_ratio; ///< Ratio/factor used for the initialization of \f$\Sigma\f$
+    boost::shared_ptr<boost::mt19937> m_rng;
+  
+    //representation
+    size_t m_dim_d; ///< Dimensionality of the input features
+    size_t m_dim_f; ///< Size/rank of the \f$F\f$ subspace
+    size_t m_dim_g; ///< Size/rank of the \f$G\f$ subspace
+    bool m_use_sum_second_order; ///< If set, only the sum of the second order statistics is stored/allocated
+    InitFMethod m_initF_method; ///< Initialization method for \f$F\f$
+    double m_initF_ratio; ///< Ratio/factor used for the initialization of \f$F\f$
+    InitGMethod m_initG_method; ///< Initialization method for \f$G\f$
+    double m_initG_ratio; ///< Ratio/factor used for the initialization of \f$G\f$
+    InitSigmaMethod m_initSigma_method; ///< Initialization method for \f$\Sigma\f$
+    double m_initSigma_ratio; ///< Ratio/factor used for the initialization of \f$\Sigma\f$
 
-    // Statistics and covariance computed during the training process
-    blitz::Array<double,2> m_cache_S; ///< Covariance of the training data
-    std::vector<blitz::Array<double,2> > m_cache_z_first_order; ///< Current mean of the z_{n} latent variable (1 for each sample)
-    blitz::Array<double,2> m_cache_sum_z_second_order; ///< Current sum of the covariance of the z_{n} latent variable
-    std::vector<blitz::Array<double,3> > m_cache_z_second_order; ///< Current covariance of the z_{n} latent variable
-    // Precomputed
-    /**
-     * @brief Number of training samples for each individual in the training set
-     */
-    std::vector<size_t> m_cache_n_samples_per_id;
-    /**
-     * @brief Tells if there is an identity with a 'key'/particular number of
+    // Statistics and covariance computed during the training process
+    blitz::Array<double,2> m_cache_S; ///< Covariance of the training data
+    std::vector<blitz::Array<double,2> > m_cache_z_first_order; ///< Current mean of the z_{n} latent variable (1 for each sample)
+    blitz::Array<double,2> m_cache_sum_z_second_order; ///< Current sum of the covariance of the z_{n} latent variable
+    std::vector<blitz::Array<double,3> > m_cache_z_second_order; ///< Current covariance of the z_{n} latent variable
+    // Precomputed
+    /**
+     * @brief Number of training samples for each individual in the training set
+     */
+    std::vector<size_t> m_cache_n_samples_per_id;
+    /**
+     * @brief Tells if there is an identity with a 'key'/particular number of
      * training samples, and if corresponding matrices are up to date.
      */
     std::map<size_t,bool> m_cache_n_samples_in_training;
diff --git a/bob/learn/em/isv_base.cpp b/bob/learn/em/isv_base.cpp
index 28e86d5df408e4d5dab2bddf787de0bd9f1b3d12..850fc3fb251a7108496c96105a7f65d4c5fc2dff 100644
--- a/bob/learn/em/isv_base.cpp
+++ b/bob/learn/em/isv_base.cpp
@@ -62,6 +62,7 @@ static int PyBobLearnEMISVBase_init_hdf5(PyBobLearnEMISVBaseObject* self, PyObje
     ISVBase_doc.print_usage();
     return -1;
   }
+  auto config_ = make_safe(config);
 
   self->cxx.reset(new bob::learn::em::ISVBase(*(config->f)));
 
diff --git a/bob/learn/em/isv_machine.cpp b/bob/learn/em/isv_machine.cpp
index 00f4fcdc3286040c7939fd627ad410369f91bbc1..6de1fe0e99bbb0b5e0e4a345a20ca92f679a432f 100644
--- a/bob/learn/em/isv_machine.cpp
+++ b/bob/learn/em/isv_machine.cpp
@@ -59,7 +59,7 @@ static int PyBobLearnEMISVMachine_init_hdf5(PyBobLearnEMISVMachineObject* self,
     ISVMachine_doc.print_usage();
     return -1;
   }
-
+  auto config_ = make_safe(config);
   self->cxx.reset(new bob::learn::em::ISVMachine(*(config->f)));
 
   return 0;
diff --git a/bob/learn/em/isv_trainer.cpp b/bob/learn/em/isv_trainer.cpp
index dc357b7dac94db7aa6a94462fc274a9e1b4478a2..1d594a1cb829181d0afb8c0f7f73c84b92c99da1 100644
--- a/bob/learn/em/isv_trainer.cpp
+++ b/bob/learn/em/isv_trainer.cpp
@@ -325,41 +325,6 @@ int PyBobLearnEMISVTrainer_set_Z(PyBobLearnEMISVTrainerObject* self, PyObject* v
 }
 
 
-/***** rng *****/
-static auto rng = bob::extension::VariableDoc(
-  "rng",
-  "str",
-  "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.",
-  ""
-);
-PyObject* PyBobLearnEMISVTrainer_getRng(PyBobLearnEMISVTrainerObject* self, void*) {
-  BOB_TRY
-  //Allocating the correspondent python object
-  
-  PyBoostMt19937Object* retval =
-    (PyBoostMt19937Object*)PyBoostMt19937_Type.tp_alloc(&PyBoostMt19937_Type, 0);
-
-  retval->rng = self->cxx->getRng().get();
-  return Py_BuildValue("N", retval);
-  BOB_CATCH_MEMBER("Rng method could not be read", 0)
-}
-int PyBobLearnEMISVTrainer_setRng(PyBobLearnEMISVTrainerObject* self, PyObject* value, void*) {
-  BOB_TRY
-
-  if (!PyBoostMt19937_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects an PyBoostMt19937_Check", Py_TYPE(self)->tp_name, rng.name());
-    return -1;
-  }
-
-  PyBoostMt19937Object* boostObject = 0;
-  PyBoostMt19937_Converter(value, &boostObject);
-  self->cxx->setRng((boost::shared_ptr<boost::mt19937>)boostObject->rng);
-
-  return 0;
-  BOB_CATCH_MEMBER("Rng could not be set", 0)
-}
-
-
 static PyGetSetDef PyBobLearnEMISVTrainer_getseters[] = { 
   {
    acc_u_a1.name(),
@@ -390,14 +355,6 @@ static PyGetSetDef PyBobLearnEMISVTrainer_getseters[] = {
    0
   },
   
-  {
-   rng.name(),
-   (getter)PyBobLearnEMISVTrainer_getRng,
-   (setter)PyBobLearnEMISVTrainer_setRng,
-   rng.doc(),
-   0
-  },
-  
 
   {0}  // Sentinel
 };
@@ -414,9 +371,10 @@ static auto initialize = bob::extension::FunctionDoc(
   "",
   true
 )
-.add_prototype("isv_base,stats")
+.add_prototype("isv_base,stats,rng")
 .add_parameter("isv_base", ":py:class:`bob.learn.em.ISVBase`", "ISVBase Object")
-.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object");
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object")
+.add_parameter("rng", ":py:class:`bob.core.random.mt19937`", "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.");
 static PyObject* PyBobLearnEMISVTrainer_initialize(PyBobLearnEMISVTrainerObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
 
@@ -425,9 +383,16 @@ static PyObject* PyBobLearnEMISVTrainer_initialize(PyBobLearnEMISVTrainerObject*
 
   PyBobLearnEMISVBaseObject* isv_base = 0;
   PyObject* stats = 0;
+  PyBoostMt19937Object* rng = 0;  
 
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnEMISVBase_Type, &isv_base,
-                                                                 &PyList_Type, &stats)) return 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!|O!", kwlist, &PyBobLearnEMISVBase_Type, &isv_base,
+                                                                 &PyList_Type, &stats,
+                                                                 &PyBoostMt19937_Type, &rng)) return 0;
+
+  if(rng){
+    boost::shared_ptr<boost::mt19937> rng_cpy = (boost::shared_ptr<boost::mt19937>)new boost::mt19937(*rng->rng);
+    self->cxx->setRng(rng_cpy);
+  }
 
   std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > > training_data;
   if(extract_GMMStats_2d(stats ,training_data)==0)
diff --git a/bob/learn/em/ivector_machine.cpp b/bob/learn/em/ivector_machine.cpp
index 73d4d96745415d7e42a7a17708e7849a221896a5..257aafeef21dd96981b84f545e22241ac2997003 100644
--- a/bob/learn/em/ivector_machine.cpp
+++ b/bob/learn/em/ivector_machine.cpp
@@ -25,7 +25,7 @@ static auto IVectorMachine_doc = bob::extension::ClassDoc(
     "",
     true
   )
-  .add_prototype("ubm, rt, variance_threshold","")
+  .add_prototype("ubm,rt,variance_threshold","")
   .add_prototype("other","")
   .add_prototype("hdf5","")
 
@@ -62,7 +62,7 @@ static int PyBobLearnEMIVectorMachine_init_hdf5(PyBobLearnEMIVectorMachineObject
     IVectorMachine_doc.print_usage();
     return -1;
   }
-
+  auto config_ = make_safe(config);
   self->cxx.reset(new bob::learn::em::IVectorMachine(*(config->f)));
 
   return 0;
@@ -78,7 +78,8 @@ static int PyBobLearnEMIVectorMachine_init_ubm(PyBobLearnEMIVectorMachineObject*
   double variance_threshold = 1e-10;
 
   //Here we have to select which keyword argument to read  
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!i|d", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine, &rt, &variance_threshold)){
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!i|d", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine, 
+                                                                  &rt, &variance_threshold)){
     IVectorMachine_doc.print_usage();
     return -1;
   }
@@ -625,15 +626,6 @@ static PyMethodDef PyBobLearnEMIVectorMachine_methods[] = {
     __compute_TtSigmaInvFnorm__.doc()
   },
 
-/*
-  {
-    forward.name(),
-    (PyCFunction)PyBobLearnEMIVectorMachine_Forward,
-    METH_VARARGS|METH_KEYWORDS,
-    forward.doc()
-  },*/
-
-
   {0} /* Sentinel */
 };
 
diff --git a/bob/learn/em/jfa_base.cpp b/bob/learn/em/jfa_base.cpp
index 7637cc6c37516c110efe8a05010f245a53cfb863..27c7136fb5eec9af9943a57dbc9f88d5d746e919 100644
--- a/bob/learn/em/jfa_base.cpp
+++ b/bob/learn/em/jfa_base.cpp
@@ -62,7 +62,7 @@ static int PyBobLearnEMJFABase_init_hdf5(PyBobLearnEMJFABaseObject* self, PyObje
     JFABase_doc.print_usage();
     return -1;
   }
-
+  auto config_ = make_safe(config);
   self->cxx.reset(new bob::learn::em::JFABase(*(config->f)));
 
   return 0;
diff --git a/bob/learn/em/jfa_machine.cpp b/bob/learn/em/jfa_machine.cpp
index 29f2a34cad34499db09f330907335baf53329383..d2f947689c8f5e490e3e8f04982c76e592aaabe9 100644
--- a/bob/learn/em/jfa_machine.cpp
+++ b/bob/learn/em/jfa_machine.cpp
@@ -59,7 +59,7 @@ static int PyBobLearnEMJFAMachine_init_hdf5(PyBobLearnEMJFAMachineObject* self,
     JFAMachine_doc.print_usage();
     return -1;
   }
-
+  auto config_ = make_safe(config);
   self->cxx.reset(new bob::learn::em::JFAMachine(*(config->f)));
 
   return 0;
diff --git a/bob/learn/em/jfa_trainer.cpp b/bob/learn/em/jfa_trainer.cpp
index f0a7a168811da767ef9f30aa31aeccb0045896f6..1ec37821e6933f21610c41ede41bef65ebad959e 100644
--- a/bob/learn/em/jfa_trainer.cpp
+++ b/bob/learn/em/jfa_trainer.cpp
@@ -431,40 +431,6 @@ int PyBobLearnEMJFATrainer_set_Z(PyBobLearnEMJFATrainerObject* self, PyObject* v
 
 
 
-/***** rng *****/
-static auto rng = bob::extension::VariableDoc(
-  "rng",
-  "str",
-  "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.",
-  ""
-);
-PyObject* PyBobLearnEMJFATrainer_getRng(PyBobLearnEMJFATrainerObject* self, void*) {
-  BOB_TRY
-  //Allocating the correspondent python object
-  
-  PyBoostMt19937Object* retval =
-    (PyBoostMt19937Object*)PyBoostMt19937_Type.tp_alloc(&PyBoostMt19937_Type, 0);
-
-  retval->rng = self->cxx->getRng().get();
-  return Py_BuildValue("N", retval);
-  BOB_CATCH_MEMBER("Rng method could not be read", 0)
-}
-int PyBobLearnEMJFATrainer_setRng(PyBobLearnEMJFATrainerObject* self, PyObject* value, void*) {
-  BOB_TRY
-
-  if (!PyBoostMt19937_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects an PyBoostMt19937_Check", Py_TYPE(self)->tp_name, rng.name());
-    return -1;
-  }
-
-  PyBoostMt19937Object* boostObject = 0;
-  PyBoostMt19937_Converter(value, &boostObject);
-  self->cxx->setRng((boost::shared_ptr<boost::mt19937>)boostObject->rng);
-
-  return 0;
-  BOB_CATCH_MEMBER("Rng could not be set", 0)
-}
-
 static PyGetSetDef PyBobLearnEMJFATrainer_getseters[] = { 
   {
    acc_v_a1.name(),
@@ -508,13 +474,6 @@ static PyGetSetDef PyBobLearnEMJFATrainer_getseters[] = {
    acc_d_a2.doc(),
    0
   },
-  {
-   rng.name(),
-   (getter)PyBobLearnEMJFATrainer_getRng,
-   (setter)PyBobLearnEMJFATrainer_setRng,
-   rng.doc(),
-   0
-  },
   {
    __X__.name(),
    (getter)PyBobLearnEMJFATrainer_get_X,
@@ -554,9 +513,10 @@ static auto initialize = bob::extension::FunctionDoc(
   "",
   true
 )
-.add_prototype("jfa_base,stats")
+.add_prototype("jfa_base,stats,rng")
 .add_parameter("jfa_base", ":py:class:`bob.learn.em.JFABase`", "JFABase Object")
-.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object");
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "GMMStats Object")
+.add_parameter("rng", ":py:class:`bob.core.random.mt19937`", "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.");
 static PyObject* PyBobLearnEMJFATrainer_initialize(PyBobLearnEMJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
 
@@ -565,9 +525,16 @@ static PyObject* PyBobLearnEMJFATrainer_initialize(PyBobLearnEMJFATrainerObject*
 
   PyBobLearnEMJFABaseObject* jfa_base = 0;
   PyObject* stats = 0;
+  PyBoostMt19937Object* rng = 0;  
 
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnEMJFABase_Type, &jfa_base,
-                                                                 &PyList_Type, &stats)) return 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!|O!", kwlist, &PyBobLearnEMJFABase_Type, &jfa_base,
+                                                                 &PyList_Type, &stats,
+                                                                 &PyBoostMt19937_Type, &rng)) return 0;
+
+  if(rng){
+    boost::shared_ptr<boost::mt19937> rng_cpy = (boost::shared_ptr<boost::mt19937>)new boost::mt19937(*rng->rng);
+    self->cxx->setRng(rng_cpy);
+  }
 
   std::vector<std::vector<boost::shared_ptr<bob::learn::em::GMMStats> > > training_data;
   if(extract_GMMStats_2d(stats ,training_data)==0)
diff --git a/bob/learn/em/kmeans_machine.cpp b/bob/learn/em/kmeans_machine.cpp
index 81492109b66b6d4f42aaf943601e8b49aa8ad3c2..a7bca528a2f5a2daadc1f5bbb20e0f37a86350b0 100644
--- a/bob/learn/em/kmeans_machine.cpp
+++ b/bob/learn/em/kmeans_machine.cpp
@@ -86,7 +86,7 @@ static int PyBobLearnEMKMeansMachine_init_hdf5(PyBobLearnEMKMeansMachineObject*
     KMeansMachine_doc.print_usage();
     return -1;
   }
-
+  auto config_ = make_safe(config);
   self->cxx.reset(new bob::learn::em::KMeansMachine(*(config->f)));
 
   return 0;
diff --git a/bob/learn/em/kmeans_trainer.cpp b/bob/learn/em/kmeans_trainer.cpp
index ffe7629af0208a7aaca5c9ea7ecd7dcb0886e550..13833e089a1a489da69a4219372aa873c943a5e9 100644
--- a/bob/learn/em/kmeans_trainer.cpp
+++ b/bob/learn/em/kmeans_trainer.cpp
@@ -266,41 +266,6 @@ int PyBobLearnEMKMeansTrainer_setAverageMinDistance(PyBobLearnEMKMeansTrainerObj
 
 
 
-/***** rng *****/
-static auto rng = bob::extension::VariableDoc(
-  "rng",
-  "str",
-  "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.",
-  ""
-);
-PyObject* PyBobLearnEMKMeansTrainer_getRng(PyBobLearnEMKMeansTrainerObject* self, void*) {
-  BOB_TRY
-  //Allocating the correspondent python object
-  
-  PyBoostMt19937Object* retval =
-    (PyBoostMt19937Object*)PyBoostMt19937_Type.tp_alloc(&PyBoostMt19937_Type, 0);
-
-  retval->rng = self->cxx->getRng().get();
-  return Py_BuildValue("N", retval);
-  BOB_CATCH_MEMBER("Rng method could not be read", 0)
-}
-int PyBobLearnEMKMeansTrainer_setRng(PyBobLearnEMKMeansTrainerObject* self, PyObject* value, void*) {
-  BOB_TRY
-
-  if (!PyBoostMt19937_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects an PyBoostMt19937_Check", Py_TYPE(self)->tp_name, rng.name());
-    return -1;
-  }
-
-  PyBoostMt19937Object* boostObject = 0;
-  PyBoostMt19937_Converter(value, &boostObject);
-  self->cxx->setRng((boost::shared_ptr<boost::mt19937>)boostObject->rng);
-
-  return 0;
-  BOB_CATCH_MEMBER("Rng could not be set", 0)
-}
-
-
 
 static PyGetSetDef PyBobLearnEMKMeansTrainer_getseters[] = { 
   {
@@ -331,13 +296,6 @@ static PyGetSetDef PyBobLearnEMKMeansTrainer_getseters[] = {
    average_min_distance.doc(),
    0
   },
-  {
-   rng.name(),
-   (getter)PyBobLearnEMKMeansTrainer_getRng,
-   (setter)PyBobLearnEMKMeansTrainer_setRng,
-   rng.doc(),
-   0
-  },
   {0}  // Sentinel
 };
 
@@ -353,9 +311,10 @@ static auto initialize = bob::extension::FunctionDoc(
   "Data is split into as many chunks as there are means, then each mean is set to a random example within each chunk.",
   true
 )
-.add_prototype("kmeans_machine,data")
+.add_prototype("kmeans_machine,data,rng")
 .add_parameter("kmeans_machine", ":py:class:`bob.learn.em.KMeansMachine`", "KMeansMachine Object")
-.add_parameter("data", "array_like <float, 2D>", "Input data");
+.add_parameter("data", "array_like <float, 2D>", "Input data")
+.add_parameter("rng", ":py:class:`bob.core.random.mt19937`", "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.");
 static PyObject* PyBobLearnEMKMeansTrainer_initialize(PyBobLearnEMKMeansTrainerObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
 
@@ -363,12 +322,19 @@ static PyObject* PyBobLearnEMKMeansTrainer_initialize(PyBobLearnEMKMeansTrainerO
   char** kwlist = initialize.kwlist(0);
 
   PyBobLearnEMKMeansMachineObject* kmeans_machine = 0;
-  PyBlitzArrayObject* data                          = 0;
+  PyBlitzArrayObject* data                        = 0;
+  PyBoostMt19937Object* rng = 0;  
 
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMKMeansMachine_Type, &kmeans_machine,
-                                                                 &PyBlitzArray_Converter, &data)) return 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&|O!", kwlist, &PyBobLearnEMKMeansMachine_Type, &kmeans_machine,
+                                                                 &PyBlitzArray_Converter, &data,
+                                                                 &PyBoostMt19937_Type, &rng)) return 0;
   auto data_ = make_safe(data);
 
+  if(rng){
+    boost::shared_ptr<boost::mt19937> rng_cpy = (boost::shared_ptr<boost::mt19937>)new boost::mt19937(*rng->rng);
+    self->cxx->setRng(rng_cpy);
+  }
+
   self->cxx->initialize(*kmeans_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
 
   BOB_CATCH_MEMBER("cannot perform the initialize method", 0)
diff --git a/bob/learn/em/plda_base.cpp b/bob/learn/em/plda_base.cpp
index 5d48a169bb18ac4b462d3a625110c4ab8080e5a9..5e1b9d62b87bb45098f399de4e15e783a886d89a 100644
--- a/bob/learn/em/plda_base.cpp
+++ b/bob/learn/em/plda_base.cpp
@@ -70,7 +70,7 @@ static int PyBobLearnEMPLDABase_init_hdf5(PyBobLearnEMPLDABaseObject* self, PyOb
     PLDABase_doc.print_usage();
     return -1;
   }
-
+  auto config_ = make_safe(config);
   self->cxx.reset(new bob::learn::em::PLDABase(*(config->f)));
 
   return 0;
diff --git a/bob/learn/em/plda_machine.cpp b/bob/learn/em/plda_machine.cpp
index 6f73f4dc4fdef37d73d1fefbd0a4c638738da162..734cb58a428c34dcf9622dd275039a808ef8f962 100644
--- a/bob/learn/em/plda_machine.cpp
+++ b/bob/learn/em/plda_machine.cpp
@@ -67,7 +67,7 @@ static int PyBobLearnEMPLDAMachine_init_hdf5(PyBobLearnEMPLDAMachineObject* self
     PLDAMachine_doc.print_usage();
     return -1;
   }
-
+  auto config_ = make_safe(config);
   self->cxx.reset(new bob::learn::em::PLDAMachine(*(config->f),plda_base->cxx));
 
   return 0;
@@ -635,31 +635,23 @@ static auto compute_log_likelihood = bob::extension::FunctionDoc(
   true
 )
 .add_prototype("sample,with_enrolled_samples","output")
-.add_parameter("sample", "array_like <float, 1D>", "Sample")
+.add_parameter("sample", "array_like <float, 1D>, array_like <float, 2D>", "Sample")
 .add_parameter("with_enrolled_samples", "bool", "")
-.add_return("output","double","The log-likelihood");
+.add_return("output","float","The log-likelihood");
 static PyObject* PyBobLearnEMPLDAMachine_computeLogLikelihood(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
   
   char** kwlist = compute_log_likelihood.kwlist(0);
   
   PyBlitzArrayObject* samples;
-  PyArrayObject* numpy_samples; 
   PyObject* with_enrolled_samples = Py_True;
    
-  /*Convert to PyObject first to access the number of dimensions*/
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!|O!", kwlist, &PyArray_Type, &numpy_samples,
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&|O!", kwlist, &PyBlitzArray_Converter, &samples,
                                                                   &PyBool_Type, &with_enrolled_samples)) return 0;
-  auto numpy_samples_ = make_safe(numpy_samples);
-  Py_INCREF(numpy_samples);
-  int dim = PyArray_NDIM(numpy_samples);
-  
-  /*Now converting to PyBlitzArrayObject*/
-  samples = reinterpret_cast<PyBlitzArrayObject*>(PyBlitzArray_FromNumpyArray(numpy_samples));
   auto samples_ = make_safe(samples);
   
   /*Using the proper method according to the dimension*/
-  if (dim==1)
+  if (samples->ndim==1)
     return Py_BuildValue("d",self->cxx->computeLogLikelihood(*PyBlitzArrayCxx_AsBlitz<double,1>(samples), f(with_enrolled_samples)));
   else
     return Py_BuildValue("d",self->cxx->computeLogLikelihood(*PyBlitzArrayCxx_AsBlitz<double,2>(samples), f(with_enrolled_samples)));
@@ -677,28 +669,21 @@ static auto forward = bob::extension::FunctionDoc(
   true
 )
 .add_prototype("samples","output")
-.add_parameter("samples", "array_like <float, 1D>", "Sample")
-.add_return("output","double","The log-likelihood ratio");
+.add_parameter("samples", "array_like <float, 1D>, array_like <float, 2D>", "Sample")
+.add_return("output","float","The log-likelihood ratio");
 static PyObject* PyBobLearnEMPLDAMachine_forward(PyBobLearnEMPLDAMachineObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
   
   char** kwlist = forward.kwlist(0);
 
   PyBlitzArrayObject* samples;
-  PyArrayObject* numpy_samples; 
 
   /*Convert to PyObject first to access the number of dimensions*/
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyArray_Type, &numpy_samples)) return 0;
-  auto numpy_samples_ = make_safe(numpy_samples);
-  Py_INCREF(numpy_samples);
-  int dim = PyArray_NDIM(numpy_samples);
-  
-  /*Now converting to PyBlitzArrayObject*/
-  samples = reinterpret_cast<PyBlitzArrayObject*>(PyBlitzArray_FromNumpyArray(numpy_samples));
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&", kwlist, &PyBlitzArray_Converter, &samples)) return 0;
   auto samples_ = make_safe(samples);
 
    //There are 2 methods in C++, one <double,1> and the another <double,2>
-  if(dim==1)
+  if(samples->ndim==1)
     return Py_BuildValue("d",self->cxx->forward(*PyBlitzArrayCxx_AsBlitz<double,1>(samples)));
   else
     return Py_BuildValue("d",self->cxx->forward(*PyBlitzArrayCxx_AsBlitz<double,2>(samples)));
diff --git a/bob/learn/em/plda_trainer.cpp b/bob/learn/em/plda_trainer.cpp
index 2661a27961b0ce214e4253c8ba4cdd1d46151cfb..92271f47a8cda10a587e8089a9dadb346ae8cd4b 100644
--- a/bob/learn/em/plda_trainer.cpp
+++ b/bob/learn/em/plda_trainer.cpp
@@ -256,41 +256,6 @@ PyObject* PyBobLearnEMPLDATrainer_get_z_first_order(PyBobLearnEMPLDATrainerObjec
 }
 
 
-/***** rng *****/
-static auto rng = bob::extension::VariableDoc(
-  "rng",
-  "str",
-  "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.",
-  ""
-);
-PyObject* PyBobLearnEMPLDATrainer_getRng(PyBobLearnEMPLDATrainerObject* self, void*) {
-  BOB_TRY
-  //Allocating the correspondent python object
-  
-  PyBoostMt19937Object* retval =
-    (PyBoostMt19937Object*)PyBoostMt19937_Type.tp_alloc(&PyBoostMt19937_Type, 0);
-
-  retval->rng = self->cxx->getRng().get();
-  return Py_BuildValue("O", retval);
-  BOB_CATCH_MEMBER("Rng method could not be read", 0)
-}
-int PyBobLearnEMPLDATrainer_setRng(PyBobLearnEMPLDATrainerObject* self, PyObject* value, void*) {
-  BOB_TRY
-
-  if (!PyBoostMt19937_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects an PyBoostMt19937_Check", Py_TYPE(self)->tp_name, rng.name());
-    return -1;
-  }
-
-  PyBoostMt19937Object* rng_object = 0;
-  PyArg_Parse(value, "O!", &PyBoostMt19937_Type, &rng_object);
-  self->cxx->setRng((boost::shared_ptr<boost::mt19937>)rng_object->rng);
-
-  return 0;
-  BOB_CATCH_MEMBER("Rng could not be set", 0)
-}
-
-
 /***** init_f_method *****/
 static auto init_f_method = bob::extension::VariableDoc(
   "init_f_method",
@@ -417,13 +382,6 @@ static PyGetSetDef PyBobLearnEMPLDATrainer_getseters[] = {
    z_second_order.doc(),
    0
   },
-  {
-   rng.name(),
-   (getter)PyBobLearnEMPLDATrainer_getRng,
-   (setter)PyBobLearnEMPLDATrainer_setRng,
-   rng.doc(),
-   0
-  },
   {
    init_f_method.name(),
    (getter)PyBobLearnEMPLDATrainer_getFMethod,
@@ -467,9 +425,10 @@ static auto initialize = bob::extension::FunctionDoc(
   "",
   true
 )
-.add_prototype("plda_base,data")
+.add_prototype("plda_base,data,rng")
 .add_parameter("plda_base", ":py:class:`bob.learn.em.PLDABase`", "PLDAMachine Object")
-.add_parameter("data", "list", "");
+.add_parameter("data", "list", "")
+.add_parameter("rng", ":py:class:`bob.core.random.mt19937`", "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.");
 static PyObject* PyBobLearnEMPLDATrainer_initialize(PyBobLearnEMPLDATrainerObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
 
@@ -478,13 +437,21 @@ static PyObject* PyBobLearnEMPLDATrainer_initialize(PyBobLearnEMPLDATrainerObjec
 
   PyBobLearnEMPLDABaseObject* plda_base = 0;
   PyObject* data = 0;
+  PyBoostMt19937Object* rng = 0;
 
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnEMPLDABase_Type, &plda_base,
-                                                                 &PyList_Type, &data)) return 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!|O!", kwlist, &PyBobLearnEMPLDABase_Type, &plda_base,
+                                                                 &PyList_Type, &data,
+                                                                 &PyBoostMt19937_Type, &rng)) return 0;
 
   std::vector<blitz::Array<double,2> > data_vector;
-  if(list_as_vector(data ,data_vector)==0)
+  if(list_as_vector(data ,data_vector)==0){
+    if(rng){
+      boost::shared_ptr<boost::mt19937> rng_cpy = (boost::shared_ptr<boost::mt19937>)new boost::mt19937(*rng->rng);
+      self->cxx->setRng(rng_cpy);
+    }
+
     self->cxx->initialize(*plda_base->cxx, data_vector);
+  }
 
   BOB_CATCH_MEMBER("cannot perform the initialize method", 0)
 
diff --git a/bob/learn/em/test/test_jfa_trainer.py b/bob/learn/em/test/test_jfa_trainer.py
index 1bd520e1c9542f40e66e3478588540b3147e525d..afb46139cbc66a6cae92558f89f0a310e993f6ab 100644
--- a/bob/learn/em/test/test_jfa_trainer.py
+++ b/bob/learn/em/test/test_jfa_trainer.py
@@ -268,16 +268,15 @@ def test_JFATrainInitialize():
   # first round
   rng = bob.core.random.mt19937(0)
   jt = JFATrainer()
-  jt.rng = rng
-  jt.initialize(jb, TRAINING_STATS)
+  #jt.rng = rng
+  jt.initialize(jb, TRAINING_STATS, rng)
   u1 = jb.u
   v1 = jb.v
   d1 = jb.d
 
   # second round
   rng = bob.core.random.mt19937(0)
-  jt.rng = rng
-  jt.initialize(jb, TRAINING_STATS)
+  jt.initialize(jb, TRAINING_STATS, rng)
   u2 = jb.u
   v2 = jb.v
   d2 = jb.d
@@ -301,15 +300,15 @@ def test_ISVTrainInitialize():
   # first round
   rng = bob.core.random.mt19937(0)
   it = ISVTrainer(10)
-  it.rng = rng
-  it.initialize(ib, TRAINING_STATS)
+  #it.rng = rng
+  it.initialize(ib, TRAINING_STATS, rng)
   u1 = ib.u
   d1 = ib.d
 
   # second round
   rng = bob.core.random.mt19937(0)
-  it.rng = rng
-  it.initialize(ib, TRAINING_STATS)
+  #it.rng = rng
+  it.initialize(ib, TRAINING_STATS, rng)
   u2 = ib.u
   d2 = ib.d
 
diff --git a/bob/learn/em/test/test_kmeans_trainer.py b/bob/learn/em/test/test_kmeans_trainer.py
index 8794a927991ed7e8409b0390d5f7f63a2f541418..00dcbcc7ab007ee24d17bf9e28d571209ac6ab1e 100644
--- a/bob/learn/em/test/test_kmeans_trainer.py
+++ b/bob/learn/em/test/test_kmeans_trainer.py
@@ -90,9 +90,9 @@ def test_kmeans_noduplicate():
   # Defines machine and trainer
   machine = KMeansMachine(dim_c, dim_d)
   trainer = KMeansTrainer()
-  trainer.rng = bob.core.random.mt19937(seed)
+  rng = bob.core.random.mt19937(seed)
   trainer.initialization_method = 'RANDOM_NO_DUPLICATE'
-  trainer.initialize(machine, data)
+  trainer.initialize(machine, data, rng)
   # Makes sure that the two initial mean vectors selected are different
   assert equals(machine.get_mean(0), machine.get_mean(1), 1e-8) == False
 
diff --git a/bob/learn/em/test/test_plda_trainer.py b/bob/learn/em/test/test_plda_trainer.py
index 02ede06e9a00123557ab74567af410536d10343c..b28ea45d69bd1cfcfbefcfa32aa977d7f9867bb3 100644
--- a/bob/learn/em/test/test_plda_trainer.py
+++ b/bob/learn/em/test/test_plda_trainer.py
@@ -719,22 +719,23 @@ def test_plda_comparisons():
 
   training_set = [numpy.array([[1,2,3,4]], numpy.float64), numpy.array([[3,4,3,4]], numpy.float64)]
   m = PLDABase(4,1,1,1e-8)
-  t1.rng.seed(37)
-  t1.initialize(m, training_set)
+  rng = bob.core.random.mt19937(37)
+  t1.initialize(m, training_set,rng)
   t1.eStep(m, training_set)
   t1.mStep(m, training_set)
   assert (t1 == t2 ) is False
   assert t1 != t2
   assert (t1.is_similar_to(t2) ) is False
-  t2.rng.seed(37)
-  t2.initialize(m, training_set)
+  rng = bob.core.random.mt19937(37)
+  t2.initialize(m, training_set, rng)
   t2.eStep(m, training_set)
   t2.mStep(m, training_set)
   assert t1 == t2
   assert (t1 != t2 ) is False
   assert t1.is_similar_to(t2)
-  t2.rng.seed(77)
-  t2.initialize(m, training_set)
+
+  rng = bob.core.random.mt19937(77)
+  t2.initialize(m, training_set, rng)
   t2.eStep(m, training_set)
   t2.mStep(m, training_set)
   assert (t1 == t2 ) is False
diff --git a/bob/learn/em/ztnorm.cpp b/bob/learn/em/ztnorm.cpp
index 61e861ac9e6ddf8fe5ecbdc27e0c2371fd7b21d4..9a95dc0b5b83ff20d49387cf54e62ea59f603d79 100644
--- a/bob/learn/em/ztnorm.cpp
+++ b/bob/learn/em/ztnorm.cpp
@@ -29,7 +29,7 @@ PyObject* PyBobLearnEM_ztNorm(PyObject*, PyObject* args, PyObject* kwargs) {
   char** kwlist = zt_norm.kwlist(0);
   
   PyBlitzArrayObject *rawscores_probes_vs_models_o, *rawscores_zprobes_vs_models_o, *rawscores_probes_vs_tmodels_o, 
-  *rawscores_zprobes_vs_tmodels_o, *mask_zprobes_vs_tmodels_istruetrial_o;
+  *rawscores_zprobes_vs_tmodels_o, *mask_zprobes_vs_tmodels_istruetrial_o=0;
 
   if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O&O&O&O&|O&", kwlist, &PyBlitzArray_Converter, &rawscores_probes_vs_models_o,
                                                                        &PyBlitzArray_Converter, &rawscores_zprobes_vs_models_o,
@@ -45,14 +45,12 @@ PyObject* PyBobLearnEM_ztNorm(PyObject*, PyObject* args, PyObject* kwargs) {
   auto rawscores_zprobes_vs_models_         = make_safe(rawscores_zprobes_vs_models_o);
   auto rawscores_probes_vs_tmodels_         = make_safe(rawscores_probes_vs_tmodels_o);
   auto rawscores_zprobes_vs_tmodels_        = make_safe(rawscores_zprobes_vs_tmodels_o);
-  //auto mask_zprobes_vs_tmodels_istruetrial_ = make_safe(mask_zprobes_vs_tmodels_istruetrial_o);
+  auto mask_zprobes_vs_tmodels_istruetrial_ = make_xsafe(mask_zprobes_vs_tmodels_istruetrial_o);
 
   blitz::Array<double,2>  rawscores_probes_vs_models = *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_models_o);
   blitz::Array<double,2> normalized_scores = blitz::Array<double,2>(rawscores_probes_vs_models.extent(0), rawscores_probes_vs_models.extent(1));
 
-  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-
-  if(nargs==4)
+  if(!mask_zprobes_vs_tmodels_istruetrial_o)
     bob::learn::em::ztNorm(*PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_models_o),
                              *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_zprobes_vs_models_o),
                              *PyBlitzArrayCxx_AsBlitz<double,2>(rawscores_probes_vs_tmodels_o),