From 580ba1db8e3185323bd03f430e4f0c9466a0c947 Mon Sep 17 00:00:00 2001
From: Tiago Freitas Pereira <tiagofrepereira@gmail.com>
Date: Tue, 17 Feb 2015 12:08:09 +0100
Subject: [PATCH] Document the trainers and move the EM training loop into a generic train() helper

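Remove the pure-Python trainer wrappers (__ML_gmm_trainer__, __kmeans_trainer__,
__empca_trainer__, __isv_trainer__, __ivector_trainer__, __jfa_trainer__ and
__plda_trainer__), whose only job was to run the EM loop, and move that loop
into a single generic helper in bob/learn/em/train.py. To support this, the C++
trainer types are now registered under their public names (e.g. KMeansTrainer
instead of _KMeansTrainer) and initialize()/m_step() accept an extra, ignored
data/stats argument so that every trainer exposes the same interface. Also
expose the rng property on ISVTrainer and use_sum_second_order on PLDATrainer,
drop the unused gmm_base_trainer.cpp bindings, and declare the ztnorm and
linear_scoring entry points in main.h so they build as separate compilation
units.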
---
 bob/learn/em/ML_gmm_trainer.cpp               |  32 +-
 bob/learn/em/__MAP_gmm_trainer__.py           |  53 +--
 bob/learn/em/__ML_gmm_trainer__.py            |  85 ----
 bob/learn/em/__empca_trainer__.py             |  77 ---
 bob/learn/em/__init__.py                      |   8 +-
 bob/learn/em/__isv_trainer__.py               |  49 --
 bob/learn/em/__ivector_trainer__.py           |  51 --
 bob/learn/em/__jfa_trainer__.py               |  71 ---
 bob/learn/em/__kmeans_trainer__.py            |  81 ----
 bob/learn/em/__plda_trainer__.py              |  50 --
 bob/learn/em/gmm_base_trainer.cpp             | 437 ------------------
 .../em/include/bob.learn.em/ISVTrainer.h      |  12 +
 bob/learn/em/isv_trainer.cpp                  |  55 ++-
 bob/learn/em/ivector_trainer.cpp              |  20 +-
 bob/learn/em/jfa_trainer.cpp                  |   2 +-
 bob/learn/em/kmeans_machine.cpp               |   2 +-
 bob/learn/em/kmeans_trainer.cpp               |  15 +-
 bob/learn/em/linear_scoring.cpp               |   8 +-
 bob/learn/em/main.cpp                         |   6 +-
 bob/learn/em/main.h                           |  18 +-
 bob/learn/em/plda_trainer.cpp                 |  54 ++-
 bob/learn/em/test/test_em.py                  |  38 +-
 bob/learn/em/test/test_jfa_trainer.py         |  15 +-
 bob/learn/em/test/test_kmeans_trainer.py      |  21 +-
 bob/learn/em/test/test_plda_trainer.py        |  36 +-
 bob/learn/em/train.py                         |  61 +++
 bob/learn/em/ztnorm.cpp                       |  12 +-
 doc/guide.rst                                 |  24 +-
 28 files changed, 299 insertions(+), 1094 deletions(-)
 delete mode 100644 bob/learn/em/__ML_gmm_trainer__.py
 delete mode 100644 bob/learn/em/__empca_trainer__.py
 delete mode 100644 bob/learn/em/__isv_trainer__.py
 delete mode 100644 bob/learn/em/__ivector_trainer__.py
 delete mode 100644 bob/learn/em/__jfa_trainer__.py
 delete mode 100644 bob/learn/em/__kmeans_trainer__.py
 delete mode 100644 bob/learn/em/__plda_trainer__.py
 delete mode 100644 bob/learn/em/gmm_base_trainer.cpp
 create mode 100644 bob/learn/em/train.py

diff --git a/bob/learn/em/ML_gmm_trainer.cpp b/bob/learn/em/ML_gmm_trainer.cpp
index e9d9955..6b5b271 100644
--- a/bob/learn/em/ML_gmm_trainer.cpp
+++ b/bob/learn/em/ML_gmm_trainer.cpp
@@ -156,20 +156,21 @@ static auto initialize = bob::extension::FunctionDoc(
   "",
   true
 )
-.add_prototype("gmm_machine")
-.add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object");
+.add_prototype("gmm_machine,data")
+.add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object")
+.add_parameter("data", "array_like <float, 2D>", "Ignored.");
 static PyObject* PyBobLearnEMMLGMMTrainer_initialize(PyBobLearnEMMLGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
 
   /* Parses input arguments in a single shot */
   char** kwlist = initialize.kwlist(0);
-
   PyBobLearnEMGMMMachineObject* gmm_machine = 0;
+  PyBlitzArrayObject* data                  = 0;  
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!|O&", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine,
+                                                                  &PyBlitzArray_Converter, &data)) return 0;
+  if (data != NULL){
+    // the optional "data" argument is ignored; just release the reference taken by the converter
+    auto data_ = make_safe(data);
+  }
 
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine)){
-    PyErr_Format(PyExc_RuntimeError, "%s.%s. Was not possible to read :py:class:`bob.learn.em.GMMMachine`", Py_TYPE(self)->tp_name, initialize.name());
-    return 0;
-  }
   self->cxx->initialize(*gmm_machine->cxx);
   BOB_CATCH_MEMBER("cannot perform the initialize method", 0)
 
@@ -222,17 +223,22 @@ static auto mStep = bob::extension::FunctionDoc(
 
   true
 )
-.add_prototype("gmm_machine")
-.add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object");
+.add_prototype("gmm_machine,data")
+.add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object")
+.add_parameter("data", "array_like <float, 2D>", "Ignored.");
 static PyObject* PyBobLearnEMMLGMMTrainer_mStep(PyBobLearnEMMLGMMTrainerObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
 
   /* Parses input arguments in a single shot */
   char** kwlist = mStep.kwlist(0);
 
-  PyBobLearnEMGMMMachineObject* gmm_machine;
-  
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine)) return 0;
+  PyBobLearnEMGMMMachineObject* gmm_machine = 0;
+  PyBlitzArrayObject* data                  = 0;  
+
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!|O&", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine,
+                                                                  &PyBlitzArray_Converter, &data)) return 0;
+  if (data != NULL){
+    // the optional "data" argument is ignored; just release the reference taken by the converter
+    auto data_ = make_safe(data);
+  }
 
   self->cxx->mStep(*gmm_machine->cxx);
 
@@ -330,6 +336,6 @@ bool init_BobLearnEMMLGMMTrainer(PyObject* module)
 
   // add the type to the module
   Py_INCREF(&PyBobLearnEMMLGMMTrainer_Type);
-  return PyModule_AddObject(module, "_ML_GMMTrainer", (PyObject*)&PyBobLearnEMMLGMMTrainer_Type) >= 0;
+  return PyModule_AddObject(module, "ML_GMMTrainer", (PyObject*)&PyBobLearnEMMLGMMTrainer_Type) >= 0;
 }
 
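The ignored "data" argument above is what lets one generic loop drive every
trainer. The new bob/learn/em/train.py is not part of this excerpt, so the
following is only a sketch of the shape such a helper could take, reconstructed
from the Python loops deleted further down and from the bob.learn.em.train(...)
calls in the updated tests; the default values and the eStep/mStep spellings,
which vary across trainers in this patch, are assumptions:

    def train(trainer, machine, data, max_iterations=10, convergence_threshold=None):
      """Generic EM loop shared by all trainers (sketch)."""
      trainer.initialize(machine, data)   # "data" is ignored by some trainers
      trainer.eStep(machine, data)
      average_output = 0
      if convergence_threshold is not None:
        average_output = trainer.compute_likelihood(machine)
      for i in range(max_iterations):
        average_output_previous = average_output
        trainer.mStep(machine, data)      # "data" is ignored by some trainers
        trainer.eStep(machine, data)
        if convergence_threshold is not None:
          average_output = trainer.compute_likelihood(machine)
          # stop once the relative improvement falls below the threshold
          if abs((average_output_previous - average_output) / average_output_previous) <= convergence_threshold:
            break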
diff --git a/bob/learn/em/__MAP_gmm_trainer__.py b/bob/learn/em/__MAP_gmm_trainer__.py
index 6f4af38..322d88f 100644
--- a/bob/learn/em/__MAP_gmm_trainer__.py
+++ b/bob/learn/em/__MAP_gmm_trainer__.py
@@ -11,7 +11,7 @@ import numpy
 # define the class
 class MAP_GMMTrainer(_MAP_GMMTrainer):
 
-  def __init__(self, prior_gmm, update_means=True, update_variances=False, update_weights=False, convergence_threshold=0.001, max_iterations=10, converge_by_likelihood=True, **kwargs):
+  def __init__(self, prior_gmm, update_means=True, update_variances=False, update_weights=False, **kwargs):
     """
     :py:class:`bob.learn.em.MAP_GMMTrainer` constructor
 
@@ -43,57 +43,6 @@ class MAP_GMMTrainer(_MAP_GMMTrainer):
       relevance_factor = kwargs.get('relevance_factor')
       _MAP_GMMTrainer.__init__(self, prior_gmm, relevance_factor=relevance_factor, update_means=update_means, update_variances=update_variances,update_weights=update_weights)
     
-    self.convergence_threshold  = convergence_threshold
-    self.max_iterations         = max_iterations
-    self.converge_by_likelihood = converge_by_likelihood
-
-
- 
-
-
-  def train(self, gmm_machine, data):
-    """
-    Train the :py:class:bob.learn.em.GMMMachine using data
-
-    Keyword Parameters:
-      gmm_machine
-        The :py:class:bob.learn.em.GMMMachine class
-      data
-        The data to be trained
-    """
-
-    #Initialization
-    self.initialize(gmm_machine);
-
-    #Do the Expectation-Maximization algorithm
-    average_output_previous = 0
-    average_output = -numpy.inf;
-
-
-    #eStep
-    self.eStep(gmm_machine, data);
-
-    if(self.converge_by_likelihood):
-      average_output = self.compute_likelihood(gmm_machine);    
-
-    for i in range(self.max_iterations):
-      #saves average output from last iteration
-      average_output_previous = average_output;
-
-      #mStep
-      self.mStep(gmm_machine);
-
-      #eStep
-      self.eStep(gmm_machine, data);
-
-      #Computes log likelihood if required
-      if(self.converge_by_likelihood):
-        average_output = self.compute_likelihood(gmm_machine);
-
-        #Terminates if converged (and likelihood computation is set)
-        if abs((average_output_previous - average_output)/average_output_previous) <= self.convergence_threshold:
-          break
-
 
 # copy the documentation from the base class
 __doc__ = _MAP_GMMTrainer.__doc__
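With the loop gone from the wrapper, training is now driven externally. Based
on the updated tests further down, usage looks roughly like this; gmm,
prior_gmm and data stand for objects set up as in those tests:

    import bob.learn.em

    trainer = bob.learn.em.MAP_GMMTrainer(prior_gmm, relevance_factor=4.,
                                          update_means=True, update_variances=False,
                                          update_weights=False)
    bob.learn.em.train(trainer, gmm, data, max_iterations=10,
                       convergence_threshold=0.001)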
diff --git a/bob/learn/em/__ML_gmm_trainer__.py b/bob/learn/em/__ML_gmm_trainer__.py
deleted file mode 100644
index 35ac30c..0000000
--- a/bob/learn/em/__ML_gmm_trainer__.py
+++ /dev/null
@@ -1,85 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
-# Mon Jan 22 18:29:10 2015
-#
-# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
-
-from ._library import _ML_GMMTrainer
-import numpy
-
-# define the class
-class ML_GMMTrainer(_ML_GMMTrainer):
-
-  def __init__(self, update_means=True, update_variances=False, update_weights=False, convergence_threshold=0.001, max_iterations=10, converge_by_likelihood=True):
-    """
-    :py:class:bob.learn.em.ML_GMMTrainer constructor
-
-    Keyword Parameters:
-      update_means
-
-      update_variances
-
-      update_weights
- 
-      convergence_threshold
-        Convergence threshold
-      max_iterations
-        Number of maximum iterations
-      converge_by_likelihood
-        Tells whether we compute log_likelihood as a convergence criteria, or not 
-        
-    """
-
-    _ML_GMMTrainer.__init__(self, update_means=update_means, update_variances=update_variances, update_weights=update_weights)
-    self.convergence_threshold  = convergence_threshold
-    self.max_iterations         = max_iterations
-    self.converge_by_likelihood = converge_by_likelihood
-
-
-  def train(self, gmm_machine, data):
-    """
-    Train the :py:class:bob.learn.em.GMMMachine using data
-
-    Keyword Parameters:
-      gmm_machine
-        The :py:class:bob.learn.em.GMMMachine class
-      data
-        The data to be trained
-    """
-
-    #Initialization
-    self.initialize(gmm_machine);
-
-    #Do the Expectation-Maximization algorithm
-    average_output_previous = 0
-    average_output = -numpy.inf;
-
-
-    #eStep
-    self.eStep(gmm_machine, data);
-
-    if(self.converge_by_likelihood):
-      average_output = self.compute_likelihood(gmm_machine);    
-
-    for i in range(self.max_iterations):
-      #saves average output from last iteration
-      average_output_previous = average_output;
-
-      #mStep
-      self.mStep(gmm_machine);
-
-      #eStep
-      self.eStep(gmm_machine, data);
-
-      #Computes log likelihood if required
-      if(self.converge_by_likelihood):
-        average_output = self.compute_likelihood(gmm_machine);
-
-        #Terminates if converged (and likelihood computation is set)
-        if abs((average_output_previous - average_output)/average_output_previous) <= self.convergence_threshold:
-          break
-
-
-# copy the documentation from the base class
-__doc__ = _ML_GMMTrainer.__doc__
diff --git a/bob/learn/em/__empca_trainer__.py b/bob/learn/em/__empca_trainer__.py
deleted file mode 100644
index d710dc8..0000000
--- a/bob/learn/em/__empca_trainer__.py
+++ /dev/null
@@ -1,77 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
-# Wed Fev 04 13:35:10 2015 +0200
-#
-# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
-
-from ._library import _EMPCATrainer
-import numpy
-
-# define the class
-class EMPCATrainer (_EMPCATrainer):
-
-  def __init__(self, convergence_threshold=0.001, max_iterations=10, compute_likelihood=True):
-    """
-    :py:class:`bob.learn.em.EMPCATrainer` constructor
-
-    Keyword Parameters:
-      convergence_threshold
-        Convergence threshold
-      max_iterations
-        Number of maximum iterations
-      compute_likelihood
-        
-    """
-
-    _EMPCATrainer.__init__(self,convergence_threshold)
-    self._max_iterations        = max_iterations
-    self._compute_likelihood    = compute_likelihood
-
-
-  def train(self, linear_machine, data):
-    """
-    Train the :py:class:bob.learn.em.LinearMachine using data
-
-    Keyword Parameters:
-      linear_machine
-        The :py:class:bob.learn.em.LinearMachine class
-      data
-        The data to be trained
-    """
-
-    #Initialization
-    self.initialize(linear_machine, data);
-      
-    #Do the Expectation-Maximization algorithm
-    average_output_previous = 0
-    average_output = -numpy.inf;
-
-    #eStep
-    self.eStep(linear_machine, data);
-
-    if(self._compute_likelihood):
-      average_output = self.compute_likelihood(linear_machine);
-    
-    for i in range(self._max_iterations):
-
-      #saves average output from last iteration
-      average_output_previous = average_output;
-
-      #mStep
-      self.mStep(linear_machine);
-
-      #eStep
-      self.eStep(linear_machine, data);
-
-      #Computes log likelihood if required
-      if(self._compute_likelihood):
-        average_output = self.compute_likelihood(linear_machine);
-
-        #Terminates if converged (and likelihood computation is set)
-        if abs((average_output_previous - average_output)/average_output_previous) <= self._convergence_threshold:
-          break
-
-
-# copy the documentation from the base class
-__doc__ = _EMPCATrainer.__doc__
diff --git a/bob/learn/em/__init__.py b/bob/learn/em/__init__.py
index dbe487b..4e9c455 100644
--- a/bob/learn/em/__init__.py
+++ b/bob/learn/em/__init__.py
@@ -11,14 +11,8 @@ bob.extension.load_bob_library('bob.learn.em', __file__)
 from ._library import *
 from . import version
 from .version import module as __version__
-from .__kmeans_trainer__ import *
-from .__ML_gmm_trainer__ import *
 from .__MAP_gmm_trainer__ import *
-from .__jfa_trainer__ import *
-from .__isv_trainer__ import *
-from .__ivector_trainer__ import *
-from .__plda_trainer__ import *
-
+from .train import *
 
 def ztnorm_same_value(vect_a, vect_b):
   """Computes the matrix of boolean D for the ZT-norm, which indicates where
diff --git a/bob/learn/em/__isv_trainer__.py b/bob/learn/em/__isv_trainer__.py
deleted file mode 100644
index 9f4b84f..0000000
--- a/bob/learn/em/__isv_trainer__.py
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
-# Mon Fev 02 21:40:10 2015 +0200
-#
-# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
-
-from ._library import _ISVTrainer
-import numpy
-
-# define the class
-class ISVTrainer (_ISVTrainer):
-
-  def __init__(self, max_iterations=10, relevance_factor=4.):
-    """
-    :py:class:`bob.learn.em.ISVTrainer` constructor
-
-    Keyword Parameters:
-      max_iterations
-        Number of maximum iterations
-    """
-    _ISVTrainer.__init__(self, relevance_factor)
-    self._max_iterations         = max_iterations
-
-
-  def train(self, isv_base, data):
-    """
-    Train the :py:class:`bob.learn.em.ISVBase` using data
-
-    Keyword Parameters:
-      jfa_base
-        The `:py:class:bob.learn.em.ISVBase` class
-      data
-        The data to be trained
-    """
-
-    #Initialization
-    self.initialize(isv_base, data);
-      
-    for i in range(self._max_iterations):
-      #eStep
-      self.eStep(isv_base, data);
-      #mStep
-      self.mStep(isv_base);
-
-
-
-# copy the documentation from the base class
-__doc__ = _ISVTrainer.__doc__
diff --git a/bob/learn/em/__ivector_trainer__.py b/bob/learn/em/__ivector_trainer__.py
deleted file mode 100644
index 579a1d8..0000000
--- a/bob/learn/em/__ivector_trainer__.py
+++ /dev/null
@@ -1,51 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
-# Tue Fev 03 13:20:10 2015 +0200
-#
-# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
-
-from ._library import _IVectorTrainer
-import numpy
-
-# define the class
-class IVectorTrainer (_IVectorTrainer):
-
-  def __init__(self, max_iterations=10, update_sigma=False):
-    """
-    :py:class:`bob.learn.em.IVectorTrainer` constructor
-
-    Keyword Parameters:
-      max_iterations
-        Number of maximum iterations
-      update_sigma
-        
-    """
-    _IVectorTrainer.__init__(self, update_sigma)
-    self._max_iterations         = max_iterations
-
-
-  def train(self, ivector_machine, data):
-    """
-    Train the :py:class:`bob.learn.em.IVectorMachine` using data
-
-    Keyword Parameters:
-      ivector_machine
-        The `:py:class:bob.learn.em.IVectorMachine` class
-      data
-        The data to be trained
-    """
-
-    #Initialization
-    self.initialize(ivector_machine);
-      
-    for i in range(self._max_iterations):
-      #eStep
-      self.eStep(ivector_machine, data);
-      #mStep
-      self.mStep(ivector_machine);
-
-
-
-# copy the documentation from the base class
-__doc__ = _IVectorTrainer.__doc__
diff --git a/bob/learn/em/__jfa_trainer__.py b/bob/learn/em/__jfa_trainer__.py
deleted file mode 100644
index 2d8ccec..0000000
--- a/bob/learn/em/__jfa_trainer__.py
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
-# Sun Fev 01 21:10:10 2015 +0200
-#
-# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
-
-from ._library import _JFATrainer
-import numpy
-
-# define the class
-class JFATrainer (_JFATrainer):
-
-  def __init__(self, max_iterations=10):
-    """
-    :py:class:`bob.learn.em.JFATrainer` constructor
-
-    Keyword Parameters:
-      max_iterations
-        Number of maximum iterations
-    """
-
-    _JFATrainer.__init__(self)
-    self._max_iterations         = max_iterations
-
-
-  def train_loop(self, jfa_base, data):
-    """
-    Train the :py:class:`bob.learn.em.JFABase` using data
-
-    Keyword Parameters:
-      jfa_base
-        The `:py:class:bob.learn.em.JFABase` class
-      data
-        The data to be trained
-    """
-    #V Subspace
-    for i in range(self._max_iterations):
-      self.e_step1(jfa_base, data)
-      self.m_step1(jfa_base, data)
-    self.finalize1(jfa_base, data)
-
-    #U subspace
-    for i in range(self._max_iterations):
-      self.e_step2(jfa_base, data)
-      self.m_step2(jfa_base, data)
-    self.finalize2(jfa_base, data)
-
-    # d subspace
-    for i in range(self._max_iterations):
-      self.e_step3(jfa_base, data)
-      self.m_step3(jfa_base, data)
-    self.finalize3(jfa_base, data)
-
-
-  def train(self, jfa_base, data):
-    """
-    Train the :py:class:`bob.learn.em.JFABase` using data
-
-    Keyword Parameters:
-      jfa_base
-        The `:py:class:bob.learn.em.JFABase` class
-      data
-        The data to be trained
-    """
-    self.initialize(jfa_base, data)
-    self.train_loop(jfa_base, data)
-
-
-# copy the documentation from the base class
-__doc__ = _JFATrainer.__doc__
diff --git a/bob/learn/em/__kmeans_trainer__.py b/bob/learn/em/__kmeans_trainer__.py
deleted file mode 100644
index b7ef48e..0000000
--- a/bob/learn/em/__kmeans_trainer__.py
+++ /dev/null
@@ -1,81 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
-# Mon Jan 19 11:35:10 2015 +0200
-#
-# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
-
-from ._library import _KMeansTrainer
-import numpy
-
-# define the class
-class KMeansTrainer (_KMeansTrainer):
-
-  def __init__(self, initialization_method="RANDOM", convergence_threshold=0.001, max_iterations=10, converge_by_average_min_distance=True):
-    """
-    :py:class:`bob.learn.em.KMeansTrainer` constructor
-
-    Keyword Parameters:
-      initialization_method
-        The initialization method to generate the initial means
-      convergence_threshold
-        Convergence threshold
-      max_iterations
-        Number of maximum iterations
-      converge_by_average_min_distance
-        Tells whether we compute the average min (square Euclidean) distance, as a convergence criteria, or not 
-        
-    """
-
-    _KMeansTrainer.__init__(self, initialization_method="RANDOM", )
-    self._convergence_threshold = convergence_threshold
-    self._max_iterations         = max_iterations
-    self._converge_by_average_min_distance = converge_by_average_min_distance
-
-
-  def train(self, kmeans_machine, data):
-    """
-    Train the :py:class:bob.learn.em.KMeansMachine using data
-
-    Keyword Parameters:
-      kmeans_machine
-        The :py:class:bob.learn.em.KMeansMachine class
-      data
-        The data to be trained
-    """
-
-    #Initialization
-    self.initialize(kmeans_machine, data);
-      
-    #Do the Expectation-Maximization algorithm
-    average_output_previous = 0
-    average_output = -numpy.inf;
-
-    #eStep
-    self.eStep(kmeans_machine, data);
-
-    if(self._converge_by_average_min_distance):
-      average_output = self.compute_likelihood(kmeans_machine);
-    
-    for i in range(self._max_iterations):
-
-      #saves average output from last iteration
-      average_output_previous = average_output;
-
-      #mStep
-      self.mStep(kmeans_machine);
-
-      #eStep
-      self.eStep(kmeans_machine, data);
-
-      #Computes log likelihood if required
-      if(self._converge_by_average_min_distance):
-        average_output = self.compute_likelihood(kmeans_machine);
-
-        #Terminates if converged (and likelihood computation is set)
-        if abs((average_output_previous - average_output)/average_output_previous) <= self._convergence_threshold:
-          break
-
-
-# copy the documentation from the base class
-__doc__ = _KMeansTrainer.__doc__
diff --git a/bob/learn/em/__plda_trainer__.py b/bob/learn/em/__plda_trainer__.py
deleted file mode 100644
index 3989b8d..0000000
--- a/bob/learn/em/__plda_trainer__.py
+++ /dev/null
@@ -1,50 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
-# Mon Fev 02 21:40:10 2015 +0200
-#
-# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
-
-from ._library import _PLDATrainer
-import numpy
-
-# define the class
-class PLDATrainer (_PLDATrainer):
-
-  def __init__(self, max_iterations=10, use_sum_second_order=False):
-    """
-    :py:class:`bob.learn.em.PLDATrainer` constructor
-
-    Keyword Parameters:
-      max_iterations
-        Number of maximum iterations
-    """
-    _PLDATrainer.__init__(self, use_sum_second_order)
-    self._max_iterations         = max_iterations
-
-
-  def train(self, plda_base, data):
-    """
-    Train the :py:class:`bob.learn.em.PLDABase` using data
-
-    Keyword Parameters:
-      jfa_base
-        The `:py:class:bob.learn.em.PLDABase` class
-      data
-        The data to be trained
-    """
-
-    #Initialization
-    self.initialize(plda_base, data);
-      
-    for i in range(self._max_iterations):
-      #eStep
-      self.e_step(plda_base, data);
-      #mStep
-      self.m_step(plda_base, data);
-    self.finalize(plda_base, data);
-
-
-
-# copy the documentation from the base class
-__doc__ = _PLDATrainer.__doc__
diff --git a/bob/learn/em/gmm_base_trainer.cpp b/bob/learn/em/gmm_base_trainer.cpp
deleted file mode 100644
index 96dfdc6..0000000
--- a/bob/learn/em/gmm_base_trainer.cpp
+++ /dev/null
@@ -1,437 +0,0 @@
-/**
- * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
- * @date Web 21 Jan 12:30:00 2015
- *
- * @brief Python API for bob::learn::em
- *
- * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
- */
-
-#include "main.h"
-#include <boost/make_shared.hpp>
-
-/******************************************************************/
-/************ Constructor Section *********************************/
-/******************************************************************/
-
-static inline bool f(PyObject* o){return o != 0 && PyObject_IsTrue(o) > 0;}  /* converts PyObject to bool and returns false if object is NULL */
-
-static auto GMMBaseTrainer_doc = bob::extension::ClassDoc(
-  BOB_EXT_MODULE_PREFIX ".GMMBaseTrainer",
-  "This class implements the E-step of the expectation-maximisation"
-  "algorithm for a :py:class:`bob.learn.em.GMMMachine`"
-).add_constructor(
-  bob::extension::FunctionDoc(
-    "__init__",
-    "Creates a GMMBaseTrainer",
-    "",
-    true
-  )
-  .add_prototype("update_means, [update_variances], [update_weights], [mean_var_update_responsibilities_threshold]","")
-  .add_prototype("other","")
-  .add_prototype("","")
-
-  .add_parameter("update_means", "bool", "Update means on each iteration")
-  .add_parameter("update_variances", "bool", "Update variances on each iteration")
-  .add_parameter("update_weights", "bool", "Update weights on each iteration")
-  .add_parameter("mean_var_update_responsibilities_threshold", "float", "Threshold over the responsibilities of the Gaussians Equations 9.24, 9.25 of Bishop, `Pattern recognition and machine learning`, 2006 require a division by the responsibilities, which might be equal to zero because of numerical issue. This threshold is used to avoid such divisions.")
-  .add_parameter("other", ":py:class:`bob.learn.em.GMMBaseTrainer`", "A GMMBaseTrainer object to be copied.")
-);
-
-
-
-static int PyBobLearnEMGMMBaseTrainer_init_copy(PyBobLearnEMGMMBaseTrainerObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = GMMBaseTrainer_doc.kwlist(1);
-  PyBobLearnEMGMMBaseTrainerObject* tt;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMBaseTrainer_Type, &tt)){
-    GMMBaseTrainer_doc.print_usage();
-    return -1;
-  }
-
-  self->cxx.reset(new bob::learn::em::GMMBaseTrainer(*tt->cxx));
-  return 0;
-}
-
-
-static int PyBobLearnEMGMMBaseTrainer_init_bool(PyBobLearnEMGMMBaseTrainerObject* self, PyObject* args, PyObject* kwargs) {
-
-  char** kwlist = GMMBaseTrainer_doc.kwlist(0);
-  PyObject* update_means     = 0;
-  PyObject* update_variances = 0;
-  PyObject* update_weights   = 0;
-  double mean_var_update_responsibilities_threshold = std::numeric_limits<double>::epsilon();
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!|O!O!d", kwlist, &PyBool_Type, &update_means, &PyBool_Type, 
-                                                             &update_variances, &PyBool_Type, &update_weights, &mean_var_update_responsibilities_threshold)){
-    GMMBaseTrainer_doc.print_usage();
-    return -1;
-  }
-  self->cxx.reset(new bob::learn::em::GMMBaseTrainer(f(update_means), f(update_variances), f(update_weights), mean_var_update_responsibilities_threshold));
-  return 0;
-}
-
-
-static int PyBobLearnEMGMMBaseTrainer_init(PyBobLearnEMGMMBaseTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
-
-  if (nargs==0){ //default initializer ()
-    self->cxx.reset(new bob::learn::em::GMMBaseTrainer());
-    return 0;
-  }
-  else{
-    //Reading the input argument
-    PyObject* arg = 0;
-    if (PyTuple_Size(args))
-      arg = PyTuple_GET_ITEM(args, 0);
-    else {
-      PyObject* tmp = PyDict_Values(kwargs);
-      auto tmp_ = make_safe(tmp);
-      arg = PyList_GET_ITEM(tmp, 0);
-    }
-
-    // If the constructor input is GMMBaseTrainer object
-    if (PyBobLearnEMGMMBaseTrainer_Check(arg))
-      return PyBobLearnEMGMMBaseTrainer_init_copy(self, args, kwargs);
-    else
-      return PyBobLearnEMGMMBaseTrainer_init_bool(self, args, kwargs);
-  }
-
-  BOB_CATCH_MEMBER("cannot create GMMBaseTrainer_init_bool", 0)
-  return 0;
-}
-
-
-static void PyBobLearnEMGMMBaseTrainer_delete(PyBobLearnEMGMMBaseTrainerObject* self) {
-  self->cxx.reset();
-  Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-
-int PyBobLearnEMGMMBaseTrainer_Check(PyObject* o) {
-  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnEMGMMBaseTrainer_Type));
-}
-
-
-static PyObject* PyBobLearnEMGMMBaseTrainer_RichCompare(PyBobLearnEMGMMBaseTrainerObject* self, PyObject* other, int op) {
-  BOB_TRY
-
-  if (!PyBobLearnEMGMMBaseTrainer_Check(other)) {
-    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
-    return 0;
-  }
-  auto other_ = reinterpret_cast<PyBobLearnEMGMMBaseTrainerObject*>(other);
-  switch (op) {
-    case Py_EQ:
-      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
-    case Py_NE:
-      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
-    default:
-      Py_INCREF(Py_NotImplemented);
-      return Py_NotImplemented;
-  }
-  BOB_CATCH_MEMBER("cannot compare GMMBaseTrainer objects", 0)
-}
-
-
-/******************************************************************/
-/************ Variables Section ***********************************/
-/******************************************************************/
-
-
-/***** gmm_stats *****/
-static auto gmm_stats = bob::extension::VariableDoc(
-  "gmm_stats",
-  ":py:class:`bob.learn.em.GMMStats`",
-  "Get/Set GMMStats",
-  ""
-);
-PyObject* PyBobLearnEMGMMBaseTrainer_getGMMStats(PyBobLearnEMGMMBaseTrainerObject* self, void*){
-  BOB_TRY
-
-  bob::learn::em::GMMStats stats = self->cxx->getGMMStats();
-  boost::shared_ptr<bob::learn::em::GMMStats> stats_shared = boost::make_shared<bob::learn::em::GMMStats>(stats);
-
-  //Allocating the correspondent python object
-  PyBobLearnEMGMMStatsObject* retval =
-    (PyBobLearnEMGMMStatsObject*)PyBobLearnEMGMMStats_Type.tp_alloc(&PyBobLearnEMGMMStats_Type, 0);
-
-  retval->cxx = stats_shared;
-
-  return Py_BuildValue("O",retval);
-  BOB_CATCH_MEMBER("GMMStats could not be read", 0)
-}
-/*
-int PyBobLearnEMGMMBaseTrainer_setGMMStats(PyBobLearnEMGMMBaseTrainerObject* self, PyObject* value, void*){
-  BOB_TRY
-
-  if (!PyBobLearnEMGMMStats_Check(value)){
-    PyErr_Format(PyExc_RuntimeError, "%s %s expects a :py:class:`bob.learn.em.GMMStats`", Py_TYPE(self)->tp_name, gmm_stats.name());
-    return -1;
-  }
-
-  PyBobLearnEMGMMStatsObject* stats = 0;
-  PyArg_Parse(value, "O!", &PyBobLearnEMGMMStats_Type,&stats);
-
-  self->cxx->setGMMStats(*stats->cxx);
-
-  return 0;
-  BOB_CATCH_MEMBER("gmm_stats could not be set", -1)  
-}
-*/
-
-
-/***** update_means *****/
-static auto update_means = bob::extension::VariableDoc(
-  "update_means",
-  "bool",
-  "Update means on each iteration",
-  ""
-);
-PyObject* PyBobLearnEMGMMBaseTrainer_getUpdateMeans(PyBobLearnEMGMMBaseTrainerObject* self, void*){
-  BOB_TRY
-  return Py_BuildValue("O",self->cxx->getUpdateMeans()?Py_True:Py_False);
-  BOB_CATCH_MEMBER("update_means could not be read", 0)
-}
-
-/***** update_variances *****/
-static auto update_variances = bob::extension::VariableDoc(
-  "update_variances",
-  "bool",
-  "Update variances on each iteration",
-  ""
-);
-PyObject* PyBobLearnEMGMMBaseTrainer_getUpdateVariances(PyBobLearnEMGMMBaseTrainerObject* self, void*){
-  BOB_TRY
-  return Py_BuildValue("O",self->cxx->getUpdateVariances()?Py_True:Py_False);
-  BOB_CATCH_MEMBER("update_variances could not be read", 0)
-}
-
-
-/***** update_weights *****/
-static auto update_weights = bob::extension::VariableDoc(
-  "update_weights",
-  "bool",
-  "Update weights on each iteration",
-  ""
-);
-PyObject* PyBobLearnEMGMMBaseTrainer_getUpdateWeights(PyBobLearnEMGMMBaseTrainerObject* self, void*){
-  BOB_TRY
-  return Py_BuildValue("O",self->cxx->getUpdateWeights()?Py_True:Py_False);
-  BOB_CATCH_MEMBER("update_weights could not be read", 0)
-}
-
-
-    
-     
-
-/***** mean_var_update_responsibilities_threshold *****/
-static auto mean_var_update_responsibilities_threshold = bob::extension::VariableDoc(
-  "mean_var_update_responsibilities_threshold",
-  "bool",
-  "Threshold over the responsibilities of the Gaussians" 
-  "Equations 9.24, 9.25 of Bishop, \"Pattern recognition and machine learning\", 2006" 
-  "require a division by the responsibilities, which might be equal to zero" 
-  "because of numerical issue. This threshold is used to avoid such divisions.",
-  ""
-);
-PyObject* PyBobLearnEMGMMBaseTrainer_getMeanVarUpdateResponsibilitiesThreshold(PyBobLearnEMGMMBaseTrainerObject* self, void*){
-  BOB_TRY
-  return Py_BuildValue("d",self->cxx->getMeanVarUpdateResponsibilitiesThreshold());
-  BOB_CATCH_MEMBER("update_weights could not be read", 0)
-}
-
-
-static PyGetSetDef PyBobLearnEMGMMBaseTrainer_getseters[] = { 
-  {
-    update_means.name(),
-    (getter)PyBobLearnEMGMMBaseTrainer_getUpdateMeans,
-    0,
-    update_means.doc(),
-    0
-  },
-  {
-    update_variances.name(),
-    (getter)PyBobLearnEMGMMBaseTrainer_getUpdateVariances,
-    0,
-    update_variances.doc(),
-    0
-  },
-  {
-    update_weights.name(),
-    (getter)PyBobLearnEMGMMBaseTrainer_getUpdateWeights,
-    0,
-    update_weights.doc(),
-    0
-  },  
-  {
-    mean_var_update_responsibilities_threshold.name(),
-    (getter)PyBobLearnEMGMMBaseTrainer_getMeanVarUpdateResponsibilitiesThreshold,
-    0,
-    mean_var_update_responsibilities_threshold.doc(),
-    0
-  },  
-  {
-    gmm_stats.name(),
-    (getter)PyBobLearnEMGMMBaseTrainer_getGMMStats,
-    0, //(setter)PyBobLearnEMGMMBaseTrainer_setGMMStats,
-    gmm_stats.doc(),
-    0
-  },  
-  {0}  // Sentinel
-};
-
-
-/******************************************************************/
-/************ Functions Section ***********************************/
-/******************************************************************/
-
-/*** initialize ***/
-static auto initialize = bob::extension::FunctionDoc(
-  "initialize",
-  "Initialization before the EM steps",
-  "Instanciate :py:class:`bob.learn.em.GMMStats`",
-  true
-)
-.add_prototype("gmm_machine")
-.add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object");
-static PyObject* PyBobLearnEMGMMBaseTrainer_initialize(PyBobLearnEMGMMBaseTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = initialize.kwlist(0);
-
-  PyBobLearnEMGMMMachineObject* gmm_machine = 0;
-
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine)) Py_RETURN_NONE;
-
-  self->cxx->initialize(*gmm_machine->cxx);
-
-  BOB_CATCH_MEMBER("cannot perform the initialize method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-
-/*** eStep ***/
-static auto eStep = bob::extension::FunctionDoc(
-  "eStep",
-  "Calculates and saves statistics across the dataset,"
-  "and saves these as m_ss. ",
-
-  "Calculates the average log likelihood of the observations given the GMM,"
-  "and returns this in average_log_likelihood."
-  "The statistics, m_ss, will be used in the mStep() that follows.",
-
-  true
-)
-.add_prototype("gmm_machine,data")
-.add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object")
-.add_parameter("data", "array_like <float, 2D>", "Input data");
-static PyObject* PyBobLearnEMGMMBaseTrainer_eStep(PyBobLearnEMGMMBaseTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = eStep.kwlist(0);
-
-  PyBobLearnEMGMMMachineObject* gmm_machine;
-  PyBlitzArrayObject* data = 0;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O&", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine,
-                                                                 &PyBlitzArray_Converter, &data)) Py_RETURN_NONE;
-  auto data_ = make_safe(data);
-
-  self->cxx->eStep(*gmm_machine->cxx, *PyBlitzArrayCxx_AsBlitz<double,2>(data));
-
-  BOB_CATCH_MEMBER("cannot perform the eStep method", 0)
-
-  Py_RETURN_NONE;
-}
-
-
-/*** computeLikelihood ***/
-static auto compute_likelihood = bob::extension::FunctionDoc(
-  "compute_likelihood",
-  "This functions returns the average min (Square Euclidean) distance (average distance to the closest mean)",
-  0,
-  true
-)
-.add_prototype("gmm_machine")
-.add_parameter("gmm_machine", ":py:class:`bob.learn.em.GMMMachine`", "GMMMachine Object");
-static PyObject* PyBobLearnEMGMMBaseTrainer_compute_likelihood(PyBobLearnEMGMMBaseTrainerObject* self, PyObject* args, PyObject* kwargs) {
-  BOB_TRY
-
-  /* Parses input arguments in a single shot */
-  char** kwlist = compute_likelihood.kwlist(0);
-
-  PyBobLearnEMGMMMachineObject* gmm_machine;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMGMMMachine_Type, &gmm_machine)) Py_RETURN_NONE;
-
-  double value = self->cxx->computeLikelihood(*gmm_machine->cxx);
-  return Py_BuildValue("d", value);
-
-  BOB_CATCH_MEMBER("cannot perform the computeLikelihood method", 0)
-}
-
-
-static PyMethodDef PyBobLearnEMGMMBaseTrainer_methods[] = {
-  {
-    initialize.name(),
-    (PyCFunction)PyBobLearnEMGMMBaseTrainer_initialize,
-    METH_VARARGS|METH_KEYWORDS,
-    initialize.doc()
-  },
-  {
-    eStep.name(),
-    (PyCFunction)PyBobLearnEMGMMBaseTrainer_eStep,
-    METH_VARARGS|METH_KEYWORDS,
-    eStep.doc()
-  },
-  {
-    compute_likelihood.name(),
-    (PyCFunction)PyBobLearnEMGMMBaseTrainer_compute_likelihood,
-    METH_VARARGS|METH_KEYWORDS,
-    compute_likelihood.doc()
-  },
-  {0} /* Sentinel */
-};
-
-
-/******************************************************************/
-/************ Module Section **************************************/
-/******************************************************************/
-
-// Define the Gaussian type struct; will be initialized later
-PyTypeObject PyBobLearnEMGMMBaseTrainer_Type = {
-  PyVarObject_HEAD_INIT(0,0)
-  0
-};
-
-bool init_BobLearnEMGMMBaseTrainer(PyObject* module)
-{
-  // initialize the type struct
-  PyBobLearnEMGMMBaseTrainer_Type.tp_name      = GMMBaseTrainer_doc.name();
-  PyBobLearnEMGMMBaseTrainer_Type.tp_basicsize = sizeof(PyBobLearnEMGMMBaseTrainerObject);
-  PyBobLearnEMGMMBaseTrainer_Type.tp_flags     = Py_TPFLAGS_DEFAULT;
-  PyBobLearnEMGMMBaseTrainer_Type.tp_doc       = GMMBaseTrainer_doc.doc();
-
-  // set the functions
-  PyBobLearnEMGMMBaseTrainer_Type.tp_new          = PyType_GenericNew;
-  PyBobLearnEMGMMBaseTrainer_Type.tp_init         = reinterpret_cast<initproc>(PyBobLearnEMGMMBaseTrainer_init);
-  PyBobLearnEMGMMBaseTrainer_Type.tp_dealloc      = reinterpret_cast<destructor>(PyBobLearnEMGMMBaseTrainer_delete);
-  PyBobLearnEMGMMBaseTrainer_Type.tp_richcompare = reinterpret_cast<richcmpfunc>(PyBobLearnEMGMMBaseTrainer_RichCompare);
-  PyBobLearnEMGMMBaseTrainer_Type.tp_methods      = PyBobLearnEMGMMBaseTrainer_methods;
-  PyBobLearnEMGMMBaseTrainer_Type.tp_getset       = PyBobLearnEMGMMBaseTrainer_getseters;
-  PyBobLearnEMGMMBaseTrainer_Type.tp_call         = reinterpret_cast<ternaryfunc>(PyBobLearnEMGMMBaseTrainer_compute_likelihood);
-
-
-  // check that everything is fine
-  if (PyType_Ready(&PyBobLearnEMGMMBaseTrainer_Type) < 0) return false;
-
-  // add the type to the module
-  Py_INCREF(&PyBobLearnEMGMMBaseTrainer_Type);
-  return PyModule_AddObject(module, "GMMBaseTrainer", (PyObject*)&PyBobLearnEMGMMBaseTrainer_Type) >= 0;
-}
-
diff --git a/bob/learn/em/include/bob.learn.em/ISVTrainer.h b/bob/learn/em/include/bob.learn.em/ISVTrainer.h
index e314ad6..f0177ec 100644
--- a/bob/learn/em/include/bob.learn.em/ISVTrainer.h
+++ b/bob/learn/em/include/bob.learn.em/ISVTrainer.h
@@ -133,6 +133,18 @@ class ISVTrainer
     { m_base_trainer.setAccUA1(acc); }
     void setAccUA2(const blitz::Array<double,2>& acc)
     { m_base_trainer.setAccUA2(acc); }
+    
+    /**
+     * @brief Sets the Random Number Generator
+     */
+    void setRng(const boost::shared_ptr<boost::mt19937> rng)
+    { m_rng = rng; }
+
+    /**
+     * @brief Gets the Random Number Generator
+     */
+    const boost::shared_ptr<boost::mt19937> getRng() const
+    { return m_rng; }
 
 
   private:
diff --git a/bob/learn/em/isv_trainer.cpp b/bob/learn/em/isv_trainer.cpp
index 44f294b..66f99e1 100644
--- a/bob/learn/em/isv_trainer.cpp
+++ b/bob/learn/em/isv_trainer.cpp
@@ -99,7 +99,6 @@ static auto ISVTrainer_doc = bob::extension::ClassDoc(
   .add_prototype("","")
   .add_parameter("other", ":py:class:`bob.learn.em.ISVTrainer`", "A ISVTrainer object to be copied.")
   .add_parameter("relevance_factor", "double", "")
-  .add_parameter("convergence_threshold", "double", "")
 );
 
 
@@ -266,9 +265,6 @@ int PyBobLearnEMISVTrainer_set_acc_u_a2(PyBobLearnEMISVTrainerObject* self, PyOb
 }
 
 
-
-
-
 static auto __X__ = bob::extension::VariableDoc(
   "__X__",
   "list",
@@ -329,6 +325,39 @@ int PyBobLearnEMISVTrainer_set_Z(PyBobLearnEMISVTrainerObject* self, PyObject* v
 }
 
 
+/***** rng *****/
+static auto rng = bob::extension::VariableDoc(
+  "rng",
+  "str",
+  "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.",
+  ""
+);
+PyObject* PyBobLearnEMISVTrainer_getRng(PyBobLearnEMISVTrainerObject* self, void*) {
+  BOB_TRY
+  //Allocating the correspondent python object
+  
+  PyBoostMt19937Object* retval =
+    (PyBoostMt19937Object*)PyBoostMt19937_Type.tp_alloc(&PyBoostMt19937_Type, 0);
+
+  retval->rng = self->cxx->getRng();
+  return Py_BuildValue("O", retval);
+  BOB_CATCH_MEMBER("Rng method could not be read", 0)
+}
+int PyBobLearnEMISVTrainer_setRng(PyBobLearnEMISVTrainerObject* self, PyObject* value, void*) {
+  BOB_TRY
+
+  if (!PyBoostMt19937_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a bob.core.random.mt19937 object", Py_TYPE(self)->tp_name, rng.name());
+    return -1;
+  }
+
+  PyBoostMt19937Object* boostObject = 0;
+  PyBoostMt19937_Converter(value, &boostObject);
+  self->cxx->setRng(boostObject->rng);
+
+  return 0;
+  BOB_CATCH_MEMBER("Rng could not be set", 0)
+}
 
 
 static PyGetSetDef PyBobLearnEMISVTrainer_getseters[] = { 
@@ -361,6 +390,13 @@ static PyGetSetDef PyBobLearnEMISVTrainer_getseters[] = {
    0
   },
   
+  {
+   rng.name(),
+   (getter)PyBobLearnEMISVTrainer_getRng,
+   (setter)PyBobLearnEMISVTrainer_setRng,
+   rng.doc(),
+   0
+  },
   
 
   {0}  // Sentinel
@@ -442,8 +478,9 @@ static auto m_step = bob::extension::FunctionDoc(
   "",
   true
 )
-.add_prototype("isv_base")
-.add_parameter("isv_base", ":py:class:`bob.learn.em.ISVBase`", "ISVBase Object");
+.add_prototype("isv_base, stats")
+.add_parameter("isv_base", ":py:class:`bob.learn.em.ISVBase`", "ISVBase Object")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "Ignored");
 static PyObject* PyBobLearnEMISVTrainer_m_step(PyBobLearnEMISVTrainerObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
 
@@ -451,8 +488,10 @@ static PyObject* PyBobLearnEMISVTrainer_m_step(PyBobLearnEMISVTrainerObject* sel
   char** kwlist = m_step.kwlist(0);
 
   PyBobLearnEMISVBaseObject* isv_base = 0;
+  PyObject* stats = 0;  
 
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMISVBase_Type, &isv_base)) return 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!|O!", kwlist, &PyBobLearnEMISVBase_Type, &isv_base,
+                                                                 &PyList_Type, &stats)) return 0;
 
   self->cxx->mStep(*isv_base->cxx);
 
@@ -561,6 +600,6 @@ bool init_BobLearnEMISVTrainer(PyObject* module)
 
   // add the type to the module
   Py_INCREF(&PyBobLearnEMISVTrainer_Type);
-  return PyModule_AddObject(module, "_ISVTrainer", (PyObject*)&PyBobLearnEMISVTrainer_Type) >= 0;
+  return PyModule_AddObject(module, "ISVTrainer", (PyObject*)&PyBobLearnEMISVTrainer_Type) >= 0;
 }
 
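The new rng property mirrors the setRng/getRng accessors added to ISVTrainer.h
above. A usage sketch, assuming the constructor keeps its documented
relevance_factor argument; the seed value 5489 is just an example:

    import bob.core.random
    import bob.learn.em

    trainer = bob.learn.em.ISVTrainer(relevance_factor=4.)
    trainer.rng = bob.core.random.mt19937(5489)  # make the subspace initialization reproducible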
diff --git a/bob/learn/em/ivector_trainer.cpp b/bob/learn/em/ivector_trainer.cpp
index 72bced8..2d639ac 100644
--- a/bob/learn/em/ivector_trainer.cpp
+++ b/bob/learn/em/ivector_trainer.cpp
@@ -313,8 +313,9 @@ static auto initialize = bob::extension::FunctionDoc(
   "",
   true
 )
-.add_prototype("ivector_machine")
-.add_parameter("ivector_machine", ":py:class:`bob.learn.em.ISVBase`", "IVectorMachine Object");
+.add_prototype("ivector_machine, stats")
+.add_parameter("ivector_machine", ":py:class:`bob.learn.em.ISVBase`", "IVectorMachine Object")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "Ignored");
 static PyObject* PyBobLearnEMIVectorTrainer_initialize(PyBobLearnEMIVectorTrainerObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
 
@@ -322,8 +323,10 @@ static PyObject* PyBobLearnEMIVectorTrainer_initialize(PyBobLearnEMIVectorTraine
   char** kwlist = initialize.kwlist(0);
 
   PyBobLearnEMIVectorMachineObject* ivector_machine = 0;
+  PyObject* stats = 0;
 
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMIVectorMachine_Type, &ivector_machine)) return 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!|O!", kwlist, &PyBobLearnEMIVectorMachine_Type, &ivector_machine,
+                                                                 &PyList_Type, &stats)) return 0;
 
   self->cxx->initialize(*ivector_machine->cxx);
 
@@ -371,8 +374,9 @@ static auto m_step = bob::extension::FunctionDoc(
   "",
   true
 )
-.add_prototype("ivector_machine")
-.add_parameter("ivector_machine", ":py:class:`bob.learn.em.ISVBase`", "IVectorMachine Object");
+.add_prototype("ivector_machine, stats")
+.add_parameter("ivector_machine", ":py:class:`bob.learn.em.ISVBase`", "IVectorMachine Object")
+.add_parameter("stats", ":py:class:`bob.learn.em.GMMStats`", "Ignored");
 static PyObject* PyBobLearnEMIVectorTrainer_m_step(PyBobLearnEMIVectorTrainerObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
 
@@ -380,8 +384,10 @@ static PyObject* PyBobLearnEMIVectorTrainer_m_step(PyBobLearnEMIVectorTrainerObj
   char** kwlist = m_step.kwlist(0);
 
   PyBobLearnEMIVectorMachineObject* ivector_machine = 0;
+  PyObject* stats = 0;
 
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMIVectorMachine_Type, &ivector_machine)) return 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!|O!", kwlist, &PyBobLearnEMIVectorMachine_Type, &ivector_machine,
+                                                                 &PyList_Type, &stats)) return 0;
 
   self->cxx->mStep(*ivector_machine->cxx);
 
@@ -448,6 +454,6 @@ bool init_BobLearnEMIVectorTrainer(PyObject* module)
 
   // add the type to the module
   Py_INCREF(&PyBobLearnEMIVectorTrainer_Type);
-  return PyModule_AddObject(module, "_IVectorTrainer", (PyObject*)&PyBobLearnEMIVectorTrainer_Type) >= 0;
+  return PyModule_AddObject(module, "IVectorTrainer", (PyObject*)&PyBobLearnEMIVectorTrainer_Type) >= 0;
 }
 
diff --git a/bob/learn/em/jfa_trainer.cpp b/bob/learn/em/jfa_trainer.cpp
index edcc8ec..5a1734c 100644
--- a/bob/learn/em/jfa_trainer.cpp
+++ b/bob/learn/em/jfa_trainer.cpp
@@ -1008,6 +1008,6 @@ bool init_BobLearnEMJFATrainer(PyObject* module)
 
   // add the type to the module
   Py_INCREF(&PyBobLearnEMJFATrainer_Type);
-  return PyModule_AddObject(module, "_JFATrainer", (PyObject*)&PyBobLearnEMJFATrainer_Type) >= 0;
+  return PyModule_AddObject(module, "JFATrainer", (PyObject*)&PyBobLearnEMJFATrainer_Type) >= 0;
 }
 
diff --git a/bob/learn/em/kmeans_machine.cpp b/bob/learn/em/kmeans_machine.cpp
index 8f3f55f..352ccde 100644
--- a/bob/learn/em/kmeans_machine.cpp
+++ b/bob/learn/em/kmeans_machine.cpp
@@ -366,7 +366,7 @@ static auto get_mean = bob::extension::FunctionDoc(
   ".. note:: An exception is thrown if i is out of range.", 
   true
 )
-.add_prototype("i")
+.add_prototype("i","mean")
 .add_parameter("i", "int", "Index of the mean")
 .add_return("mean","array_like <float, 1D>","Mean array");
 static PyObject* PyBobLearnEMKMeansMachine_get_mean(PyBobLearnEMKMeansMachineObject* self, PyObject* args, PyObject* kwargs) {
diff --git a/bob/learn/em/kmeans_trainer.cpp b/bob/learn/em/kmeans_trainer.cpp
index d80339c..d346272 100644
--- a/bob/learn/em/kmeans_trainer.cpp
+++ b/bob/learn/em/kmeans_trainer.cpp
@@ -27,7 +27,7 @@ static inline const std::string& IM2string(bob::learn::em::KMeansTrainer::Initia
 
 
 static auto KMeansTrainer_doc = bob::extension::ClassDoc(
-  BOB_EXT_MODULE_PREFIX "._KMeansTrainer",
+  BOB_EXT_MODULE_PREFIX ".KMeansTrainer",
   "Trains a KMeans machine."
   "This class implements the expectation-maximization algorithm for a k-means machine."
   "See Section 9.1 of Bishop, \"Pattern recognition and machine learning\", 2006"
@@ -411,8 +411,9 @@ static auto mStep = bob::extension::FunctionDoc(
   0,
   true
 )
-.add_prototype("kmeans_machine")
-.add_parameter("kmeans_machine", ":py:class:`bob.learn.em.KMeansMachine`", "KMeansMachine Object");
+.add_prototype("kmeans_machine,data")
+.add_parameter("kmeans_machine", ":py:class:`bob.learn.em.KMeansMachine`", "KMeansMachine Object")
+.add_parameter("data", "array_like <float, 2D>", "Ignored.");
 static PyObject* PyBobLearnEMKMeansTrainer_mStep(PyBobLearnEMKMeansTrainerObject* self, PyObject* args, PyObject* kwargs) {
   BOB_TRY
 
@@ -420,7 +421,11 @@ static PyObject* PyBobLearnEMKMeansTrainer_mStep(PyBobLearnEMKMeansTrainerObject
   char** kwlist = mStep.kwlist(0);
 
   PyBobLearnEMKMeansMachineObject* kmeans_machine;
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnEMKMeansMachine_Type, &kmeans_machine)) return 0;
+  PyBlitzArrayObject* data = 0;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!|O&", kwlist, &PyBobLearnEMKMeansMachine_Type, &kmeans_machine,
+                                                                  &PyBlitzArray_Converter, &data)) return 0;
+  if (data != NULL){
+    // the optional "data" argument is ignored; just release the reference taken by the converter
+    auto data_ = make_safe(data);
+  }
 
   self->cxx->mStep(*kmeans_machine->cxx);
 
@@ -548,6 +553,6 @@ bool init_BobLearnEMKMeansTrainer(PyObject* module)
 
   // add the type to the module
   Py_INCREF(&PyBobLearnEMKMeansTrainer_Type);
-  return PyModule_AddObject(module, "_KMeansTrainer", (PyObject*)&PyBobLearnEMKMeansTrainer_Type) >= 0;
+  return PyModule_AddObject(module, "KMeansTrainer", (PyObject*)&PyBobLearnEMKMeansTrainer_Type) >= 0;
 }
 
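For k-means the trainer's compute_likelihood() returns the average minimum
square Euclidean distance (see the docstring of __kmeans_trainer__.py, deleted
earlier in this patch), so the generic helper converges on that quantity. A
sketch with made-up data:

    import numpy
    import bob.learn.em

    data = numpy.random.randn(100, 3)           # 100 samples of dimension 3
    machine = bob.learn.em.KMeansMachine(2, 3)  # 2 means of dimension 3
    trainer = bob.learn.em.KMeansTrainer()
    bob.learn.em.train(trainer, machine, data, max_iterations=10,
                       convergence_threshold=0.001)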
diff --git a/bob/learn/em/linear_scoring.cpp b/bob/learn/em/linear_scoring.cpp
index e2cc215..091b3f7 100644
--- a/bob/learn/em/linear_scoring.cpp
+++ b/bob/learn/em/linear_scoring.cpp
@@ -69,7 +69,7 @@ static inline bool f(PyObject* o){return o != 0 && PyObject_IsTrue(o) > 0;}
 
 
 /*** linear_scoring ***/
-static auto linear_scoring1 = bob::extension::FunctionDoc(
+bob::extension::FunctionDoc linear_scoring1 = bob::extension::FunctionDoc(
   "linear_scoring",
   "",
   0,
@@ -84,7 +84,7 @@ static auto linear_scoring1 = bob::extension::FunctionDoc(
 .add_return("output","array_like<float,1>","Score");
 
 
-static auto linear_scoring2 = bob::extension::FunctionDoc(
+bob::extension::FunctionDoc linear_scoring2 = bob::extension::FunctionDoc(
   "linear_scoring",
   "",
   0,
@@ -101,7 +101,7 @@ static auto linear_scoring2 = bob::extension::FunctionDoc(
 
 
 
-static auto linear_scoring3 = bob::extension::FunctionDoc(
+bob::extension::FunctionDoc linear_scoring3 = bob::extension::FunctionDoc(
   "linear_scoring",
   "",
   0,
@@ -116,7 +116,7 @@ static auto linear_scoring3 = bob::extension::FunctionDoc(
 .add_parameter("frame_length_normalisation", "bool", "")
 .add_return("output","array_like<float,1>","Score");
 
-static PyObject* PyBobLearnEM_linear_scoring(PyObject*, PyObject* args, PyObject* kwargs) {
+PyObject* PyBobLearnEM_linear_scoring(PyObject*, PyObject* args, PyObject* kwargs) {
     
   //Cheking the number of arguments
   int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
diff --git a/bob/learn/em/main.cpp b/bob/learn/em/main.cpp
index c166cff..6444c99 100644
--- a/bob/learn/em/main.cpp
+++ b/bob/learn/em/main.cpp
@@ -9,9 +9,6 @@
 #undef NO_IMPORT_ARRAY
 #endif
 #include "main.h"
-#include "ztnorm.cpp"
-#include "linear_scoring.cpp"
-
 
 static PyMethodDef module_methods[] = {
   {
@@ -75,7 +72,6 @@ static PyObject* create_module (void) {
   if (!init_BobLearnEMGMMMachine(module)) return 0;
   if (!init_BobLearnEMKMeansMachine(module)) return 0;
   if (!init_BobLearnEMKMeansTrainer(module)) return 0;
-  //if (!init_BobLearnEMGMMBaseTrainer(module)) return 0;
   if (!init_BobLearnEMMLGMMTrainer(module)) return 0;  
   if (!init_BobLearnEMMAPGMMTrainer(module)) return 0;
 
@@ -130,7 +126,7 @@ static PyObject* create_module (void) {
   if (import_bob_blitz() < 0) return 0;
   if (import_bob_core_random() < 0) return 0;
   if (import_bob_io_base() < 0) return 0;
-  //if (import_bob_learn_linear() < 0) return 0;
+  if (import_bob_learn_linear() < 0) return 0;
 
   Py_INCREF(module);
   return module;
diff --git a/bob/learn/em/main.h b/bob/learn/em/main.h
index 4afa17d..a9750a2 100644
--- a/bob/learn/em/main.h
+++ b/bob/learn/em/main.h
@@ -80,8 +80,6 @@
     return ret;\
   }
 
-static inline char* c(const char* o){return const_cast<char*>(o);}  /* converts const char* to char* */
-
 /// inserts the given key, value pair into the given dictionaries
 static inline int insert_item_string(PyObject* dict, PyObject* entries, const char* key, Py_ssize_t value){
   auto v = make_safe(Py_BuildValue("n", value));
@@ -308,5 +306,21 @@ bool init_BobLearnEMEMPCATrainer(PyObject* module);
 int PyBobLearnEMEMPCATrainer_Check(PyObject* o);
 
 
+//ZT Normalization
+PyObject* PyBobLearnEM_ztNorm(PyObject*, PyObject* args, PyObject* kwargs);
+extern bob::extension::FunctionDoc zt_norm;
+
+PyObject* PyBobLearnEM_tNorm(PyObject*, PyObject* args, PyObject* kwargs);
+extern bob::extension::FunctionDoc t_norm;
+
+PyObject* PyBobLearnEM_zNorm(PyObject*, PyObject* args, PyObject* kwargs);
+extern bob::extension::FunctionDoc z_norm;
+
+
+//Linear scoring
+PyObject* PyBobLearnEM_linear_scoring(PyObject*, PyObject* args, PyObject* kwargs);
+extern bob::extension::FunctionDoc linear_scoring1;
+extern bob::extension::FunctionDoc linear_scoring2;
+extern bob::extension::FunctionDoc linear_scoring3;
 
 #endif // BOB_LEARN_EM_MAIN_H
diff --git a/bob/learn/em/plda_trainer.cpp b/bob/learn/em/plda_trainer.cpp
index 70e09f3..1a47d3c 100644
--- a/bob/learn/em/plda_trainer.cpp
+++ b/bob/learn/em/plda_trainer.cpp
@@ -132,10 +132,10 @@ static int PyBobLearnEMPLDATrainer_init_copy(PyBobLearnEMPLDATrainerObject* self
 static int PyBobLearnEMPLDATrainer_init_bool(PyBobLearnEMPLDATrainerObject* self, PyObject* args, PyObject* kwargs) {
 
   char** kwlist = PLDATrainer_doc.kwlist(0);
-  PyObject* use_sum_second_order;
+  PyObject* use_sum_second_order = Py_False;
 
   //Parsing the input argments
-  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBool_Type, &use_sum_second_order))
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|O!", kwlist, &PyBool_Type, &use_sum_second_order))
     return -1;
 
   self->cxx.reset(new bob::learn::em::PLDATrainer(f(use_sum_second_order)));
@@ -149,7 +149,9 @@ static int PyBobLearnEMPLDATrainer_init(PyBobLearnEMPLDATrainerObject* self, PyO
   // get the number of command line arguments
   int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
 
-  if(nargs==1){
+  if(nargs==0)
+    return PyBobLearnEMPLDATrainer_init_bool(self, args, kwargs);
+  else if(nargs==1){
     //Reading the input argument
     PyObject* arg = 0;
     if (PyTuple_Size(args))
@@ -367,6 +369,32 @@ int PyBobLearnEMPLDATrainer_setSigmaMethod(PyBobLearnEMPLDATrainerObject* self,
 }
 
 
+static auto use_sum_second_order = bob::extension::VariableDoc(
+  "use_sum_second_order",
+  "bool",
+  "Tells whether the second order statistics are stored during the training procedure, or only their sum.",
+  ""
+);
+PyObject* PyBobLearnEMPLDATrainer_getUseSumSecondOrder(PyBobLearnEMPLDATrainerObject* self, void*){
+  BOB_TRY
+  return Py_BuildValue("O",self->cxx->getUseSumSecondOrder()?Py_True:Py_False);
+  BOB_CATCH_MEMBER("use_sum_second_order could not be read", 0)
+}
+int PyBobLearnEMPLDATrainer_setUseSumSecondOrder(PyBobLearnEMPLDATrainerObject* self, PyObject* value, void*) {
+  BOB_TRY
+
+  if (!PyBool_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a bool", Py_TYPE(self)->tp_name, use_sum_second_order.name());
+    return -1;
+  }
+  self->cxx->setUseSumSecondOrder(f(value));
+
+  return 0;
+  BOB_CATCH_MEMBER("use_sum_second_order method could not be set", 0)
+}
+
+
+
 static PyGetSetDef PyBobLearnEMPLDATrainer_getseters[] = { 
   {
    z_first_order.name(),
@@ -416,7 +444,14 @@ static PyGetSetDef PyBobLearnEMPLDATrainer_getseters[] = {
    (setter)PyBobLearnEMPLDATrainer_setSigmaMethod,
    init_sigma_method.doc(),
    0
-  },  
+  },
+  {
+   use_sum_second_order.name(),
+   (getter)PyBobLearnEMPLDATrainer_getUseSumSecondOrder,
+   (setter)PyBobLearnEMPLDATrainer_setUseSumSecondOrder,
+   use_sum_second_order.doc(),
+   0
+  },
   {0}  // Sentinel
 };
 
@@ -459,8 +494,8 @@ static PyObject* PyBobLearnEMPLDATrainer_initialize(PyBobLearnEMPLDATrainerObjec
 
 /*** e_step ***/
 static auto e_step = bob::extension::FunctionDoc(
-  "e_step",
-  "e_step before the EM steps",
+  "eStep",
+  "Expectation step before the EM steps",
   "",
   true
 )
@@ -491,8 +526,8 @@ static PyObject* PyBobLearnEMPLDATrainer_e_step(PyBobLearnEMPLDATrainerObject* s
 
 /*** m_step ***/
 static auto m_step = bob::extension::FunctionDoc(
-  "m_step",
-  "m_step before the EM steps",
+  "mStep",
+  "Maximization step ",
   "",
   true
 )
@@ -699,6 +734,5 @@ bool init_BobLearnEMPLDATrainer(PyObject* module)
 
   // add the type to the module
   Py_INCREF(&PyBobLearnEMPLDATrainer_Type);
-  return PyModule_AddObject(module, "_PLDATrainer", (PyObject*)&PyBobLearnEMPLDATrainer_Type) >= 0;
+  return PyModule_AddObject(module, "PLDATrainer", (PyObject*)&PyBobLearnEMPLDATrainer_Type) >= 0;
 }
-
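
Taken together, the plda_trainer.cpp hunks above make the boolean constructor argument optional and expose it as a read/write property. A minimal sketch of how the resulting Python API should read (the printed default follows from the Py_False initializer above; treat this as an expectation, not a test):

    import bob.learn.em

    t = bob.learn.em.PLDATrainer()    # the bool argument is now optional
    print(t.use_sum_second_order)     # False, per the Py_False default
    t.use_sum_second_order = True     # the setter accepts only a bool
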
diff --git a/bob/learn/em/test/test_em.py b/bob/learn/em/test/test_em.py
index 85cac67..bd7ffc4 100644
--- a/bob/learn/em/test/test_em.py
+++ b/bob/learn/em/test/test_em.py
@@ -15,6 +15,8 @@ from bob.io.base.test_utils import datafile
 
 from bob.learn.em import KMeansMachine, GMMMachine, KMeansTrainer, \
     ML_GMMTrainer, MAP_GMMTrainer
+
+import bob.learn.em
 
 #, MAP_GMMTrainer
 
@@ -51,7 +53,8 @@ def test_gmm_ML_1():
   gmm = loadGMM()
   
   ml_gmmtrainer = ML_GMMTrainer(True, True, True)
-  ml_gmmtrainer.train(gmm, ar)
+  #ml_gmmtrainer.train(gmm, ar)
+  bob.learn.em.train(ml_gmmtrainer, gmm, ar, convergence_threshold=0.001)
 
   #config = bob.io.base.HDF5File(datafile('gmm_ML.hdf5', __name__), 'w')
   #gmm.save(config)
@@ -82,14 +85,12 @@ def test_gmm_ML_2():
   prior = 0.001
   max_iter_gmm = 25
   accuracy = 0.00001
-  ml_gmmtrainer = ML_GMMTrainer(True, True, True, prior, converge_by_likelihood=True)
-  ml_gmmtrainer.max_iterations = max_iter_gmm
-  ml_gmmtrainer.convergence_threshold = accuracy
+  ml_gmmtrainer = ML_GMMTrainer(True, True, True, prior)
   
   # Run ML
-  ml_gmmtrainer.train(gmm, ar)
-
-
+  #ml_gmmtrainer.train(gmm, ar)
+  bob.learn.em.train(ml_gmmtrainer, gmm, ar, max_iterations=max_iter_gmm, convergence_threshold=accuracy)
+
   # Test results
   # Load torch3vision reference
   meansML_ref = bob.io.base.load(datafile('meansAfterML.hdf5', __name__, path="../data/"))
@@ -114,11 +115,9 @@ def test_gmm_MAP_1():
   gmmprior = GMMMachine(bob.io.base.HDF5File(datafile("gmm_ML.hdf5", __name__, path="../data/")))
 
   map_gmmtrainer = MAP_GMMTrainer(update_means=True, update_variances=False, update_weights=False, prior_gmm=gmmprior, relevance_factor=4.)  
-  #map_gmmtrainer.set_prior_gmm(gmmprior)
-  map_gmmtrainer.train(gmm, ar)
-
-  #config = bob.io.base.HDF5File(datafile('gmm_MAP.hdf5", 'w', __name__))
-  #gmm.save(config)
+  
+  #map_gmmtrainer.train(gmm, ar)
+  bob.learn.em.train(map_gmmtrainer, gmm, ar)
 
   gmm_ref = GMMMachine(bob.io.base.HDF5File(datafile('gmm_MAP.hdf5', __name__, path="../data/")))
 
@@ -141,15 +140,15 @@ def test_gmm_MAP_2():
   gmm.weights = weights
 
   map_adapt = MAP_GMMTrainer(update_means=True, update_variances=False, update_weights=False, mean_var_update_responsibilities_threshold=0.,prior_gmm=gmm, relevance_factor=4.)
-  #map_adapt.set_prior_gmm(gmm)
 
   gmm_adapted = GMMMachine(2,50)
   gmm_adapted.means = means
   gmm_adapted.variances = variances
   gmm_adapted.weights = weights
 
-  map_adapt.max_iterations = 1
-  map_adapt.train(gmm_adapted, data)
+  #map_adapt.max_iterations = 1
+  #map_adapt.train(gmm_adapted, data)
+  bob.learn.em.train(map_adapt, gmm_adapted, data, max_iterations=1)
 
   new_means = bob.io.base.load(datafile('new_adapted_mean.hdf5', __name__, path="../data/"))
 
@@ -184,15 +183,16 @@ def test_gmm_MAP_3():
   max_iter_gmm = 1
   accuracy = 0.00001
   map_factor = 0.5
-  map_gmmtrainer = MAP_GMMTrainer(update_means=True, update_variances=False, update_weights=False, convergence_threshold=prior, prior_gmm=prior_gmm, alpha=map_factor)
-  map_gmmtrainer.max_iterations = max_iter_gmm
-  map_gmmtrainer.convergence_threshold = accuracy
+  map_gmmtrainer = MAP_GMMTrainer(prior_gmm, alpha=map_factor, update_means=True, update_variances=False, update_weights=False, convergence_threshold=prior)
+  #map_gmmtrainer.max_iterations = max_iter_gmm
+  #map_gmmtrainer.convergence_threshold = accuracy
 
   gmm = GMMMachine(n_gaussians, n_inputs)
   gmm.set_variance_thresholds(threshold)
 
   # Train
-  map_gmmtrainer.train(gmm, ar)
+  #map_gmmtrainer.train(gmm, ar)
+  bob.learn.em.train(map_gmmtrainer, gmm, ar, max_iterations=max_iter_gmm, convergence_threshold=accuracy)
 
   # Test results
   # Load torch3vision reference
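
The recurring pattern in the test_em.py changes above is that iteration control moves from trainer attributes onto the new module-level helper. A hedged sketch of the same migration outside the test suite (the data array is made up):

    import numpy
    import bob.learn.em

    data = numpy.random.randn(200, 3)                       # hypothetical training data
    gmm = bob.learn.em.GMMMachine(2, 3)                     # 2 Gaussians, 3 dimensions
    trainer = bob.learn.em.ML_GMMTrainer(True, True, True)  # update means/variances/weights

    # before: trainer.max_iterations = 25; trainer.convergence_threshold = 1e-5; trainer.train(gmm, data)
    # after:  the loop parameters travel with the call
    bob.learn.em.train(trainer, gmm, data, max_iterations=25, convergence_threshold=1e-5)
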
diff --git a/bob/learn/em/test/test_jfa_trainer.py b/bob/learn/em/test/test_jfa_trainer.py
index cc6e055..1bd520e 100644
--- a/bob/learn/em/test/test_jfa_trainer.py
+++ b/bob/learn/em/test/test_jfa_trainer.py
@@ -81,7 +81,7 @@ def test_JFATrainer_updateYandV():
   ubm.mean_supervector = UBM_MEAN
   ubm.variance_supervector = UBM_VAR
   m = JFABase(ubm,2,2)
-  t = JFATrainer(10)
+  t = JFATrainer()
   t.initialize(m, TRAINING_STATS)
   m.u = M_u
   m.v = M_v
@@ -115,7 +115,7 @@ def test_JFATrainer_updateXandU():
   ubm.mean_supervector = UBM_MEAN
   ubm.variance_supervector = UBM_VAR
   m = JFABase(ubm,2,2)
-  t = JFATrainer(10)
+  t = JFATrainer()
   t.initialize(m, TRAINING_STATS)
   m.u = M_u
   m.v = M_v
@@ -148,7 +148,7 @@ def test_JFATrainer_updateZandD():
   ubm.mean_supervector = UBM_MEAN
   ubm.variance_supervector = UBM_VAR
   m = JFABase(ubm,2,2)
-  t = JFATrainer(10)
+  t = JFATrainer()
   t.initialize(m, TRAINING_STATS)
   m.u = M_u
   m.v = M_v
@@ -173,12 +173,12 @@ def test_JFATrainAndEnrol():
   ubm.mean_supervector = UBM_MEAN
   ubm.variance_supervector = UBM_VAR
   mb = JFABase(ubm, 2, 2)
-  t = JFATrainer(10)
+  t = JFATrainer()
   t.initialize(mb, TRAINING_STATS)
   mb.u = M_u
   mb.v = M_v
   mb.d = M_d
-  t.train_loop(mb, TRAINING_STATS)
+  bob.learn.em.train_jfa(t, mb, TRAINING_STATS, initialize=False)
 
   v_ref = numpy.array([[0.245364911936476, 0.978133261775424], [0.769646805052223, 0.940070736856596], [0.310779202800089, 1.456332053893072],
         [0.184760934399551, 2.265139705602147], [0.701987784039800, 0.081632150899400], [0.074344030229297, 1.090248340917255]], 'float64')
@@ -225,7 +225,7 @@ def test_ISVTrainAndEnrol():
   ubm.mean_supervector = UBM_MEAN
   ubm.variance_supervector = UBM_VAR
   mb = ISVBase(ubm,2)
-  t = ISVTrainer(10, 4.)
+  t = ISVTrainer(4.)
   #t.train(mb, TRAINING_STATS)
   t.initialize(mb, TRAINING_STATS)
   mb.u = M_u
@@ -252,6 +252,7 @@ def test_ISVTrainAndEnrol():
   t.enrol(m, gse, 5)
   assert numpy.allclose(m.z, z_ref, eps)
 
+
 def test_JFATrainInitialize():
   # Check that the initialization is consistent and using the rng (cf. issue #118)
 
@@ -266,7 +267,7 @@ def test_JFATrainInitialize():
   jb = JFABase(ubm, 2, 2)
   # first round
   rng = bob.core.random.mt19937(0)
-  jt = JFATrainer(10)
+  jt = JFATrainer()
   jt.rng = rng
   jt.initialize(jb, TRAINING_STATS)
   u1 = jb.u
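
The test_jfa_trainer.py hunks drop the iteration count from the JFATrainer and ISVTrainer constructors; for JFA, the count now belongs to bob.learn.em.train_jfa. A sketch, assuming ubm and TRAINING_STATS are prepared as in the tests:

    jfa_base = bob.learn.em.JFABase(ubm, 2, 2)   # rank-2 U and V subspaces
    trainer = bob.learn.em.JFATrainer()          # no iteration argument any more
    bob.learn.em.train_jfa(trainer, jfa_base, TRAINING_STATS, max_iterations=10)
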
diff --git a/bob/learn/em/test/test_kmeans_trainer.py b/bob/learn/em/test/test_kmeans_trainer.py
index b62e992..8794a92 100644
--- a/bob/learn/em/test/test_kmeans_trainer.py
+++ b/bob/learn/em/test/test_kmeans_trainer.py
@@ -108,7 +108,8 @@ def test_kmeans_a():
   machine = KMeansMachine(2, 1)
 
   trainer = KMeansTrainer()
-  trainer.train(machine, data)
+  #trainer.train(machine, data)
+  bob.learn.em.train(trainer, machine, data)
 
   [variances, weights] = machine.get_variances_and_weights_for_each_cluster(data)
   variances_b = numpy.ndarray(shape=(2,1), dtype=numpy.float64)
@@ -140,9 +141,10 @@ def test_kmeans_b():
 
   trainer = KMeansTrainer()
   #trainer.seed = 1337
-  trainer.train(machine, arStd)
+  bob.learn.em.train(trainer, machine, arStd, convergence_threshold=0.001)
 
   [variances, weights] = machine.get_variances_and_weights_for_each_cluster(arStd)
+    
   means = machine.means
 
   multiplyVectorsByFactors(means, std)
@@ -160,22 +162,11 @@ def test_kmeans_b():
   assert equals(means, gmmMeans, 1e-3)
   assert equals(weights, gmmWeights, 1e-3)
   assert equals(variances, gmmVariances, 1e-3)
-  
-  # Check comparison operators
-  trainer1 = KMeansTrainer()
-  trainer2 = KMeansTrainer()
-  #trainer1.rng = trainer2.rng
-
-  #assert trainer1 == trainer2
-  #assert (trainer1 != trainer2) is False
-  trainer1.max_iterations = 1337
-  #assert (trainer1 == trainer2) is False
-  #assert trainer1 != trainer2
 
   # Check that there is no duplicate means during initialization
   machine = KMeansMachine(2, 1)
   trainer = KMeansTrainer()
   trainer.initialization_method = 'RANDOM_NO_DUPLICATE'
   data = numpy.array([[1.], [1.], [1.], [1.], [1.], [1.], [2.], [3.]])
-  trainer.train(machine, data)
-  assert (numpy.isnan(machine.means).any()) == False
\ No newline at end of file
+  bob.learn.em.train(trainer, machine, data)
+  assert not numpy.isnan(machine.means).any()
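
The last k-means block exercises the 'RANDOM_NO_DUPLICATE' initialization; a condensed sketch of the same idea, assuming numpy and bob.learn.em are imported as in the test module (data values are made up):

    machine = bob.learn.em.KMeansMachine(2, 1)             # 2 clusters, 1-D data
    trainer = bob.learn.em.KMeansTrainer()
    trainer.initialization_method = 'RANDOM_NO_DUPLICATE'  # never seed two clusters from equal points
    data = numpy.array([[1.], [1.], [1.], [5.]])           # heavily duplicated on purpose
    bob.learn.em.train(trainer, machine, data)
    assert not numpy.isnan(machine.means).any()
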
diff --git a/bob/learn/em/test/test_plda_trainer.py b/bob/learn/em/test/test_plda_trainer.py
index f72ab6f..02ede06 100644
--- a/bob/learn/em/test/test_plda_trainer.py
+++ b/bob/learn/em/test/test_plda_trainer.py
@@ -13,6 +13,7 @@ import numpy
 import numpy.linalg
 
 from bob.learn.em import PLDATrainer, PLDABase, PLDAMachine
+import bob.learn.em
 
 class PythonPLDATrainer():
   """A simplified (and slower) version of the PLDATrainer"""
@@ -362,7 +363,7 @@ def test_plda_EM_vs_Python():
 
   # Runs the PLDA trainer EM-steps (2 steps)
   # Defines base trainer and machine
-  t = PLDATrainer(10)
+  t = PLDATrainer()
   t_py = PythonPLDATrainer(max_iterations=10)
   m = PLDABase(D,nf,ng)
   m_py = PLDABase(D,nf,ng)
@@ -372,8 +373,10 @@ def test_plda_EM_vs_Python():
   t.init_g_method = 'WITHIN_SCATTER'
   t.init_sigma_method = 'VARIANCE_DATA'
 
-  t.train(m, l)
+  #t.train(m, l)
+  bob.learn.em.train(t, m, l, max_iterations=10)
   t_py.train(m_py, l)
+  
   assert numpy.allclose(m.mu, m_py.mu)
   assert numpy.allclose(m.f, m_py.f)
   assert numpy.allclose(m.g, m_py.g)
@@ -533,7 +536,7 @@ def test_plda_EM_vs_Prince():
   m_py.f = F_init
 
   # E-step 1
-  t.e_step(m,l)
+  t.eStep(m,l)
   t_py.e_step(m_py,l)
   # Compares statistics to Prince matlab reference
   assert numpy.allclose(t.z_first_order[0], z_first_order_a_1, 1e-10)
@@ -545,7 +548,7 @@ def test_plda_EM_vs_Prince():
   assert numpy.allclose(t.z_second_order_sum, t_py.m_sum_z_second_order, 1e-10)
 
   # M-step 1
-  t.m_step(m,l)
+  t.mStep(m,l)
   t_py.m_step(m_py,l)
   # Compares F, G and sigma to Prince matlab reference
   assert numpy.allclose(m.f, F_1, 1e-10)
@@ -557,7 +560,7 @@ def test_plda_EM_vs_Prince():
   assert numpy.allclose(m.sigma, m_py.sigma, 1e-10)
 
   # E-step 2
-  t.e_step(m,l)
+  t.eStep(m,l)
   t_py.e_step(m_py,l)
   # Compares statistics to Prince matlab reference
   assert numpy.allclose(t.z_first_order[0], z_first_order_a_2, 1e-10)
@@ -569,7 +572,7 @@ def test_plda_EM_vs_Prince():
   assert numpy.allclose(t.z_second_order_sum, t_py.m_sum_z_second_order, 1e-10)
 
   # M-step 2
-  t.m_step(m,l)
+  t.mStep(m,l)
   t_py.m_step(m_py,l)
   # Compares F, G and sigma to Prince matlab reference
   assert numpy.allclose(m.f, F_2, 1e-10)
@@ -595,7 +598,7 @@ def test_plda_EM_vs_Prince():
   m_py.f = F_init
 
   # E-step 1
-  t.e_step(m,l)
+  t.eStep(m,l)
   t_py.e_step(m_py,l)
   # Compares statistics to Prince matlab reference
   assert numpy.allclose(t.z_first_order[0], z_first_order_a_1, 1e-10)
@@ -608,7 +611,7 @@ def test_plda_EM_vs_Prince():
   assert numpy.allclose(t.z_second_order_sum, t_py.m_sum_z_second_order, 1e-10)
 
   # M-step 1
-  t.m_step(m,l)
+  t.mStep(m,l)
   t_py.m_step(m_py,l)
   # Compares F, G and sigma to the ones of the python implementation
   assert numpy.allclose(m.f, m_py.f, 1e-10)
@@ -616,7 +619,7 @@ def test_plda_EM_vs_Prince():
   assert numpy.allclose(m.sigma, m_py.sigma, 1e-10)
 
   # E-step 2
-  t.e_step(m,l)
+  t.eStep(m,l)
   t_py.e_step(m_py,l)
   # Compares statistics to Prince matlab reference
   assert numpy.allclose(t.z_first_order[0], z_first_order_a_2, 1e-10)
@@ -629,7 +632,7 @@ def test_plda_EM_vs_Prince():
   assert numpy.allclose(t.z_second_order_sum, t_py.m_sum_z_second_order, 1e-10)
 
   # M-step 2
-  t.m_step(m,l)
+  t.mStep(m,l)
   t_py.m_step(m_py,l)
   # Compares F, G and sigma to the ones of the python implementation
   assert numpy.allclose(m.f, m_py.f, 1e-10)
@@ -718,24 +721,23 @@ def test_plda_comparisons():
   m = PLDABase(4,1,1,1e-8)
   t1.rng.seed(37)
   t1.initialize(m, training_set)
-  t1.e_step(m, training_set)
-  t1.m_step(m, training_set)
+  t1.eStep(m, training_set)
+  t1.mStep(m, training_set)
   assert (t1 == t2 ) is False
   assert t1 != t2
   assert (t1.is_similar_to(t2) ) is False
   t2.rng.seed(37)
   t2.initialize(m, training_set)
-  t2.e_step(m, training_set)
-  t2.m_step(m, training_set)
+  t2.eStep(m, training_set)
+  t2.mStep(m, training_set)
   assert t1 == t2
   assert (t1 != t2 ) is False
   assert t1.is_similar_to(t2)
   t2.rng.seed(77)
   t2.initialize(m, training_set)
-  t2.e_step(m, training_set)
-  t2.m_step(m, training_set)
+  t2.eStep(m, training_set)
+  t2.mStep(m, training_set)
   assert (t1 == t2 ) is False
   assert t1 != t2
   assert (t1.is_similar_to(t2) ) is False
 
-  
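
The comparison test above drives the renamed camelCase steps by hand. A condensed sketch of that manual EM loop, assuming m and training_set are built as in the test:

    t = bob.learn.em.PLDATrainer()
    t.rng.seed(37)                 # reproducible initialization, as in the test
    t.initialize(m, training_set)
    for _ in range(2):             # two explicit EM rounds
        t.eStep(m, training_set)
        t.mStep(m, training_set)
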
diff --git a/bob/learn/em/train.py b/bob/learn/em/train.py
new file mode 100644
index 0000000..fe8b46c
--- /dev/null
+++ b/bob/learn/em/train.py
@@ -0,0 +1,61 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+# Fri Feb 13 13:18:10 2015 +0200
+#
+# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
+import numpy
+import bob.learn.em
+
+def train(trainer, machine, data, max_iterations=50, convergence_threshold=None, initialize=True):
+
+  #Initialization
+  if initialize:
+    trainer.initialize(machine, data)
+
+  trainer.eStep(machine, data)
+  average_output          = 0
+  average_output_previous = 0
+
+  if convergence_threshold is not None and hasattr(trainer, "compute_likelihood"):
+    average_output          = trainer.compute_likelihood(machine)
+  
+  for i in range(max_iterations):
+    average_output_previous = average_output
+    trainer.mStep(machine, data)
+    trainer.eStep(machine, data)
+
+    if convergence_threshold is not None and hasattr(trainer, "compute_likelihood"):
+      average_output = trainer.compute_likelihood(machine)
+
+    # Terminates if converged (and likelihood computation is set)
+    if convergence_threshold is not None and abs((average_output_previous - average_output)/average_output_previous) <= convergence_threshold:
+      break
+
+  if hasattr(trainer, "finalize"):
+    trainer.finalize(machine, data)
+
+
+def train_jfa(trainer, jfa_base, data, max_iterations=10, initialize=True):
+
+  if initialize:
+    trainer.initialize(jfa_base, data)
+    
+  #V Subspace
+  for i in range(max_iterations):
+    trainer.e_step1(jfa_base, data)
+    trainer.m_step1(jfa_base, data)
+  trainer.finalize1(jfa_base, data)
+
+  #U subspace
+  for i in range(max_iterations):
+    trainer.e_step2(jfa_base, data)
+    trainer.m_step2(jfa_base, data)
+  trainer.finalize2(jfa_base, data)
+
+  # d subspace
+  for i in range(max_iterations):
+    trainer.e_step3(jfa_base, data)
+    trainer.m_step3(jfa_base, data)
+  trainer.finalize3(jfa_base, data)
+
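
The new train() helper stops early once the relative change of compute_likelihood falls below convergence_threshold, and initialize=True runs the trainer's initialization first. When a machine was initialized elsewhere, initialize=False skips that step; a usage sketch with made-up data:

    import numpy
    import bob.learn.em

    data = numpy.random.randn(50, 3)           # hypothetical data
    machine = bob.learn.em.KMeansMachine(2, 3)
    trainer = bob.learn.em.KMeansTrainer()

    trainer.initialize(machine, data)          # explicit initialization...
    bob.learn.em.train(trainer, machine, data, max_iterations=10,
                       convergence_threshold=1e-5, initialize=False)  # ...so it is not repeated
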
diff --git a/bob/learn/em/ztnorm.cpp b/bob/learn/em/ztnorm.cpp
index b000b5f..a2dc672 100644
--- a/bob/learn/em/ztnorm.cpp
+++ b/bob/learn/em/ztnorm.cpp
@@ -10,7 +10,7 @@
 #include "main.h"
 
 /*** zt_norm ***/
-static auto zt_norm = bob::extension::FunctionDoc(
+bob::extension::FunctionDoc zt_norm = bob::extension::FunctionDoc(
   "ztnorm",
   "",
   0,
@@ -23,7 +23,7 @@ static auto zt_norm = bob::extension::FunctionDoc(
 .add_parameter("rawscores_zprobes_vs_tmodels", "array_like <float, 2D>", "")
 .add_parameter("mask_zprobes_vs_tmodels_istruetrial", "array_like <float, 2D>", "")
 .add_return("output","array_like <float, 2D>","");
-static PyObject* PyBobLearnEM_ztNorm(PyObject*, PyObject* args, PyObject* kwargs) {
+PyObject* PyBobLearnEM_ztNorm(PyObject*, PyObject* args, PyObject* kwargs) {
 
   char** kwlist = zt_norm.kwlist(0);
   
@@ -71,7 +71,7 @@ static PyObject* PyBobLearnEM_ztNorm(PyObject*, PyObject* args, PyObject* kwargs
 
 
 /*** t_norm ***/
-static auto t_norm = bob::extension::FunctionDoc(
+bob::extension::FunctionDoc t_norm = bob::extension::FunctionDoc(
   "tnorm",
   "",
   0,
@@ -81,7 +81,7 @@ static auto t_norm = bob::extension::FunctionDoc(
 .add_parameter("rawscores_probes_vs_models", "array_like <float, 2D>", "")
 .add_parameter("rawscores_probes_vs_tmodels", "array_like <float, 2D>", "")
 .add_return("output","array_like <float, 2D>","");
-static PyObject* PyBobLearnEM_tNorm(PyObject*, PyObject* args, PyObject* kwargs) {
+PyObject* PyBobLearnEM_tNorm(PyObject*, PyObject* args, PyObject* kwargs) {
 
   char** kwlist = t_norm.kwlist(0);
   
@@ -108,7 +108,7 @@ static PyObject* PyBobLearnEM_tNorm(PyObject*, PyObject* args, PyObject* kwargs)
 
 
 /*** z_norm ***/
-static auto z_norm = bob::extension::FunctionDoc(
+bob::extension::FunctionDoc z_norm = bob::extension::FunctionDoc(
   "znorm",
   "",
   0,
@@ -118,7 +118,7 @@ static auto z_norm = bob::extension::FunctionDoc(
 .add_parameter("rawscores_probes_vs_models", "array_like <float, 2D>", "")
 .add_parameter("rawscores_zprobes_vs_models", "array_like <float, 2D>", "")
 .add_return("output","array_like <float, 2D>","");
-static PyObject* PyBobLearnEM_zNorm(PyObject*, PyObject* args, PyObject* kwargs) {
+PyObject* PyBobLearnEM_zNorm(PyObject*, PyObject* args, PyObject* kwargs) {
 
   char** kwlist = z_norm.kwlist(0);
   
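
Dropping the static qualifiers above makes the score-normalization symbols visible outside ztnorm.cpp, presumably so main.h/main.cpp can re-export them. A hedged numpy sketch of a ztnorm call; the row/column orientation of the four matrices is an assumption here, not something these hunks specify:

    import numpy
    import bob.learn.em

    m, t, p, z = 4, 3, 10, 8          # models, T-models, probes, Z-probes (made up)
    A = numpy.random.randn(m, p)      # raw scores: probes vs. models
    B = numpy.random.randn(m, z)      # raw scores: Z-probes vs. models
    C = numpy.random.randn(t, p)      # raw scores: probes vs. T-models
    D = numpy.random.randn(t, z)      # raw scores: Z-probes vs. T-models

    normalized = bob.learn.em.ztnorm(A, B, C, D)   # ZT-normalized scores, one per (model, probe)
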
diff --git a/doc/guide.rst b/doc/guide.rst
index d7895aa..0c11a33 100644
--- a/doc/guide.rst
+++ b/doc/guide.rst
@@ -391,10 +391,8 @@ be called.
    :options: +NORMALIZE_WHITESPACE
 
    >>> kmeansTrainer = bob.learn.em.KMeansTrainer()
-   >>> kmeansTrainer.max_iterations = 200
-   >>> kmeansTrainer.convergence_threshold = 1e-5
 
-   >>> kmeansTrainer.train(kmeans, data) # Train the KMeansMachine
+   >>> bob.learn.em.train(kmeansTrainer, kmeans, data, max_iterations=200, convergence_threshold=1e-5) # Train the KMeansMachine
    >>> print(kmeans.means)
    [[ -6.   6.  -100.5]
     [  3.5 -3.5   99. ]]
@@ -428,9 +426,7 @@ and the criterion used to determine if the parameters have converged.
    :options: +NORMALIZE_WHITESPACE
 
    >>> trainer = bob.learn.em.ML_GMMTrainer(True, True, True) # update means/variances/weights at each iteration
-   >>> trainer.convergence_threshold = 1e-5
-   >>> trainer.max_iterations = 200
-   >>> trainer.train(gmm, data)
+   >>> bob.learn.em.train(trainer, gmm, data, max_iterations=200, convergence_threshold=1e-5)
    >>> print(gmm) # doctest: +SKIP
 
 
@@ -476,7 +472,7 @@ set.
    >>> trainer.convergence_threshold = 1e-5
    >>> trainer.max_iterations = 200
    >>> gmmAdapted = bob.learn.em.GMMMachine(2,3) # Create a new machine for the MAP estimate
-   >>> trainer.train(gmmAdapted, dataMAP)
+   >>> bob.learn.em.train(trainer, gmmAdapted, dataMAP)
    >>> print(gmmAdapted) # doctest: +SKIP
 
 
@@ -529,7 +525,7 @@ Next, we initialize a trainer, which is an instance of
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> jfa_trainer = bob.learn.em.JFATrainer(10) # 10 is the number of iterations
+   >>> jfa_trainer = bob.learn.em.JFATrainer()
 
 The training process is started by calling the
 :py:func:`bob.learn.em.train_jfa` function.
@@ -537,7 +533,7 @@ The training process is started by calling the
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> jfa_trainer.train(jfa_base, TRAINING_STATS)
+   >>> bob.learn.em.train_jfa(jfa_trainer, jfa_base, TRAINING_STATS, max_iterations=10)
 
 Once the training is finished (i.e. the subspaces :math:`U`, :math:`V` and
 :math:`D` are estimated), the JFA model can be shared and used by several
@@ -590,7 +586,7 @@ Next, we initialize a trainer, which is an instance of
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> isv_trainer = bob.learn.em.ISVTrainer(10, 4.) # 10 is the number of iterations, and 4 is the relevance factor
+   >>> isv_trainer = bob.learn.em.ISVTrainer(relevance_factor=4.) # 4 is the relevance factor
 
 The training process is started by calling the
 :py:func:`bob.learn.em.train` function.
@@ -598,7 +594,7 @@ The training process is started by calling the
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> isv_trainer.train(isv_base, TRAINING_STATS)
+   >>> bob.learn.em.train(isv_trainer, isv_base, TRAINING_STATS, max_iterations=10)
 
 Once the training is finished (i.e. the subspaces :math:`V` and :math:`D` are
 estimated), the ISV model can be shared and used by several class-specific
@@ -638,7 +634,7 @@ Next, we initialize a trainer, which is an instance of
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> ivec_trainer = bob.learn.em.IVectorTrainer(update_sigma=True, max_iterations=10)
+   >>> ivec_trainer = bob.learn.em.IVectorTrainer(update_sigma=True)
    >>> TRAINING_STATS_flatten = [gs11, gs12, gs21, gs22]
 
 The training process is started by calling the
@@ -647,7 +643,7 @@ The training process is started by calling the
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> ivec_trainer.train(m, TRAINING_STATS_flatten)
+   >>> bob.learn.em.train(ivec_trainer, m, TRAINING_STATS_flatten, max_iterations=10)
 
 More information about the training process can be found in [15]_.
 
@@ -693,7 +689,7 @@ Learning a PLDA model can be performed by instantiating the class
    >>> pldabase = bob.learn.em.PLDABase(3,1,2)
 
    >>> trainer = bob.learn.em.PLDATrainer()
-   >>> trainer.train(pldabase, data)
+   >>> bob.learn.em.train(trainer, pldabase, data, max_iterations=10)
 
 Once trained, this PLDA model can be used to compute the log-likelihood of a
 set of samples given some hypothesis. For this purpose, a
-- 
GitLab