From 53f4259a00d7337bb4913fe16a011c0408127fb3 Mon Sep 17 00:00:00 2001
From: Tiago Freitas Pereira <tiagofrepereira@gmail.com>
Date: Sun, 1 Feb 2015 21:32:16 +0100
Subject: [PATCH] Binding JFATrainer

---
 bob/learn/misc/__jfa_trainer__.py             |  73 ++
 bob/learn/misc/cpp/FABaseTrainer.cpp          | 547 +++++++++++
 bob/learn/misc/cpp/ISVTrainer.cpp             | 144 +++
 bob/learn/misc/cpp/JFATrainer.cpp             | 679 +-------------
 .../include/bob.learn.misc/FABaseTrainer.h    | 350 +++++++
 .../misc/include/bob.learn.misc/ISVTrainer.h  | 158 ++++
 .../misc/include/bob.learn.misc/JFATrainer.h  | 470 +---------
 bob/learn/misc/jfa_trainer.cpp                | 873 ++++++++++++++++++
 bob/learn/misc/main.cpp                       |   7 +-
 bob/learn/misc/main.h                         |  17 +-
 setup.py                                      |   9 +-
 11 files changed, 2199 insertions(+), 1128 deletions(-)
 create mode 100644 bob/learn/misc/__jfa_trainer__.py
 create mode 100644 bob/learn/misc/cpp/FABaseTrainer.cpp
 create mode 100644 bob/learn/misc/cpp/ISVTrainer.cpp
 create mode 100644 bob/learn/misc/include/bob.learn.misc/FABaseTrainer.h
 create mode 100644 bob/learn/misc/include/bob.learn.misc/ISVTrainer.h
 create mode 100644 bob/learn/misc/jfa_trainer.cpp

diff --git a/bob/learn/misc/__jfa_trainer__.py b/bob/learn/misc/__jfa_trainer__.py
new file mode 100644
index 0000000..a2e661b
--- /dev/null
+++ b/bob/learn/misc/__jfa_trainer__.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+# Sun Feb 01 21:10:10 2015 +0200
+#
+# Copyright (C) 2011-2015 Idiap Research Institute, Martigny, Switzerland
+
+from ._library import _JFATrainer
+import numpy
+
+# define the class
+class JFATrainer (_JFATrainer):
+
+  def __init__(self, max_iterations=10):
+    """
+    :py:class:`bob.learn.misc.JFATrainer` constructor
+
+    Keyword Parameters:
+      max_iterations
+        Number of maximum iterations
+    """
+
+    _JFATrainer.__init__(self)
+    self._max_iterations         = max_iterations
+
+
+  def train_loop(self, jfa_base, data):
+    """
+    Train the :py:class:`bob.learn.misc.JFABase` using data
+
+    Keyword Parameters:
+      jfa_base
+        The :py:class:`bob.learn.misc.JFABase` class
+      data
+        The data to be trained
+    """
+    #V Subspace
+    for i in range(self._max_iterations):
+      self.eStep1(jfa_base, data)
+      self.mStep1(jfa_base, data)
+    self.finalize1(jfa_base, data)
+
+    #U subspace
+    for i in range(self._max_iterations):
+      self.eStep2(jfa_base, data)
+      self.mStep2(jfa_base, data)
+    self.finalize2(jfa_base, data)
+
+    # d subspace
+    for i in range(self._max_iterations):
+      self.eStep3(jfa_base, data)
+      self.mStep3(jfa_base, data)
+    self.finalize3(jfa_base, data)
+
+
+  def train(self, jfa_base, data):
+    """
+    Train the :py:class:`bob.learn.misc.JFABase` using data
+
+    Keyword Parameters:
+      jfa_base
+        The :py:class:`bob.learn.misc.JFABase` class
+      data
+        The data to be trained
+    """
+    self.initialize(jfa_base, data)
+    self.train_loop(jfa_base, data)
+
+
+  def enrol(self, jfa_base, data):
+    raise NotImplementedError("JFATrainer.enrol is not implemented yet")
+
+# copy the documentation from the base class
+__doc__ = _JFATrainer.__doc__
diff --git a/bob/learn/misc/cpp/FABaseTrainer.cpp b/bob/learn/misc/cpp/FABaseTrainer.cpp
new file mode 100644
index 0000000..5827859
--- /dev/null
+++ b/bob/learn/misc/cpp/FABaseTrainer.cpp
@@ -0,0 +1,547 @@
+/**
+ * @date Sat Jan 31 17:16:17 2015 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ *
+ * @brief FABaseTrainer functions
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include <bob.learn.misc/FABaseTrainer.h>
+#include <bob.core/check.h>
+#include <bob.core/array_copy.h>
+#include <bob.core/array_random.h>
+#include <bob.math/inv.h>
+#include <bob.math/linear.h>
+#include <bob.core/check.h>
+#include <bob.core/array_repmat.h>
+#include <algorithm>
+
+
+bob::learn::misc::FABaseTrainer::FABaseTrainer():
+  m_Nid(0), m_dim_C(0), m_dim_D(0), m_dim_ru(0), m_dim_rv(0),
+  m_x(0), m_y(0), m_z(0), m_Nacc(0), m_Facc(0)
+{
+}
+
+bob::learn::misc::FABaseTrainer::FABaseTrainer(const bob::learn::misc::FABaseTrainer& other)
+{ // NOTE(review): copies no state — dimensions, x/y/z, caches and accumulators are left default-initialized; confirm this is intentional
+}
+
+bob::learn::misc::FABaseTrainer::~FABaseTrainer()
+{
+}
+
+void bob::learn::misc::FABaseTrainer::checkStatistics(
+  const bob::learn::misc::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
+{ // validates only the sumPx dimensions (C x D) of every session; the n vector is not checked here
+  for (size_t id=0; id<stats.size(); ++id) {
+    for (size_t s=0; s<stats[id].size(); ++s) {
+      if (stats[id][s]->sumPx.extent(0) != (int)m_dim_C) {
+        boost::format m("GMMStats C dimension parameter = %d is different than the expected value of %d");
+        m % stats[id][s]->sumPx.extent(0) % (int)m_dim_C;
+        throw std::runtime_error(m.str());
+      }
+      if (stats[id][s]->sumPx.extent(1) != (int)m_dim_D) {
+        boost::format m("GMMStats D dimension parameter = %d is different than the expected value of %d");
+        m % stats[id][s]->sumPx.extent(1) % (int)m_dim_D;
+        throw std::runtime_error(m.str());
+      }
+    }
+  }
+}
+
+
+void bob::learn::misc::FABaseTrainer::initUbmNidSumStatistics(
+  const bob::learn::misc::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
+{
+  m_Nid = stats.size();
+  boost::shared_ptr<bob::learn::misc::GMMMachine> ubm = m.getUbm();
+  // Put UBM in cache
+  m_dim_C = ubm->getNGaussians();
+  m_dim_D = ubm->getNInputs();
+  m_dim_ru = m.getDimRu();
+  m_dim_rv = m.getDimRv();
+  // Check statistics dimensionality
+  checkStatistics(m, stats);
+  // Precomputes the sum of the statistics for each client/identity
+  precomputeSumStatisticsN(stats);
+  precomputeSumStatisticsF(stats);
+  // Cache and working arrays
+  initCache();
+}
+
+void bob::learn::misc::FABaseTrainer::precomputeSumStatisticsN(
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
+{ // accumulates zeroth-order statistics (n) over all sessions of each identity into m_Nacc
+  m_Nacc.clear();
+  blitz::Array<double,1> Nsum(m_dim_C);
+  for (size_t id=0; id<stats.size(); ++id) {
+    Nsum = 0.;
+    for (size_t s=0; s<stats[id].size(); ++s) {
+      Nsum += stats[id][s]->n;
+    }
+    m_Nacc.push_back(bob::core::array::ccopy(Nsum));
+  }
+}
+
+void bob::learn::misc::FABaseTrainer::precomputeSumStatisticsF(
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
+{ // accumulates first-order statistics (sumPx), flattened to length C*D, into m_Facc
+  m_Facc.clear();
+  blitz::Array<double,1> Fsum(m_dim_C*m_dim_D);
+  for (size_t id=0; id<stats.size(); ++id) {
+    Fsum = 0.;
+    for (size_t s=0; s<stats[id].size(); ++s) {
+      for (size_t c=0; c<m_dim_C; ++c) {
+        blitz::Array<double,1> Fsum_c = Fsum(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1));
+        Fsum_c += stats[id][s]->sumPx(c,blitz::Range::all());
+      }
+    }
+    m_Facc.push_back(bob::core::array::ccopy(Fsum));
+  }
+}
+
+void bob::learn::misc::FABaseTrainer::initializeXYZ(const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& vec)
+{
+  m_x.clear();
+  m_y.clear();
+  m_z.clear();
+
+  blitz::Array<double,1> z0(m_dim_C*m_dim_D);
+  z0 = 0;
+  blitz::Array<double,1> y0(m_dim_rv);
+  y0 = 0;
+  blitz::Array<double,2> x0(m_dim_ru,0); // 0 columns initially; resized below to the per-identity session count
+  x0 = 0;
+  for (size_t i=0; i<vec.size(); ++i)
+  {
+    m_z.push_back(bob::core::array::ccopy(z0));
+    m_y.push_back(bob::core::array::ccopy(y0));
+    x0.resize(m_dim_ru, vec[i].size());
+    x0 = 0;
+    m_x.push_back(bob::core::array::ccopy(x0));
+  }
+}
+
+void bob::learn::misc::FABaseTrainer::resetXYZ()
+{
+  for (size_t i=0; i<m_x.size(); ++i)
+  {
+    m_x[i] = 0.;
+    m_y[i] = 0.;
+    m_z[i] = 0.;
+  }
+}
+
+void bob::learn::misc::FABaseTrainer::initCache()
+{
+  const size_t dim_CD = m_dim_C*m_dim_D;
+  // U
+  m_cache_UtSigmaInv.resize(m_dim_ru, dim_CD);
+  m_cache_UProd.resize(m_dim_C, m_dim_ru, m_dim_ru);
+  m_cache_IdPlusUProd_ih.resize(m_dim_ru, m_dim_ru);
+  m_cache_Fn_x_ih.resize(dim_CD);
+  m_acc_U_A1.resize(m_dim_C, m_dim_ru, m_dim_ru);
+  m_acc_U_A2.resize(dim_CD, m_dim_ru);
+  // V
+  m_cache_VtSigmaInv.resize(m_dim_rv, dim_CD);
+  m_cache_VProd.resize(m_dim_C, m_dim_rv, m_dim_rv);
+  m_cache_IdPlusVProd_i.resize(m_dim_rv, m_dim_rv);
+  m_cache_Fn_y_i.resize(dim_CD);
+  m_acc_V_A1.resize(m_dim_C, m_dim_rv, m_dim_rv);
+  m_acc_V_A2.resize(dim_CD, m_dim_rv);
+  // D
+  m_cache_DtSigmaInv.resize(dim_CD);
+  m_cache_DProd.resize(dim_CD);
+  m_cache_IdPlusDProd_i.resize(dim_CD);
+  m_cache_Fn_z_i.resize(dim_CD);
+  m_acc_D_A1.resize(dim_CD);
+  m_acc_D_A2.resize(dim_CD);
+
+  // tmp
+  m_tmp_CD.resize(dim_CD);
+  m_tmp_CD_b.resize(dim_CD);
+
+  m_tmp_ru.resize(m_dim_ru);
+  m_tmp_ruD.resize(m_dim_ru, m_dim_D);
+  m_tmp_ruru.resize(m_dim_ru, m_dim_ru);
+
+  m_tmp_rv.resize(m_dim_rv);
+  m_tmp_rvD.resize(m_dim_rv, m_dim_D);
+  m_tmp_rvrv.resize(m_dim_rv, m_dim_rv);
+}
+
+
+
+//////////////////////////// V ///////////////////////////
+void bob::learn::misc::FABaseTrainer::computeVtSigmaInv(const bob::learn::misc::FABase& m)
+{
+  const blitz::Array<double,2>& V = m.getV();
+  // Blitz compatibility: ugly fix (const_cast, as old blitz version does not
+  // provide a non-const version of transpose())
+  const blitz::Array<double,2> Vt = const_cast<blitz::Array<double,2>&>(V).transpose(1,0);
+  const blitz::Array<double,1>& sigma = m.getUbmVariance();
+  blitz::firstIndex i;
+  blitz::secondIndex j;
+  m_cache_VtSigmaInv = Vt(i,j) / sigma(j); // Vt * diag(sigma)^-1
+}
+
+void bob::learn::misc::FABaseTrainer::computeVProd(const bob::learn::misc::FABase& m)
+{
+  const blitz::Array<double,2>& V = m.getV();
+  blitz::firstIndex i;
+  blitz::secondIndex j;
+  const blitz::Array<double,1>& sigma = m.getUbmVariance();
+  blitz::Range rall = blitz::Range::all();
+  for (size_t c=0; c<m_dim_C; ++c)
+  {
+    blitz::Array<double,2> VProd_c = m_cache_VProd(c, rall, rall);
+    blitz::Array<double,2> Vv_c = V(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1), rall);
+    blitz::Array<double,2> Vt_c = Vv_c.transpose(1,0);
+    blitz::Array<double,1> sigma_c = sigma(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1));
+    m_tmp_rvD = Vt_c(i,j) / sigma_c(j); // Vt_c * diag(sigma)^-1
+    bob::math::prod(m_tmp_rvD, Vv_c, VProd_c);
+  }
+}
+
+void bob::learn::misc::FABaseTrainer::computeIdPlusVProd_i(const size_t id)
+{
+  const blitz::Array<double,1>& Ni = m_Nacc[id];
+  bob::math::eye(m_tmp_rvrv); // m_tmp_rvrv = I
+  blitz::Range rall = blitz::Range::all();
+  for (size_t c=0; c<m_dim_C; ++c) {
+    blitz::Array<double,2> VProd_c = m_cache_VProd(c, rall, rall);
+    m_tmp_rvrv += VProd_c * Ni(c);
+  }
+  bob::math::inv(m_tmp_rvrv, m_cache_IdPlusVProd_i); // m_cache_IdPlusVProd_i = ( I+Vt*diag(sigma)^-1*Ni*V)^-1
+}
+
+void bob::learn::misc::FABaseTrainer::computeFn_y_i(const bob::learn::misc::FABase& mb,
+  const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& stats, const size_t id)
+{
+  const blitz::Array<double,2>& U = mb.getU();
+  const blitz::Array<double,1>& d = mb.getD();
+  // Compute Fn_yi = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i} - U*x_{i,h}) (Normalised first order statistics)
+  const blitz::Array<double,1>& Fi = m_Facc[id];
+  const blitz::Array<double,1>& m = mb.getUbmMean();
+  const blitz::Array<double,1>& z = m_z[id];
+  bob::core::array::repelem(m_Nacc[id], m_tmp_CD);
+  m_cache_Fn_y_i = Fi - m_tmp_CD * (m + d * z); // Fn_yi = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i})
+  const blitz::Array<double,2>& X = m_x[id];
+  blitz::Range rall = blitz::Range::all();
+  for (int h=0; h<X.extent(1); ++h) // Loops over the sessions
+  {
+    blitz::Array<double,1> Xh = X(rall, h); // Xh = x_{i,h} (length: ru)
+    bob::math::prod(U, Xh, m_tmp_CD_b); // m_tmp_CD_b = U*x_{i,h}
+    const blitz::Array<double,1>& Nih = stats[h]->n;
+    bob::core::array::repelem(Nih, m_tmp_CD);
+    m_cache_Fn_y_i -= m_tmp_CD * m_tmp_CD_b; // N_{i,h} * U * x_{i,h}
+  }
+  // Fn_yi = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i} - U*x_{i,h})
+}
+
+void bob::learn::misc::FABaseTrainer::updateY_i(const size_t id)
+{
+  // Computes yi = Ayi * Cvs * Fn_yi
+  blitz::Array<double,1>& y = m_y[id];
+  // m_tmp_rv = m_cache_VtSigmaInv * m_cache_Fn_y_i = Vt*diag(sigma)^-1 * sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i} - U*x_{i,h})
+  bob::math::prod(m_cache_VtSigmaInv, m_cache_Fn_y_i, m_tmp_rv);
+  bob::math::prod(m_cache_IdPlusVProd_i, m_tmp_rv, y);
+}
+
+void bob::learn::misc::FABaseTrainer::updateY(const bob::learn::misc::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
+{
+  // Precomputation
+  computeVtSigmaInv(m);
+  computeVProd(m);
+  // Loops over all people
+  for (size_t id=0; id<stats.size(); ++id) {
+    computeIdPlusVProd_i(id);
+    computeFn_y_i(m, stats[id], id);
+    updateY_i(id);
+  }
+}
+
+void bob::learn::misc::FABaseTrainer::computeAccumulatorsV(
+  const bob::learn::misc::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
+{
+  // Initializes the cache accumulator
+  m_acc_V_A1 = 0.;
+  m_acc_V_A2 = 0.;
+  // Loops over all people
+  blitz::firstIndex i;
+  blitz::secondIndex j;
+  blitz::Range rall = blitz::Range::all();
+  for (size_t id=0; id<stats.size(); ++id) {
+    computeIdPlusVProd_i(id);
+    computeFn_y_i(m, stats[id], id);
+
+    // Needs to return values to be accumulated for estimating V
+    const blitz::Array<double,1>& y = m_y[id];
+    m_tmp_rvrv = m_cache_IdPlusVProd_i;
+    m_tmp_rvrv += y(i) * y(j);
+    for (size_t c=0; c<m_dim_C; ++c)
+    {
+      blitz::Array<double,2> A1_y_c = m_acc_V_A1(c, rall, rall);
+      A1_y_c += m_tmp_rvrv * m_Nacc[id](c);
+    }
+    m_acc_V_A2 += m_cache_Fn_y_i(i) * y(j);
+  }
+}
+
+void bob::learn::misc::FABaseTrainer::updateV(blitz::Array<double,2>& V)
+{
+  blitz::Range rall = blitz::Range::all();
+  for (size_t c=0; c<m_dim_C; ++c)
+  {
+    const blitz::Array<double,2> A1 = m_acc_V_A1(c, rall, rall);
+    bob::math::inv(A1, m_tmp_rvrv);
+    const blitz::Array<double,2> A2 = m_acc_V_A2(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1), rall);
+    blitz::Array<double,2> V_c = V(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1), rall);
+    bob::math::prod(A2, m_tmp_rvrv, V_c);
+  }
+}
+
+
+//////////////////////////// U ///////////////////////////
+void bob::learn::misc::FABaseTrainer::computeUtSigmaInv(const bob::learn::misc::FABase& m)
+{
+  const blitz::Array<double,2>& U = m.getU();
+  // Blitz compatibility: ugly fix (const_cast, as old blitz version does not
+  // provide a non-const version of transpose())
+  const blitz::Array<double,2> Ut = const_cast<blitz::Array<double,2>&>(U).transpose(1,0);
+  const blitz::Array<double,1>& sigma = m.getUbmVariance();
+  blitz::firstIndex i;
+  blitz::secondIndex j;
+  m_cache_UtSigmaInv = Ut(i,j) / sigma(j); // Ut * diag(sigma)^-1
+}
+
+void bob::learn::misc::FABaseTrainer::computeUProd(const bob::learn::misc::FABase& m)
+{
+  const blitz::Array<double,2>& U = m.getU();
+  blitz::firstIndex i;
+  blitz::secondIndex j;
+  const blitz::Array<double,1>& sigma = m.getUbmVariance();
+  for (size_t c=0; c<m_dim_C; ++c)
+  {
+    blitz::Array<double,2> UProd_c = m_cache_UProd(c, blitz::Range::all(), blitz::Range::all());
+    blitz::Array<double,2> Uu_c = U(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1), blitz::Range::all());
+    blitz::Array<double,2> Ut_c = Uu_c.transpose(1,0);
+    blitz::Array<double,1> sigma_c = sigma(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1));
+    m_tmp_ruD = Ut_c(i,j) / sigma_c(j); // Ut_c * diag(sigma)^-1
+    bob::math::prod(m_tmp_ruD, Uu_c, UProd_c);
+  }
+}
+
+void bob::learn::misc::FABaseTrainer::computeIdPlusUProd_ih(
+  const boost::shared_ptr<bob::learn::misc::GMMStats>& stats)
+{
+  const blitz::Array<double,1>& Nih = stats->n;
+  bob::math::eye(m_tmp_ruru); // m_tmp_ruru = I
+  for (size_t c=0; c<m_dim_C; ++c) {
+    blitz::Array<double,2> UProd_c = m_cache_UProd(c,blitz::Range::all(),blitz::Range::all());
+    m_tmp_ruru += UProd_c * Nih(c);
+  }
+  bob::math::inv(m_tmp_ruru, m_cache_IdPlusUProd_ih); // m_cache_IdPlusUProd_ih = ( I+Ut*diag(sigma)^-1*Ni*U)^-1
+}
+
+void bob::learn::misc::FABaseTrainer::computeFn_x_ih(const bob::learn::misc::FABase& mb,
+  const boost::shared_ptr<bob::learn::misc::GMMStats>& stats, const size_t id)
+{
+  const blitz::Array<double,2>& V = mb.getV();
+  const blitz::Array<double,1>& d =  mb.getD();
+  // Compute Fn_x_ih = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i} - V*y_{i}) (Normalised first order statistics)
+  const blitz::Array<double,2>& Fih = stats->sumPx;
+  const blitz::Array<double,1>& m = mb.getUbmMean();
+  const blitz::Array<double,1>& z = m_z[id];
+  const blitz::Array<double,1>& Nih = stats->n;
+  bob::core::array::repelem(Nih, m_tmp_CD);
+  for (size_t c=0; c<m_dim_C; ++c) {
+    blitz::Array<double,1> Fn_x_ih_c = m_cache_Fn_x_ih(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1));
+    Fn_x_ih_c = Fih(c,blitz::Range::all());
+  }
+  m_cache_Fn_x_ih -= m_tmp_CD * (m + d * z); // Fn_x_ih = N_{i,h}*(o_{i,h} - m - D*z_{i})
+
+  const blitz::Array<double,1>& y = m_y[id];
+  bob::math::prod(V, y, m_tmp_CD_b);
+  m_cache_Fn_x_ih -= m_tmp_CD * m_tmp_CD_b;
+  // Fn_x_ih = N_{i,h}*(o_{i,h} - m - D*z_{i} - V*y_{i})
+}
+
+void bob::learn::misc::FABaseTrainer::updateX_ih(const size_t id, const size_t h)
+{
+  // Computes xih = Axih * Cus * Fn_x_ih
+  blitz::Array<double,1> x = m_x[id](blitz::Range::all(), h);
+  // m_tmp_ru = m_cache_UtSigmaInv * m_cache_Fn_x_ih = Ut*diag(sigma)^-1 * N_{i,h}*(o_{i,h} - m - D*z_{i} - V*y_{i})
+  bob::math::prod(m_cache_UtSigmaInv, m_cache_Fn_x_ih, m_tmp_ru);
+  bob::math::prod(m_cache_IdPlusUProd_ih, m_tmp_ru, x);
+}
+
+void bob::learn::misc::FABaseTrainer::updateX(const bob::learn::misc::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
+{
+  // Precomputation
+  computeUtSigmaInv(m);
+  computeUProd(m);
+  // Loops over all people
+  for (size_t id=0; id<stats.size(); ++id) {
+    int n_session_i = stats[id].size();
+    for (int s=0; s<n_session_i; ++s) {
+      computeIdPlusUProd_ih(stats[id][s]);
+      computeFn_x_ih(m, stats[id][s], id);
+      updateX_ih(id, s);
+    }
+  }
+}
+
+void bob::learn::misc::FABaseTrainer::computeAccumulatorsU(
+  const bob::learn::misc::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
+{
+  // Initializes the cache accumulator
+  m_acc_U_A1 = 0.;
+  m_acc_U_A2 = 0.;
+  // Loops over all people
+  blitz::firstIndex i;
+  blitz::secondIndex j;
+  blitz::Range rall = blitz::Range::all();
+  for (size_t id=0; id<stats.size(); ++id) {
+    int n_session_i = stats[id].size();
+    for (int h=0; h<n_session_i; ++h) {
+      computeIdPlusUProd_ih(stats[id][h]);
+      computeFn_x_ih(m, stats[id][h], id);
+
+      // Needs to return values to be accumulated for estimating U
+      blitz::Array<double,1> x = m_x[id](rall, h);
+      m_tmp_ruru = m_cache_IdPlusUProd_ih;
+      m_tmp_ruru += x(i) * x(j);
+      for (int c=0; c<(int)m_dim_C; ++c)
+      {
+        blitz::Array<double,2> A1_x_c = m_acc_U_A1(c,rall,rall);
+        A1_x_c += m_tmp_ruru * stats[id][h]->n(c);
+      }
+      m_acc_U_A2 += m_cache_Fn_x_ih(i) * x(j);
+    }
+  }
+}
+
+void bob::learn::misc::FABaseTrainer::updateU(blitz::Array<double,2>& U)
+{
+  for (size_t c=0; c<m_dim_C; ++c)
+  {
+    const blitz::Array<double,2> A1 = m_acc_U_A1(c,blitz::Range::all(),blitz::Range::all());
+    bob::math::inv(A1, m_tmp_ruru);
+    const blitz::Array<double,2> A2 = m_acc_U_A2(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1),blitz::Range::all());
+    blitz::Array<double,2> U_c = U(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1),blitz::Range::all());
+    bob::math::prod(A2, m_tmp_ruru, U_c);
+  }
+}
+
+
+//////////////////////////// D ///////////////////////////
+void bob::learn::misc::FABaseTrainer::computeDtSigmaInv(const bob::learn::misc::FABase& m)
+{
+  const blitz::Array<double,1>& d = m.getD();
+  const blitz::Array<double,1>& sigma = m.getUbmVariance();
+  m_cache_DtSigmaInv = d / sigma; // Dt * diag(sigma)^-1
+}
+
+void bob::learn::misc::FABaseTrainer::computeDProd(const bob::learn::misc::FABase& m)
+{
+  const blitz::Array<double,1>& d = m.getD();
+  const blitz::Array<double,1>& sigma = m.getUbmVariance();
+  m_cache_DProd = d / sigma * d; // Dt * diag(sigma)^-1 * D
+}
+
+void bob::learn::misc::FABaseTrainer::computeIdPlusDProd_i(const size_t id)
+{
+  const blitz::Array<double,1>& Ni = m_Nacc[id];
+  bob::core::array::repelem(Ni, m_tmp_CD); // m_tmp_CD = Ni 'repmat'
+  m_cache_IdPlusDProd_i = 1.; // m_cache_IdPlusDProd_i = Id
+  m_cache_IdPlusDProd_i += m_cache_DProd * m_tmp_CD; // m_cache_IdPlusDProd_i = I+Dt*diag(sigma)^-1*Ni*D
+  m_cache_IdPlusDProd_i = 1 / m_cache_IdPlusDProd_i; // m_cache_IdPlusVProd_i = (I+Dt*diag(sigma)^-1*Ni*D)^-1
+}
+
+void bob::learn::misc::FABaseTrainer::computeFn_z_i(
+  const bob::learn::misc::FABase& mb,
+  const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& stats, const size_t id)
+{
+  const blitz::Array<double,2>& U = mb.getU();
+  const blitz::Array<double,2>& V = mb.getV();
+  // Compute Fn_z_i = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - V*y_{i} - U*x_{i,h}) (Normalised first order statistics)
+  const blitz::Array<double,1>& Fi = m_Facc[id];
+  const blitz::Array<double,1>& m = mb.getUbmMean();
+  const blitz::Array<double,1>& y = m_y[id];
+  bob::core::array::repelem(m_Nacc[id], m_tmp_CD);
+  bob::math::prod(V, y, m_tmp_CD_b); // m_tmp_CD_b = V * y
+  m_cache_Fn_z_i = Fi - m_tmp_CD * (m + m_tmp_CD_b); // Fn_yi = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - V*y_{i})
+
+  const blitz::Array<double,2>& X = m_x[id];
+  blitz::Range rall = blitz::Range::all();
+  for (int h=0; h<X.extent(1); ++h) // Loops over the sessions
+  {
+    const blitz::Array<double,1>& Nh = stats[h]->n; // Nh = N_{i,h} (length: C)
+    bob::core::array::repelem(Nh, m_tmp_CD);
+    blitz::Array<double,1> Xh = X(rall, h); // Xh = x_{i,h} (length: ru)
+    bob::math::prod(U, Xh, m_tmp_CD_b);
+    m_cache_Fn_z_i -= m_tmp_CD * m_tmp_CD_b;
+  }
+  // Fn_z_i = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - V*y_{i} - U*x_{i,h})
+}
+
+void bob::learn::misc::FABaseTrainer::updateZ_i(const size_t id)
+{
+  // Computes zi = Azi * D^T.Sigma^-1 * Fn_zi
+  blitz::Array<double,1>& z = m_z[id];
+  // m_tmp_CD = m_cache_DtSigmaInv * m_cache_Fn_z_i = Dt*diag(sigma)^-1 * sum_{sessions h}(N_{i,h}*(o_{i,h} - m - V*y_{i} - U*x_{i,h})
+  z = m_cache_IdPlusDProd_i * m_cache_DtSigmaInv * m_cache_Fn_z_i;
+}
+
+void bob::learn::misc::FABaseTrainer::updateZ(const bob::learn::misc::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
+{
+  // Precomputation
+  computeDtSigmaInv(m);
+  computeDProd(m);
+  // Loops over all people
+  for (size_t id=0; id<m_Nid; ++id) { // NOTE(review): iterates m_Nid while siblings use stats.size(); equal after initUbmNidSumStatistics, but confirm
+    computeIdPlusDProd_i(id);
+    computeFn_z_i(m, stats[id], id);
+    updateZ_i(id);
+  }
+}
+
+void bob::learn::misc::FABaseTrainer::computeAccumulatorsD(
+  const bob::learn::misc::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
+{
+  // Initializes the cache accumulator
+  m_acc_D_A1 = 0.;
+  m_acc_D_A2 = 0.;
+  // Loops over all people
+  blitz::firstIndex i;
+  blitz::secondIndex j;
+  for (size_t id=0; id<stats.size(); ++id) {
+    computeIdPlusDProd_i(id);
+    computeFn_z_i(m, stats[id], id);
+
+    // Needs to return values to be accumulated for estimating D
+    blitz::Array<double,1> z = m_z[id];
+    bob::core::array::repelem(m_Nacc[id], m_tmp_CD);
+    m_acc_D_A1 += (m_cache_IdPlusDProd_i + z * z) * m_tmp_CD;
+    m_acc_D_A2 += m_cache_Fn_z_i * z;
+  }
+}
+
+void bob::learn::misc::FABaseTrainer::updateD(blitz::Array<double,1>& d)
+{
+  d = m_acc_D_A2 / m_acc_D_A1;
+}
+
+
diff --git a/bob/learn/misc/cpp/ISVTrainer.cpp b/bob/learn/misc/cpp/ISVTrainer.cpp
new file mode 100644
index 0000000..d0455e9
--- /dev/null
+++ b/bob/learn/misc/cpp/ISVTrainer.cpp
@@ -0,0 +1,144 @@
+/**
+ * @date Tue Jul 19 12:16:17 2011 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * @brief Joint Factor Analysis Trainer
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include <bob.learn.misc/ISVTrainer.h>
+#include <bob.core/check.h>
+#include <bob.core/array_copy.h>
+#include <bob.core/array_random.h>
+#include <bob.math/inv.h>
+#include <bob.math/linear.h>
+#include <bob.core/check.h>
+#include <bob.core/array_repmat.h>
+#include <algorithm>
+
+
+//////////////////////////// ISVTrainer ///////////////////////////
+bob::learn::misc::ISVTrainer::ISVTrainer(const size_t max_iterations, const double relevance_factor):
+  EMTrainer<bob::learn::misc::ISVBase, std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > >
+    (0.001, max_iterations, false),
+  m_relevance_factor(relevance_factor)
+{
+}
+
+bob::learn::misc::ISVTrainer::ISVTrainer(const bob::learn::misc::ISVTrainer& other):
+  EMTrainer<bob::learn::misc::ISVBase, std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > >
+    (other.m_convergence_threshold, other.m_max_iterations,
+     other.m_compute_likelihood),
+  m_relevance_factor(other.m_relevance_factor)
+{
+}
+
+bob::learn::misc::ISVTrainer::~ISVTrainer()
+{
+}
+
+bob::learn::misc::ISVTrainer& bob::learn::misc::ISVTrainer::operator=
+(const bob::learn::misc::ISVTrainer& other)
+{
+  if (this != &other)
+  {
+    bob::learn::misc::EMTrainer<bob::learn::misc::ISVBase,
+      std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > >::operator=(other);
+    m_relevance_factor = other.m_relevance_factor;
+  }
+  return *this;
+}
+
+bool bob::learn::misc::ISVTrainer::operator==(const bob::learn::misc::ISVTrainer& b) const
+{
+  return bob::learn::misc::EMTrainer<bob::learn::misc::ISVBase,
+            std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > >::operator==(b) &&
+          m_relevance_factor == b.m_relevance_factor;
+}
+
+bool bob::learn::misc::ISVTrainer::operator!=(const bob::learn::misc::ISVTrainer& b) const
+{
+  return !(this->operator==(b));
+}
+
+bool bob::learn::misc::ISVTrainer::is_similar_to(const bob::learn::misc::ISVTrainer& b,
+  const double r_epsilon, const double a_epsilon) const
+{
+  return bob::learn::misc::EMTrainer<bob::learn::misc::ISVBase,
+            std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > >::is_similar_to(b, r_epsilon, a_epsilon) &&
+          m_relevance_factor == b.m_relevance_factor;
+}
+
+void bob::learn::misc::ISVTrainer::initialize(bob::learn::misc::ISVBase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
+{ // caches dimensions/statistics, then randomizes U and derives D from the UBM variance
+  m_base_trainer.initUbmNidSumStatistics(machine.getBase(), ar);
+  m_base_trainer.initializeXYZ(ar);
+
+  blitz::Array<double,2>& U = machine.updateU();
+  bob::core::array::randn(*m_rng, U);
+  initializeD(machine);
+  machine.precompute();
+}
+
+void bob::learn::misc::ISVTrainer::initializeD(bob::learn::misc::ISVBase& machine) const
+{
+  // D = sqrt(variance(UBM) / relevance_factor)
+  blitz::Array<double,1> d = machine.updateD();
+  d = sqrt(machine.getBase().getUbmVariance() / m_relevance_factor);
+}
+
+void bob::learn::misc::ISVTrainer::finalize(bob::learn::misc::ISVBase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
+{ // intentionally empty: ISV training requires no finalization step
+}
+
+void bob::learn::misc::ISVTrainer::eStep(bob::learn::misc::ISVBase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
+{ // re-estimates latent x and z, then accumulates the statistics needed to update U
+  m_base_trainer.resetXYZ();
+
+  const bob::learn::misc::FABase& base = machine.getBase();
+  m_base_trainer.updateX(base, ar);
+  m_base_trainer.updateZ(base, ar);
+  m_base_trainer.computeAccumulatorsU(base, ar);
+}
+
+void bob::learn::misc::ISVTrainer::mStep(bob::learn::misc::ISVBase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
+{ // only U is re-estimated in ISV; ar is unused here (required by the EMTrainer interface)
+  blitz::Array<double,2>& U = machine.updateU();
+  m_base_trainer.updateU(U);
+  machine.precompute();
+}
+
+double bob::learn::misc::ISVTrainer::computeLikelihood(bob::learn::misc::ISVBase& machine)
+{
+  // TODO
+  return 0;
+}
+
+void bob::learn::misc::ISVTrainer::enrol(bob::learn::misc::ISVMachine& machine,
+  const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& ar,
+  const size_t n_iter)
+{ // wraps the single client's sessions in an outer vector so the FABase trainer API applies
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > vvec;
+  vvec.push_back(ar);
+
+  const bob::learn::misc::FABase& fb = machine.getISVBase()->getBase();
+
+  m_base_trainer.initUbmNidSumStatistics(fb, vvec);
+  m_base_trainer.initializeXYZ(vvec);
+
+  for (size_t i=0; i<n_iter; ++i) {
+    m_base_trainer.updateX(fb, vvec);
+    m_base_trainer.updateZ(fb, vvec);
+  }
+
+  const blitz::Array<double,1> z(m_base_trainer.getZ()[0]);
+  machine.setZ(z);
+}
+
+
+
diff --git a/bob/learn/misc/cpp/JFATrainer.cpp b/bob/learn/misc/cpp/JFATrainer.cpp
index b53aa7c..13dea3c 100644
--- a/bob/learn/misc/cpp/JFATrainer.cpp
+++ b/bob/learn/misc/cpp/JFATrainer.cpp
@@ -18,678 +18,24 @@
 #include <algorithm>
 
 
-bob::learn::misc::FABaseTrainer::FABaseTrainer():
-  m_Nid(0), m_dim_C(0), m_dim_D(0), m_dim_ru(0), m_dim_rv(0),
-  m_x(0), m_y(0), m_z(0), m_Nacc(0), m_Facc(0)
-{
-}
-
-bob::learn::misc::FABaseTrainer::FABaseTrainer(const bob::learn::misc::FABaseTrainer& other)
-{
-}
-
-bob::learn::misc::FABaseTrainer::~FABaseTrainer()
-{
-}
-
-void bob::learn::misc::FABaseTrainer::checkStatistics(
-  const bob::learn::misc::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
-{
-  for (size_t id=0; id<stats.size(); ++id) {
-    for (size_t s=0; s<stats[id].size(); ++s) {
-      if (stats[id][s]->sumPx.extent(0) != (int)m_dim_C) {
-        boost::format m("GMMStats C dimension parameter = %d is different than the expected value of %d");
-        m % stats[id][s]->sumPx.extent(0) % (int)m_dim_C;
-        throw std::runtime_error(m.str());
-      }
-      if (stats[id][s]->sumPx.extent(1) != (int)m_dim_D) {
-        boost::format m("GMMStats D dimension parameter = %d is different than the expected value of %d");
-        m % stats[id][s]->sumPx.extent(1) % (int)m_dim_D;
-        throw std::runtime_error(m.str());
-      }
-    }
-  }
-}
-
-
-void bob::learn::misc::FABaseTrainer::initUbmNidSumStatistics(
-  const bob::learn::misc::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
-{
-  m_Nid = stats.size();
-  boost::shared_ptr<bob::learn::misc::GMMMachine> ubm = m.getUbm();
-  // Put UBM in cache
-  m_dim_C = ubm->getNGaussians();
-  m_dim_D = ubm->getNInputs();
-  m_dim_ru = m.getDimRu();
-  m_dim_rv = m.getDimRv();
-  // Check statistics dimensionality
-  checkStatistics(m, stats);
-  // Precomputes the sum of the statistics for each client/identity
-  precomputeSumStatisticsN(stats);
-  precomputeSumStatisticsF(stats);
-  // Cache and working arrays
-  initCache();
-}
-
-void bob::learn::misc::FABaseTrainer::precomputeSumStatisticsN(
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
-{
-  m_Nacc.clear();
-  blitz::Array<double,1> Nsum(m_dim_C);
-  for (size_t id=0; id<stats.size(); ++id) {
-    Nsum = 0.;
-    for (size_t s=0; s<stats[id].size(); ++s) {
-      Nsum += stats[id][s]->n;
-    }
-    m_Nacc.push_back(bob::core::array::ccopy(Nsum));
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::precomputeSumStatisticsF(
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
-{
-  m_Facc.clear();
-  blitz::Array<double,1> Fsum(m_dim_C*m_dim_D);
-  for (size_t id=0; id<stats.size(); ++id) {
-    Fsum = 0.;
-    for (size_t s=0; s<stats[id].size(); ++s) {
-      for (size_t c=0; c<m_dim_C; ++c) {
-        blitz::Array<double,1> Fsum_c = Fsum(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1));
-        Fsum_c += stats[id][s]->sumPx(c,blitz::Range::all());
-      }
-    }
-    m_Facc.push_back(bob::core::array::ccopy(Fsum));
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::initializeXYZ(const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& vec)
-{
-  m_x.clear();
-  m_y.clear();
-  m_z.clear();
-
-  blitz::Array<double,1> z0(m_dim_C*m_dim_D);
-  z0 = 0;
-  blitz::Array<double,1> y0(m_dim_rv);
-  y0 = 0;
-  blitz::Array<double,2> x0(m_dim_ru,0);
-  x0 = 0;
-  for (size_t i=0; i<vec.size(); ++i)
-  {
-    m_z.push_back(bob::core::array::ccopy(z0));
-    m_y.push_back(bob::core::array::ccopy(y0));
-    x0.resize(m_dim_ru, vec[i].size());
-    x0 = 0;
-    m_x.push_back(bob::core::array::ccopy(x0));
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::resetXYZ()
-{
-  for (size_t i=0; i<m_x.size(); ++i)
-  {
-    m_x[i] = 0.;
-    m_y[i] = 0.;
-    m_z[i] = 0.;
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::initCache()
-{
-  const size_t dim_CD = m_dim_C*m_dim_D;
-  // U
-  m_cache_UtSigmaInv.resize(m_dim_ru, dim_CD);
-  m_cache_UProd.resize(m_dim_C, m_dim_ru, m_dim_ru);
-  m_cache_IdPlusUProd_ih.resize(m_dim_ru, m_dim_ru);
-  m_cache_Fn_x_ih.resize(dim_CD);
-  m_acc_U_A1.resize(m_dim_C, m_dim_ru, m_dim_ru);
-  m_acc_U_A2.resize(dim_CD, m_dim_ru);
-  // V
-  m_cache_VtSigmaInv.resize(m_dim_rv, dim_CD);
-  m_cache_VProd.resize(m_dim_C, m_dim_rv, m_dim_rv);
-  m_cache_IdPlusVProd_i.resize(m_dim_rv, m_dim_rv);
-  m_cache_Fn_y_i.resize(dim_CD);
-  m_acc_V_A1.resize(m_dim_C, m_dim_rv, m_dim_rv);
-  m_acc_V_A2.resize(dim_CD, m_dim_rv);
-  // D
-  m_cache_DtSigmaInv.resize(dim_CD);
-  m_cache_DProd.resize(dim_CD);
-  m_cache_IdPlusDProd_i.resize(dim_CD);
-  m_cache_Fn_z_i.resize(dim_CD);
-  m_acc_D_A1.resize(dim_CD);
-  m_acc_D_A2.resize(dim_CD);
-
-  // tmp
-  m_tmp_CD.resize(dim_CD);
-  m_tmp_CD_b.resize(dim_CD);
-
-  m_tmp_ru.resize(m_dim_ru);
-  m_tmp_ruD.resize(m_dim_ru, m_dim_D);
-  m_tmp_ruru.resize(m_dim_ru, m_dim_ru);
-
-  m_tmp_rv.resize(m_dim_rv);
-  m_tmp_rvD.resize(m_dim_rv, m_dim_D);
-  m_tmp_rvrv.resize(m_dim_rv, m_dim_rv);
-}
-
-
-
-//////////////////////////// V ///////////////////////////
-void bob::learn::misc::FABaseTrainer::computeVtSigmaInv(const bob::learn::misc::FABase& m)
-{
-  const blitz::Array<double,2>& V = m.getV();
-  // Blitz compatibility: ugly fix (const_cast, as old blitz version does not
-  // provide a non-const version of transpose())
-  const blitz::Array<double,2> Vt = const_cast<blitz::Array<double,2>&>(V).transpose(1,0);
-  const blitz::Array<double,1>& sigma = m.getUbmVariance();
-  blitz::firstIndex i;
-  blitz::secondIndex j;
-  m_cache_VtSigmaInv = Vt(i,j) / sigma(j); // Vt * diag(sigma)^-1
-}
-
-void bob::learn::misc::FABaseTrainer::computeVProd(const bob::learn::misc::FABase& m)
-{
-  const blitz::Array<double,2>& V = m.getV();
-  blitz::firstIndex i;
-  blitz::secondIndex j;
-  const blitz::Array<double,1>& sigma = m.getUbmVariance();
-  blitz::Range rall = blitz::Range::all();
-  for (size_t c=0; c<m_dim_C; ++c)
-  {
-    blitz::Array<double,2> VProd_c = m_cache_VProd(c, rall, rall);
-    blitz::Array<double,2> Vv_c = V(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1), rall);
-    blitz::Array<double,2> Vt_c = Vv_c.transpose(1,0);
-    blitz::Array<double,1> sigma_c = sigma(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1));
-    m_tmp_rvD = Vt_c(i,j) / sigma_c(j); // Vt_c * diag(sigma)^-1
-    bob::math::prod(m_tmp_rvD, Vv_c, VProd_c);
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::computeIdPlusVProd_i(const size_t id)
-{
-  const blitz::Array<double,1>& Ni = m_Nacc[id];
-  bob::math::eye(m_tmp_rvrv); // m_tmp_rvrv = I
-  blitz::Range rall = blitz::Range::all();
-  for (size_t c=0; c<m_dim_C; ++c) {
-    blitz::Array<double,2> VProd_c = m_cache_VProd(c, rall, rall);
-    m_tmp_rvrv += VProd_c * Ni(c);
-  }
-  bob::math::inv(m_tmp_rvrv, m_cache_IdPlusVProd_i); // m_cache_IdPlusVProd_i = ( I+Vt*diag(sigma)^-1*Ni*V)^-1
-}
-
-void bob::learn::misc::FABaseTrainer::computeFn_y_i(const bob::learn::misc::FABase& mb,
-  const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& stats, const size_t id)
-{
-  const blitz::Array<double,2>& U = mb.getU();
-  const blitz::Array<double,1>& d = mb.getD();
-  // Compute Fn_yi = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i} - U*x_{i,h}) (Normalised first order statistics)
-  const blitz::Array<double,1>& Fi = m_Facc[id];
-  const blitz::Array<double,1>& m = mb.getUbmMean();
-  const blitz::Array<double,1>& z = m_z[id];
-  bob::core::array::repelem(m_Nacc[id], m_tmp_CD);
-  m_cache_Fn_y_i = Fi - m_tmp_CD * (m + d * z); // Fn_yi = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i})
-  const blitz::Array<double,2>& X = m_x[id];
-  blitz::Range rall = blitz::Range::all();
-  for (int h=0; h<X.extent(1); ++h) // Loops over the sessions
-  {
-    blitz::Array<double,1> Xh = X(rall, h); // Xh = x_{i,h} (length: ru)
-    bob::math::prod(U, Xh, m_tmp_CD_b); // m_tmp_CD_b = U*x_{i,h}
-    const blitz::Array<double,1>& Nih = stats[h]->n;
-    bob::core::array::repelem(Nih, m_tmp_CD);
-    m_cache_Fn_y_i -= m_tmp_CD * m_tmp_CD_b; // N_{i,h} * U * x_{i,h}
-  }
-  // Fn_yi = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i} - U*x_{i,h})
-}
-
-void bob::learn::misc::FABaseTrainer::updateY_i(const size_t id)
-{
-  // Computes yi = Ayi * Cvs * Fn_yi
-  blitz::Array<double,1>& y = m_y[id];
-  // m_tmp_rv = m_cache_VtSigmaInv * m_cache_Fn_y_i = Vt*diag(sigma)^-1 * sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i} - U*x_{i,h})
-  bob::math::prod(m_cache_VtSigmaInv, m_cache_Fn_y_i, m_tmp_rv);
-  bob::math::prod(m_cache_IdPlusVProd_i, m_tmp_rv, y);
-}
-
-void bob::learn::misc::FABaseTrainer::updateY(const bob::learn::misc::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
-{
-  // Precomputation
-  computeVtSigmaInv(m);
-  computeVProd(m);
-  // Loops over all people
-  for (size_t id=0; id<stats.size(); ++id) {
-    computeIdPlusVProd_i(id);
-    computeFn_y_i(m, stats[id], id);
-    updateY_i(id);
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::computeAccumulatorsV(
-  const bob::learn::misc::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
-{
-  // Initializes the cache accumulator
-  m_acc_V_A1 = 0.;
-  m_acc_V_A2 = 0.;
-  // Loops over all people
-  blitz::firstIndex i;
-  blitz::secondIndex j;
-  blitz::Range rall = blitz::Range::all();
-  for (size_t id=0; id<stats.size(); ++id) {
-    computeIdPlusVProd_i(id);
-    computeFn_y_i(m, stats[id], id);
-
-    // Needs to return values to be accumulated for estimating V
-    const blitz::Array<double,1>& y = m_y[id];
-    m_tmp_rvrv = m_cache_IdPlusVProd_i;
-    m_tmp_rvrv += y(i) * y(j);
-    for (size_t c=0; c<m_dim_C; ++c)
-    {
-      blitz::Array<double,2> A1_y_c = m_acc_V_A1(c, rall, rall);
-      A1_y_c += m_tmp_rvrv * m_Nacc[id](c);
-    }
-    m_acc_V_A2 += m_cache_Fn_y_i(i) * y(j);
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::updateV(blitz::Array<double,2>& V)
-{
-  blitz::Range rall = blitz::Range::all();
-  for (size_t c=0; c<m_dim_C; ++c)
-  {
-    const blitz::Array<double,2> A1 = m_acc_V_A1(c, rall, rall);
-    bob::math::inv(A1, m_tmp_rvrv);
-    const blitz::Array<double,2> A2 = m_acc_V_A2(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1), rall);
-    blitz::Array<double,2> V_c = V(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1), rall);
-    bob::math::prod(A2, m_tmp_rvrv, V_c);
-  }
-}
-
-
-//////////////////////////// U ///////////////////////////
-void bob::learn::misc::FABaseTrainer::computeUtSigmaInv(const bob::learn::misc::FABase& m)
-{
-  const blitz::Array<double,2>& U = m.getU();
-  // Blitz compatibility: ugly fix (const_cast, as old blitz version does not
-  // provide a non-const version of transpose())
-  const blitz::Array<double,2> Ut = const_cast<blitz::Array<double,2>&>(U).transpose(1,0);
-  const blitz::Array<double,1>& sigma = m.getUbmVariance();
-  blitz::firstIndex i;
-  blitz::secondIndex j;
-  m_cache_UtSigmaInv = Ut(i,j) / sigma(j); // Ut * diag(sigma)^-1
-}
-
-void bob::learn::misc::FABaseTrainer::computeUProd(const bob::learn::misc::FABase& m)
-{
-  const blitz::Array<double,2>& U = m.getU();
-  blitz::firstIndex i;
-  blitz::secondIndex j;
-  const blitz::Array<double,1>& sigma = m.getUbmVariance();
-  for (size_t c=0; c<m_dim_C; ++c)
-  {
-    blitz::Array<double,2> UProd_c = m_cache_UProd(c, blitz::Range::all(), blitz::Range::all());
-    blitz::Array<double,2> Uu_c = U(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1), blitz::Range::all());
-    blitz::Array<double,2> Ut_c = Uu_c.transpose(1,0);
-    blitz::Array<double,1> sigma_c = sigma(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1));
-    m_tmp_ruD = Ut_c(i,j) / sigma_c(j); // Ut_c * diag(sigma)^-1
-    bob::math::prod(m_tmp_ruD, Uu_c, UProd_c);
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::computeIdPlusUProd_ih(
-  const boost::shared_ptr<bob::learn::misc::GMMStats>& stats)
-{
-  const blitz::Array<double,1>& Nih = stats->n;
-  bob::math::eye(m_tmp_ruru); // m_tmp_ruru = I
-  for (size_t c=0; c<m_dim_C; ++c) {
-    blitz::Array<double,2> UProd_c = m_cache_UProd(c,blitz::Range::all(),blitz::Range::all());
-    m_tmp_ruru += UProd_c * Nih(c);
-  }
-  bob::math::inv(m_tmp_ruru, m_cache_IdPlusUProd_ih); // m_cache_IdPlusUProd_ih = ( I+Ut*diag(sigma)^-1*Ni*U)^-1
-}
-
-void bob::learn::misc::FABaseTrainer::computeFn_x_ih(const bob::learn::misc::FABase& mb,
-  const boost::shared_ptr<bob::learn::misc::GMMStats>& stats, const size_t id)
-{
-  const blitz::Array<double,2>& V = mb.getV();
-  const blitz::Array<double,1>& d =  mb.getD();
-  // Compute Fn_x_ih = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i} - V*y_{i}) (Normalised first order statistics)
-  const blitz::Array<double,2>& Fih = stats->sumPx;
-  const blitz::Array<double,1>& m = mb.getUbmMean();
-  const blitz::Array<double,1>& z = m_z[id];
-  const blitz::Array<double,1>& Nih = stats->n;
-  bob::core::array::repelem(Nih, m_tmp_CD);
-  for (size_t c=0; c<m_dim_C; ++c) {
-    blitz::Array<double,1> Fn_x_ih_c = m_cache_Fn_x_ih(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1));
-    Fn_x_ih_c = Fih(c,blitz::Range::all());
-  }
-  m_cache_Fn_x_ih -= m_tmp_CD * (m + d * z); // Fn_x_ih = N_{i,h}*(o_{i,h} - m - D*z_{i})
-
-  const blitz::Array<double,1>& y = m_y[id];
-  bob::math::prod(V, y, m_tmp_CD_b);
-  m_cache_Fn_x_ih -= m_tmp_CD * m_tmp_CD_b;
-  // Fn_x_ih = N_{i,h}*(o_{i,h} - m - D*z_{i} - V*y_{i})
-}
-
-void bob::learn::misc::FABaseTrainer::updateX_ih(const size_t id, const size_t h)
-{
-  // Computes xih = Axih * Cus * Fn_x_ih
-  blitz::Array<double,1> x = m_x[id](blitz::Range::all(), h);
-  // m_tmp_ru = m_cache_UtSigmaInv * m_cache_Fn_x_ih = Ut*diag(sigma)^-1 * N_{i,h}*(o_{i,h} - m - D*z_{i} - V*y_{i})
-  bob::math::prod(m_cache_UtSigmaInv, m_cache_Fn_x_ih, m_tmp_ru);
-  bob::math::prod(m_cache_IdPlusUProd_ih, m_tmp_ru, x);
-}
-
-void bob::learn::misc::FABaseTrainer::updateX(const bob::learn::misc::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
-{
-  // Precomputation
-  computeUtSigmaInv(m);
-  computeUProd(m);
-  // Loops over all people
-  for (size_t id=0; id<stats.size(); ++id) {
-    int n_session_i = stats[id].size();
-    for (int s=0; s<n_session_i; ++s) {
-      computeIdPlusUProd_ih(stats[id][s]);
-      computeFn_x_ih(m, stats[id][s], id);
-      updateX_ih(id, s);
-    }
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::computeAccumulatorsU(
-  const bob::learn::misc::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
-{
-  // Initializes the cache accumulator
-  m_acc_U_A1 = 0.;
-  m_acc_U_A2 = 0.;
-  // Loops over all people
-  blitz::firstIndex i;
-  blitz::secondIndex j;
-  blitz::Range rall = blitz::Range::all();
-  for (size_t id=0; id<stats.size(); ++id) {
-    int n_session_i = stats[id].size();
-    for (int h=0; h<n_session_i; ++h) {
-      computeIdPlusUProd_ih(stats[id][h]);
-      computeFn_x_ih(m, stats[id][h], id);
-
-      // Needs to return values to be accumulated for estimating U
-      blitz::Array<double,1> x = m_x[id](rall, h);
-      m_tmp_ruru = m_cache_IdPlusUProd_ih;
-      m_tmp_ruru += x(i) * x(j);
-      for (int c=0; c<(int)m_dim_C; ++c)
-      {
-        blitz::Array<double,2> A1_x_c = m_acc_U_A1(c,rall,rall);
-        A1_x_c += m_tmp_ruru * stats[id][h]->n(c);
-      }
-      m_acc_U_A2 += m_cache_Fn_x_ih(i) * x(j);
-    }
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::updateU(blitz::Array<double,2>& U)
-{
-  for (size_t c=0; c<m_dim_C; ++c)
-  {
-    const blitz::Array<double,2> A1 = m_acc_U_A1(c,blitz::Range::all(),blitz::Range::all());
-    bob::math::inv(A1, m_tmp_ruru);
-    const blitz::Array<double,2> A2 = m_acc_U_A2(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1),blitz::Range::all());
-    blitz::Array<double,2> U_c = U(blitz::Range(c*m_dim_D,(c+1)*m_dim_D-1),blitz::Range::all());
-    bob::math::prod(A2, m_tmp_ruru, U_c);
-  }
-}
-
-
-//////////////////////////// D ///////////////////////////
-void bob::learn::misc::FABaseTrainer::computeDtSigmaInv(const bob::learn::misc::FABase& m)
-{
-  const blitz::Array<double,1>& d = m.getD();
-  const blitz::Array<double,1>& sigma = m.getUbmVariance();
-  m_cache_DtSigmaInv = d / sigma; // Dt * diag(sigma)^-1
-}
-
-void bob::learn::misc::FABaseTrainer::computeDProd(const bob::learn::misc::FABase& m)
-{
-  const blitz::Array<double,1>& d = m.getD();
-  const blitz::Array<double,1>& sigma = m.getUbmVariance();
-  m_cache_DProd = d / sigma * d; // Dt * diag(sigma)^-1 * D
-}
-
-void bob::learn::misc::FABaseTrainer::computeIdPlusDProd_i(const size_t id)
-{
-  const blitz::Array<double,1>& Ni = m_Nacc[id];
-  bob::core::array::repelem(Ni, m_tmp_CD); // m_tmp_CD = Ni 'repmat'
-  m_cache_IdPlusDProd_i = 1.; // m_cache_IdPlusDProd_i = Id
-  m_cache_IdPlusDProd_i += m_cache_DProd * m_tmp_CD; // m_cache_IdPlusDProd_i = I+Dt*diag(sigma)^-1*Ni*D
-  m_cache_IdPlusDProd_i = 1 / m_cache_IdPlusDProd_i; // m_cache_IdPlusVProd_i = (I+Dt*diag(sigma)^-1*Ni*D)^-1
-}
-
-void bob::learn::misc::FABaseTrainer::computeFn_z_i(
-  const bob::learn::misc::FABase& mb,
-  const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& stats, const size_t id)
-{
-  const blitz::Array<double,2>& U = mb.getU();
-  const blitz::Array<double,2>& V = mb.getV();
-  // Compute Fn_z_i = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - V*y_{i} - U*x_{i,h}) (Normalised first order statistics)
-  const blitz::Array<double,1>& Fi = m_Facc[id];
-  const blitz::Array<double,1>& m = mb.getUbmMean();
-  const blitz::Array<double,1>& y = m_y[id];
-  bob::core::array::repelem(m_Nacc[id], m_tmp_CD);
-  bob::math::prod(V, y, m_tmp_CD_b); // m_tmp_CD_b = V * y
-  m_cache_Fn_z_i = Fi - m_tmp_CD * (m + m_tmp_CD_b); // Fn_yi = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - V*y_{i})
-
-  const blitz::Array<double,2>& X = m_x[id];
-  blitz::Range rall = blitz::Range::all();
-  for (int h=0; h<X.extent(1); ++h) // Loops over the sessions
-  {
-    const blitz::Array<double,1>& Nh = stats[h]->n; // Nh = N_{i,h} (length: C)
-    bob::core::array::repelem(Nh, m_tmp_CD);
-    blitz::Array<double,1> Xh = X(rall, h); // Xh = x_{i,h} (length: ru)
-    bob::math::prod(U, Xh, m_tmp_CD_b);
-    m_cache_Fn_z_i -= m_tmp_CD * m_tmp_CD_b;
-  }
-  // Fn_z_i = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - V*y_{i} - U*x_{i,h})
-}
-
-void bob::learn::misc::FABaseTrainer::updateZ_i(const size_t id)
-{
-  // Computes zi = Azi * D^T.Sigma^-1 * Fn_zi
-  blitz::Array<double,1>& z = m_z[id];
-  // m_tmp_CD = m_cache_DtSigmaInv * m_cache_Fn_z_i = Dt*diag(sigma)^-1 * sum_{sessions h}(N_{i,h}*(o_{i,h} - m - V*y_{i} - U*x_{i,h})
-  z = m_cache_IdPlusDProd_i * m_cache_DtSigmaInv * m_cache_Fn_z_i;
-}
-
-void bob::learn::misc::FABaseTrainer::updateZ(const bob::learn::misc::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
-{
-  // Precomputation
-  computeDtSigmaInv(m);
-  computeDProd(m);
-  // Loops over all people
-  for (size_t id=0; id<m_Nid; ++id) {
-    computeIdPlusDProd_i(id);
-    computeFn_z_i(m, stats[id], id);
-    updateZ_i(id);
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::computeAccumulatorsD(
-  const bob::learn::misc::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
-{
-  // Initializes the cache accumulator
-  m_acc_D_A1 = 0.;
-  m_acc_D_A2 = 0.;
-  // Loops over all people
-  blitz::firstIndex i;
-  blitz::secondIndex j;
-  for (size_t id=0; id<stats.size(); ++id) {
-    computeIdPlusDProd_i(id);
-    computeFn_z_i(m, stats[id], id);
-
-    // Needs to return values to be accumulated for estimating D
-    blitz::Array<double,1> z = m_z[id];
-    bob::core::array::repelem(m_Nacc[id], m_tmp_CD);
-    m_acc_D_A1 += (m_cache_IdPlusDProd_i + z * z) * m_tmp_CD;
-    m_acc_D_A2 += m_cache_Fn_z_i * z;
-  }
-}
-
-void bob::learn::misc::FABaseTrainer::updateD(blitz::Array<double,1>& d)
-{
-  d = m_acc_D_A2 / m_acc_D_A1;
-}
-
-
-
-//////////////////////////// ISVTrainer ///////////////////////////
-bob::learn::misc::ISVTrainer::ISVTrainer(const size_t max_iterations, const double relevance_factor):
-  EMTrainer<bob::learn::misc::ISVBase, std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > >
-    (0.001, max_iterations, false),
-  m_relevance_factor(relevance_factor)
-{
-}
-
-bob::learn::misc::ISVTrainer::ISVTrainer(const bob::learn::misc::ISVTrainer& other):
-  EMTrainer<bob::learn::misc::ISVBase, std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > >
-    (other.m_convergence_threshold, other.m_max_iterations,
-     other.m_compute_likelihood),
-  m_relevance_factor(other.m_relevance_factor)
-{
-}
-
-bob::learn::misc::ISVTrainer::~ISVTrainer()
-{
-}
-
-bob::learn::misc::ISVTrainer& bob::learn::misc::ISVTrainer::operator=
-(const bob::learn::misc::ISVTrainer& other)
-{
-  if (this != &other)
-  {
-    bob::learn::misc::EMTrainer<bob::learn::misc::ISVBase,
-      std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > >::operator=(other);
-    m_relevance_factor = other.m_relevance_factor;
-  }
-  return *this;
-}
-
-bool bob::learn::misc::ISVTrainer::operator==(const bob::learn::misc::ISVTrainer& b) const
-{
-  return bob::learn::misc::EMTrainer<bob::learn::misc::ISVBase,
-            std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > >::operator==(b) &&
-          m_relevance_factor == b.m_relevance_factor;
-}
-
-bool bob::learn::misc::ISVTrainer::operator!=(const bob::learn::misc::ISVTrainer& b) const
-{
-  return !(this->operator==(b));
-}
-
-bool bob::learn::misc::ISVTrainer::is_similar_to(const bob::learn::misc::ISVTrainer& b,
-  const double r_epsilon, const double a_epsilon) const
-{
-  return bob::learn::misc::EMTrainer<bob::learn::misc::ISVBase,
-            std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > >::is_similar_to(b, r_epsilon, a_epsilon) &&
-          m_relevance_factor == b.m_relevance_factor;
-}
-
-void bob::learn::misc::ISVTrainer::initialize(bob::learn::misc::ISVBase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
-{
-  m_base_trainer.initUbmNidSumStatistics(machine.getBase(), ar);
-  m_base_trainer.initializeXYZ(ar);
-
-  blitz::Array<double,2>& U = machine.updateU();
-  bob::core::array::randn(*m_rng, U);
-  initializeD(machine);
-  machine.precompute();
-}
-
-void bob::learn::misc::ISVTrainer::initializeD(bob::learn::misc::ISVBase& machine) const
-{
-  // D = sqrt(variance(UBM) / relevance_factor)
-  blitz::Array<double,1> d = machine.updateD();
-  d = sqrt(machine.getBase().getUbmVariance() / m_relevance_factor);
-}
-
-void bob::learn::misc::ISVTrainer::finalize(bob::learn::misc::ISVBase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
-{
-}
-
-void bob::learn::misc::ISVTrainer::eStep(bob::learn::misc::ISVBase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
-{
-  m_base_trainer.resetXYZ();
-
-  const bob::learn::misc::FABase& base = machine.getBase();
-  m_base_trainer.updateX(base, ar);
-  m_base_trainer.updateZ(base, ar);
-  m_base_trainer.computeAccumulatorsU(base, ar);
-}
-
-void bob::learn::misc::ISVTrainer::mStep(bob::learn::misc::ISVBase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
-{
-  blitz::Array<double,2>& U = machine.updateU();
-  m_base_trainer.updateU(U);
-  machine.precompute();
-}
-
-double bob::learn::misc::ISVTrainer::computeLikelihood(bob::learn::misc::ISVBase& machine)
-{
-  // TODO
-  return 0;
-}
-
-void bob::learn::misc::ISVTrainer::enrol(bob::learn::misc::ISVMachine& machine,
-  const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& ar,
-  const size_t n_iter)
-{
-  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > vvec;
-  vvec.push_back(ar);
-
-  const bob::learn::misc::FABase& fb = machine.getISVBase()->getBase();
-
-  m_base_trainer.initUbmNidSumStatistics(fb, vvec);
-  m_base_trainer.initializeXYZ(vvec);
-
-  for (size_t i=0; i<n_iter; ++i) {
-    m_base_trainer.updateX(fb, vvec);
-    m_base_trainer.updateZ(fb, vvec);
-  }
-
-  const blitz::Array<double,1> z(m_base_trainer.getZ()[0]);
-  machine.setZ(z);
-}
-
-
-
 //////////////////////////// JFATrainer ///////////////////////////
 bob::learn::misc::JFATrainer::JFATrainer(const size_t max_iterations):
-  m_max_iterations(max_iterations), m_rng(new boost::mt19937())
-{
-}
+  m_rng(new boost::mt19937())
+{}
 
 bob::learn::misc::JFATrainer::JFATrainer(const bob::learn::misc::JFATrainer& other):
-  m_max_iterations(other.m_max_iterations), m_rng(other.m_rng)
-{
-}
+ m_rng(other.m_rng)
+{}
 
 bob::learn::misc::JFATrainer::~JFATrainer()
-{
-}
+{}
 
 bob::learn::misc::JFATrainer& bob::learn::misc::JFATrainer::operator=
 (const bob::learn::misc::JFATrainer& other)
 {
   if (this != &other)
   {
-    m_max_iterations = other.m_max_iterations;
+    //m_max_iterations = other.m_max_iterations;
     m_rng = other.m_rng;
   }
   return *this;
@@ -697,7 +43,8 @@ bob::learn::misc::JFATrainer& bob::learn::misc::JFATrainer::operator=
 
 bool bob::learn::misc::JFATrainer::operator==(const bob::learn::misc::JFATrainer& b) const
 {
-  return m_max_iterations == b.m_max_iterations && *m_rng == *(b.m_rng);
+  //return m_max_iterations == b.m_max_iterations && *m_rng == *(b.m_rng);
+  return *m_rng == *(b.m_rng);
 }
 
 bool bob::learn::misc::JFATrainer::operator!=(const bob::learn::misc::JFATrainer& b) const
@@ -708,7 +55,8 @@ bool bob::learn::misc::JFATrainer::operator!=(const bob::learn::misc::JFATrainer
 bool bob::learn::misc::JFATrainer::is_similar_to(const bob::learn::misc::JFATrainer& b,
   const double r_epsilon, const double a_epsilon) const
 {
-  return m_max_iterations == b.m_max_iterations && *m_rng == *(b.m_rng);
+  //return m_max_iterations == b.m_max_iterations && *m_rng == *(b.m_rng);
+  return *m_rng == *(b.m_rng);
 }
 
 void bob::learn::misc::JFATrainer::initialize(bob::learn::misc::JFABase& machine,
@@ -793,6 +141,7 @@ void bob::learn::misc::JFATrainer::finalize3(bob::learn::misc::JFABase& machine,
 {
 }
 
+/*
 void bob::learn::misc::JFATrainer::train_loop(bob::learn::misc::JFABase& machine,
   const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
 {
@@ -814,15 +163,18 @@ void bob::learn::misc::JFATrainer::train_loop(bob::learn::misc::JFABase& machine
     mStep3(machine, ar);
   }
   finalize3(machine, ar);
-}
+}*/
 
+/*
 void bob::learn::misc::JFATrainer::train(bob::learn::misc::JFABase& machine,
   const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
 {
   initialize(machine, ar);
   train_loop(machine, ar);
 }
+*/
 
+/*
 void bob::learn::misc::JFATrainer::enrol(bob::learn::misc::JFAMachine& machine,
   const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& ar,
   const size_t n_iter)
@@ -846,4 +198,5 @@ void bob::learn::misc::JFATrainer::enrol(bob::learn::misc::JFAMachine& machine,
   machine.setY(y);
   machine.setZ(z);
 }
+*/
 
diff --git a/bob/learn/misc/include/bob.learn.misc/FABaseTrainer.h b/bob/learn/misc/include/bob.learn.misc/FABaseTrainer.h
new file mode 100644
index 0000000..c5b2734
--- /dev/null
+++ b/bob/learn/misc/include/bob.learn.misc/FABaseTrainer.h
@@ -0,0 +1,350 @@
+/**
+ * @date Sat Jan 31 17:16:17 2015 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ *
+ * @brief FABaseTrainer functions
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#ifndef BOB_LEARN_MISC_FABASETRAINER_H
+#define BOB_LEARN_MISC_FABASETRAINER_H
+
+#include <blitz/array.h>
+#include <bob.learn.misc/GMMStats.h>
+#include <bob.learn.misc/JFAMachine.h>
+#include <vector>
+
+#include <map>
+#include <string>
+#include <bob.core/array_copy.h>
+#include <boost/shared_ptr.hpp>
+#include <boost/random.hpp>
+#include <bob.core/logging.h>
+
+namespace bob { namespace learn { namespace misc {
+
+class FABaseTrainer
+{
+  public:
+    /**
+     * @brief Constructor
+     */
+    FABaseTrainer();
+
+    /**
+     * @brief Copy constructor
+     */
+    FABaseTrainer(const FABaseTrainer& other);
+
+    /**
+     * @brief Destructor
+     */
+    ~FABaseTrainer();
+
+    /**
+     * @brief Check that the dimensionality of the statistics match.
+     */
+    void checkStatistics(const bob::learn::misc::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
+
+    /**
+     * @brief Initialize the dimensionality, the UBM, the sums of the
+     * statistics and the number of identities.
+     */
+    void initUbmNidSumStatistics(const bob::learn::misc::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
+
+    /**
+     * @brief Precomputes the sums of the zeroth order statistics over the
+     * sessions for each client
+     */
+    void precomputeSumStatisticsN(const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
+    /**
+     * @brief Precomputes the sums of the first order statistics over the
+     * sessions for each client
+     */
+    void precomputeSumStatisticsF(const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
+
+    /**
+     * @brief Initializes (allocates and sets to zero) the x, y, z speaker
+     * factors
+     */
+    void initializeXYZ(const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
+
+    /**
+     * @brief Resets the x, y, z speaker factors to zero values
+     */
+    void resetXYZ();
+
+
+    /**** Y and V functions ****/
+    /**
+     * @brief Computes Vt * diag(sigma)^-1
+     */
+    void computeVtSigmaInv(const bob::learn::misc::FABase& m);
+    /**
+     * @brief Computes Vt_{c} * diag(sigma)^-1 * V_{c} for each Gaussian c
+     */
+    void computeVProd(const bob::learn::misc::FABase& m);
+    /**
+     * @brief Computes (I+Vt*diag(sigma)^-1*Ni*V)^-1 which occurs in the y
+     * estimation for the given person
+     */
+    void computeIdPlusVProd_i(const size_t id);
+    /**
+     * @brief Computes sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i} - U*x_{i,h})
+     * which occurs in the y estimation of the given person
+     */
+    void computeFn_y_i(const bob::learn::misc::FABase& m,
+      const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& stats,
+      const size_t id);
+    /**
+     * @brief Updates y_i (of the current person) and the accumulators to
+     * compute V with the cache values m_cache_IdPlusVprod_i, m_VtSigmaInv and
+     * m_cache_Fn_y_i
+     */
+    void updateY_i(const size_t id);
+    /**
+     * @brief Updates y and the accumulators to compute V
+     */
+    void updateY(const bob::learn::misc::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
+    /**
+     * @brief Computes the accumulators m_acc_V_A1 and m_acc_V_A2 for V
+     * V = A2 * A1^-1
+     */
+    void computeAccumulatorsV(const bob::learn::misc::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
+    /**
+     * @brief Updates V from the accumulators m_acc_V_A1 and m_acc_V_A2
+     */
+    void updateV(blitz::Array<double,2>& V);
+
+
+    /**** X and U functions ****/
+    /**
+     * @brief Computes Ut * diag(sigma)^-1
+     */
+    void computeUtSigmaInv(const bob::learn::misc::FABase& m);
+    /**
+     * @brief Computes Ut_{c} * diag(sigma)^-1 * U_{c} for each Gaussian c
+     */
+    void computeUProd(const bob::learn::misc::FABase& m);
+    /**
+     * @brief Computes (I+Ut*diag(sigma)^-1*Ni*U)^-1 which occurs in the x
+     * estimation
+     */
+    void computeIdPlusUProd_ih(const boost::shared_ptr<bob::learn::misc::GMMStats>& stats);
+    /**
+     * @brief Computes sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i} - U*x_{i,h})
+     * which occurs in the x estimation for the given person
+     */
+    void computeFn_x_ih(const bob::learn::misc::FABase& m,
+      const boost::shared_ptr<bob::learn::misc::GMMStats>& stats, const size_t id);
+    /**
+     * @brief Updates x_ih (of the current person/session) and the
+     * accumulators to compute U with the cache values m_cache_IdPlusVprod_i,
+     * m_VtSigmaInv and m_cache_Fn_y_i
+     */
+    void updateX_ih(const size_t id, const size_t h);
+    /**
+     * @brief Updates x
+     */
+    void updateX(const bob::learn::misc::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
+    /**
+     * @brief Computes the accumulators m_acc_U_A1 and m_acc_U_A2 for U
+     * U = A2 * A1^-1
+     */
+    void computeAccumulatorsU(const bob::learn::misc::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
+    /**
+     * @brief Updates U from the accumulators m_acc_U_A1 and m_acc_U_A2
+     */
+    void updateU(blitz::Array<double,2>& U);
+
+
+    /**** z and D functions ****/
+    /**
+     * @brief Computes diag(D) * diag(sigma)^-1
+     */
+    void computeDtSigmaInv(const bob::learn::misc::FABase& m);
+    /**
+     * @brief Computes Dt_{c} * diag(sigma)^-1 * D_{c} for each Gaussian c
+     */
+    void computeDProd(const bob::learn::misc::FABase& m);
+    /**
+     * @brief Computes (I+diag(d)t*diag(sigma)^-1*Ni*diag(d))^-1 which occurs
+     * in the z estimation for the given person
+     */
+    void computeIdPlusDProd_i(const size_t id);
+    /**
+     * @brief Computes sum_{sessions h}(N_{i,h}*(o_{i,h} - m - V*y_{i} - U*x_{i,h})
+     * which occurs in the z estimation for the given person
+     */
+    void computeFn_z_i(const bob::learn::misc::FABase& m,
+      const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& stats, const size_t id);
+    /**
+     * @brief Updates z_i (of the current person) and the accumulators to
+     * compute D with the cache values m_cache_IdPlusDProd_i, m_VtSigmaInv
+     * and m_cache_Fn_z_i
+     */
+    void updateZ_i(const size_t id);
+    /**
+     * @brief Updates z and the accumulators to compute D
+     */
+    void updateZ(const bob::learn::misc::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
+    /**
+     * @brief Computes the accumulators m_acc_D_A1 and m_acc_D_A2 for d
+     * d = A2 * A1^-1
+     */
+    void computeAccumulatorsD(const bob::learn::misc::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
+    /**
+     * @brief Updates d from the accumulators m_acc_D_A1 and m_acc_D_A2
+     */
+    void updateD(blitz::Array<double,1>& d);
+
+
+    /**
+     * @brief Get the zeroth order statistics
+     */
+    const std::vector<blitz::Array<double,1> >& getNacc() const
+    { return m_Nacc; }
+    /**
+     * @brief Get the first order statistics
+     */
+    const std::vector<blitz::Array<double,1> >& getFacc() const
+    { return m_Facc; }
+    /**
+     * @brief Get the x speaker factors
+     */
+    const std::vector<blitz::Array<double,2> >& getX() const
+    { return m_x; }
+    /**
+     * @brief Get the y speaker factors
+     */
+    const std::vector<blitz::Array<double,1> >& getY() const
+    { return m_y; }
+    /**
+     * @brief Get the z speaker factors
+     */
+    const std::vector<blitz::Array<double,1> >& getZ() const
+    { return m_z; }
+    /**
+     * @brief Set the x speaker factors
+     */
+    void setX(const std::vector<blitz::Array<double,2> >& X)
+    { m_x = X; }
+    /**
+     * @brief Set the y speaker factors
+     */
+    void setY(const std::vector<blitz::Array<double,1> >& y)
+    { m_y = y; }
+    /**
+     * @brief Set the z speaker factors
+     */
+    void setZ(const std::vector<blitz::Array<double,1> >& z)
+    { m_z = z; }
+
+    /**
+     * @brief Initializes the cache to process the given statistics
+     */
+    void initCache();
+
+    /**
+     * @brief Getters for the accumulators
+     */
+    const blitz::Array<double,3>& getAccVA1() const
+    { return m_acc_V_A1; }
+    const blitz::Array<double,2>& getAccVA2() const
+    { return m_acc_V_A2; }
+    const blitz::Array<double,3>& getAccUA1() const
+    { return m_acc_U_A1; }
+    const blitz::Array<double,2>& getAccUA2() const
+    { return m_acc_U_A2; }
+    const blitz::Array<double,1>& getAccDA1() const
+    { return m_acc_D_A1; }
+    const blitz::Array<double,1>& getAccDA2() const
+    { return m_acc_D_A2; }
+
+    /**
+     * @brief Setters for the accumulators, Very useful if the e-Step needs
+     * to be parallelized.
+     */
+    void setAccVA1(const blitz::Array<double,3>& acc)
+    { bob::core::array::assertSameShape(acc, m_acc_V_A1);
+      m_acc_V_A1 = acc; }
+    void setAccVA2(const blitz::Array<double,2>& acc)
+    { bob::core::array::assertSameShape(acc, m_acc_V_A2);
+      m_acc_V_A2 = acc; }
+    void setAccUA1(const blitz::Array<double,3>& acc)
+    { bob::core::array::assertSameShape(acc, m_acc_U_A1);
+      m_acc_U_A1 = acc; }
+    void setAccUA2(const blitz::Array<double,2>& acc)
+    { bob::core::array::assertSameShape(acc, m_acc_U_A2);
+      m_acc_U_A2 = acc; }
+    void setAccDA1(const blitz::Array<double,1>& acc)
+    { bob::core::array::assertSameShape(acc, m_acc_D_A1);
+      m_acc_D_A1 = acc; }
+    void setAccDA2(const blitz::Array<double,1>& acc)
+    { bob::core::array::assertSameShape(acc, m_acc_D_A2);
+      m_acc_D_A2 = acc; }
+
+
+  private:
+    size_t m_Nid; // Number of identities
+    size_t m_dim_C; // Number of Gaussian components of the UBM GMM
+    size_t m_dim_D; // Dimensionality of the feature space
+    size_t m_dim_ru; // Rank of the U subspace
+    size_t m_dim_rv; // Rank of the V subspace
+
+    std::vector<blitz::Array<double,2> > m_x; // matrix x of speaker factors for eigenchannels U, for each client
+    std::vector<blitz::Array<double,1> > m_y; // vector y of speaker factors for eigenvoices V, for each client
+    std::vector<blitz::Array<double,1> > m_z; // vector z of speaker factors for eigenvoices Z, for each client
+
+    std::vector<blitz::Array<double,1> > m_Nacc; // Sum of the zeroth order statistics over the sessions for each client, dimension C
+    std::vector<blitz::Array<double,1> > m_Facc; // Sum of the first order statistics over the sessions for each client, dimension CD
+
+    // Accumulators for the M-step
+    blitz::Array<double,3> m_acc_V_A1;
+    blitz::Array<double,2> m_acc_V_A2;
+    blitz::Array<double,3> m_acc_U_A1;
+    blitz::Array<double,2> m_acc_U_A2;
+    blitz::Array<double,1> m_acc_D_A1;
+    blitz::Array<double,1> m_acc_D_A2;
+
+    // Cache/Precomputation
+    blitz::Array<double,2> m_cache_VtSigmaInv; // Vt * diag(sigma)^-1
+    blitz::Array<double,3> m_cache_VProd; // first dimension is the Gaussian id
+    blitz::Array<double,2> m_cache_IdPlusVProd_i;
+    blitz::Array<double,1> m_cache_Fn_y_i;
+
+    blitz::Array<double,2> m_cache_UtSigmaInv; // Ut * diag(sigma)^-1
+    blitz::Array<double,3> m_cache_UProd; // first dimension is the Gaussian id
+    blitz::Array<double,2> m_cache_IdPlusUProd_ih;
+    blitz::Array<double,1> m_cache_Fn_x_ih;
+
+    blitz::Array<double,1> m_cache_DtSigmaInv; // Dt * diag(sigma)^-1
+    blitz::Array<double,1> m_cache_DProd; // supervector length dimension
+    blitz::Array<double,1> m_cache_IdPlusDProd_i;
+    blitz::Array<double,1> m_cache_Fn_z_i;
+
+    // Working arrays
+    mutable blitz::Array<double,2> m_tmp_ruru;
+    mutable blitz::Array<double,2> m_tmp_ruD;
+    mutable blitz::Array<double,2> m_tmp_rvrv;
+    mutable blitz::Array<double,2> m_tmp_rvD;
+    mutable blitz::Array<double,1> m_tmp_rv;
+    mutable blitz::Array<double,1> m_tmp_ru;
+    mutable blitz::Array<double,1> m_tmp_CD;
+    mutable blitz::Array<double,1> m_tmp_CD_b;
+};
+
+
+} } } // namespaces
+
+#endif /* BOB_LEARN_MISC_FABASETRAINER_H */
diff --git a/bob/learn/misc/include/bob.learn.misc/ISVTrainer.h b/bob/learn/misc/include/bob.learn.misc/ISVTrainer.h
new file mode 100644
index 0000000..e6b5285
--- /dev/null
+++ b/bob/learn/misc/include/bob.learn.misc/ISVTrainer.h
@@ -0,0 +1,158 @@
+/**
+ * @date Tue Jul 19 12:16:17 2011 +0200
+ * @author Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+ *
+ * @brief ISV functions
+ *
+ * Copyright (C) Idiap Research Institute, Martigny, Switzerland
+ */
+
+#ifndef BOB_LEARN_MISC_ISVTRAINER_H
+#define BOB_LEARN_MISC_ISVTRAINER_H
+
+#include <blitz/array.h>
+#include <bob.learn.misc/GMMStats.h>
+#include <bob.learn.misc/FABaseTrainer.h>
+#include <bob.learn.misc/ISVMachine.h>
+#include <vector>
+
+#include <map>
+#include <string>
+#include <bob.core/array_copy.h>
+#include <boost/shared_ptr.hpp>
+#include <boost/random.hpp>
+#include <bob.core/logging.h>
+
+namespace bob { namespace learn { namespace misc {
+
+
+class ISVTrainer
+{
+  public:
+    /**
+     * @brief Constructor
+     */
+    ISVTrainer(const size_t max_iterations=10, const double relevance_factor=4.);
+
+    /**
+     * @brief Copy constructor
+     */
+    ISVTrainer(const ISVTrainer& other);
+
+    /**
+     * @brief Destructor
+     */
+    virtual ~ISVTrainer();
+
+    /**
+     * @brief Assignment operator
+     */
+    ISVTrainer& operator=(const ISVTrainer& other);
+
+    /**
+     * @brief Equal to
+     */
+    bool operator==(const ISVTrainer& b) const;
+
+    /**
+     * @brief Not equal to
+     */
+    bool operator!=(const ISVTrainer& b) const;
+
+    /**
+     * @brief Similar to
+     */
+    bool is_similar_to(const ISVTrainer& b, const double r_epsilon=1e-5,
+      const double a_epsilon=1e-8) const;
+
+    /**
+     * @brief This methods performs some initialization before the EM loop.
+     */
+    virtual void initialize(bob::learn::misc::ISVBase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
+    /**
+     * @brief This methods performs some actions after the EM loop.
+     */
+    virtual void finalize(bob::learn::misc::ISVBase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
+
+    /**
+     * @brief Calculates and saves statistics across the dataset
+     * The statistics will be used in the mStep() that follows.
+     */
+    virtual void eStep(bob::learn::misc::ISVBase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
+
+    /**
+     * @brief Performs a maximization step to update the parameters of the
+     * factor analysis model.
+     */
+    virtual void mStep(bob::learn::misc::ISVBase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
+
+    /**
+     * @brief Computes the average log likelihood using the current estimates
+     * of the latent variables.
+     */
+    virtual double computeLikelihood(bob::learn::misc::ISVBase& machine);
+
+    /**
+     * @brief Enrol a client
+     */
+    void enrol(bob::learn::misc::ISVMachine& machine,
+      const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& features,
+      const size_t n_iter);
+
+    /**
+     * @brief Get the x speaker factors
+     */
+    const std::vector<blitz::Array<double,2> >& getX() const
+    { return m_base_trainer.getX(); }
+    /**
+     * @brief Get the z speaker factors
+     */
+    const std::vector<blitz::Array<double,1> >& getZ() const
+    { return m_base_trainer.getZ(); }
+    /**
+     * @brief Set the x speaker factors
+     */
+    void setX(const std::vector<blitz::Array<double,2> >& X)
+    { m_base_trainer.setX(X); }
+    /**
+     * @brief Set the z speaker factors
+     */
+    void setZ(const std::vector<blitz::Array<double,1> >& z)
+    { m_base_trainer.setZ(z); }
+
+    /**
+     * @brief Getters for the accumulators
+     */
+    const blitz::Array<double,3>& getAccUA1() const
+    { return m_base_trainer.getAccUA1(); }
+    const blitz::Array<double,2>& getAccUA2() const
+    { return m_base_trainer.getAccUA2(); }
+
+    /**
+     * @brief Setters for the accumulators, Very useful if the e-Step needs
+     * to be parallelized.
+     */
+    void setAccUA1(const blitz::Array<double,3>& acc)
+    { m_base_trainer.setAccUA1(acc); }
+    void setAccUA2(const blitz::Array<double,2>& acc)
+    { m_base_trainer.setAccUA2(acc); }
+
+
+  private:
+    /**
+     * @brief Initialize D to sqrt(ubm_var/relevance_factor)
+     */
+    void initializeD(bob::learn::misc::ISVBase& machine) const;
+
+    // Attributes
+    bob::learn::misc::FABaseTrainer m_base_trainer;
+    double m_relevance_factor;
+};
+
+} } } // namespaces
+
+#endif /* BOB_LEARN_MISC_ISVTRAINER_H */
diff --git a/bob/learn/misc/include/bob.learn.misc/JFATrainer.h b/bob/learn/misc/include/bob.learn.misc/JFATrainer.h
index 3cfa08d..f8c818e 100644
--- a/bob/learn/misc/include/bob.learn.misc/JFATrainer.h
+++ b/bob/learn/misc/include/bob.learn.misc/JFATrainer.h
@@ -11,8 +11,8 @@
 #define BOB_LEARN_MISC_JFATRAINER_H
 
 #include <blitz/array.h>
-#include <bob.learn.misc/EMTrainer.h>
 #include <bob.learn.misc/GMMStats.h>
+#include <bob.learn.misc/FABaseTrainer.h>
 #include <bob.learn.misc/JFAMachine.h>
 #include <vector>
 
@@ -25,326 +25,6 @@
 
 namespace bob { namespace learn { namespace misc {
 
-class FABaseTrainer
-{
-  public:
-    /**
-     * @brief Constructor
-     */
-    FABaseTrainer();
-
-    /**
-     * @brief Copy constructor
-     */
-    FABaseTrainer(const FABaseTrainer& other);
-
-    /**
-     * @brief Destructor
-     */
-    ~FABaseTrainer();
-
-    /**
-     * @brief Check that the dimensionality of the statistics match.
-     */
-    void checkStatistics(const bob::learn::misc::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
-
-    /**
-     * @brief Initialize the dimensionality, the UBM, the sums of the
-     * statistics and the number of identities.
-     */
-    void initUbmNidSumStatistics(const bob::learn::misc::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
-
-    /**
-     * @brief Precomputes the sums of the zeroth order statistics over the
-     * sessions for each client
-     */
-    void precomputeSumStatisticsN(const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
-    /**
-     * @brief Precomputes the sums of the first order statistics over the
-     * sessions for each client
-     */
-    void precomputeSumStatisticsF(const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
-
-    /**
-     * @brief Initializes (allocates and sets to zero) the x, y, z speaker
-     * factors
-     */
-    void initializeXYZ(const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
-
-    /**
-     * @brief Resets the x, y, z speaker factors to zero values
-     */
-    void resetXYZ();
-
-
-    /**** Y and V functions ****/
-    /**
-     * @brief Computes Vt * diag(sigma)^-1
-     */
-    void computeVtSigmaInv(const bob::learn::misc::FABase& m);
-    /**
-     * @brief Computes Vt_{c} * diag(sigma)^-1 * V_{c} for each Gaussian c
-     */
-    void computeVProd(const bob::learn::misc::FABase& m);
-    /**
-     * @brief Computes (I+Vt*diag(sigma)^-1*Ni*V)^-1 which occurs in the y
-     * estimation for the given person
-     */
-    void computeIdPlusVProd_i(const size_t id);
-    /**
-     * @brief Computes sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i} - U*x_{i,h})
-     * which occurs in the y estimation of the given person
-     */
-    void computeFn_y_i(const bob::learn::misc::FABase& m,
-      const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& stats,
-      const size_t id);
-    /**
-     * @brief Updates y_i (of the current person) and the accumulators to
-     * compute V with the cache values m_cache_IdPlusVprod_i, m_VtSigmaInv and
-     * m_cache_Fn_y_i
-     */
-    void updateY_i(const size_t id);
-    /**
-     * @brief Updates y and the accumulators to compute V
-     */
-    void updateY(const bob::learn::misc::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
-    /**
-     * @brief Computes the accumulators m_acc_V_A1 and m_acc_V_A2 for V
-     * V = A2 * A1^-1
-     */
-    void computeAccumulatorsV(const bob::learn::misc::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
-    /**
-     * @brief Updates V from the accumulators m_acc_V_A1 and m_acc_V_A2
-     */
-    void updateV(blitz::Array<double,2>& V);
-
-
-    /**** X and U functions ****/
-    /**
-     * @brief Computes Ut * diag(sigma)^-1
-     */
-    void computeUtSigmaInv(const bob::learn::misc::FABase& m);
-    /**
-     * @brief Computes Ut_{c} * diag(sigma)^-1 * U_{c} for each Gaussian c
-     */
-    void computeUProd(const bob::learn::misc::FABase& m);
-    /**
-     * @brief Computes (I+Ut*diag(sigma)^-1*Ni*U)^-1 which occurs in the x
-     * estimation
-     */
-    void computeIdPlusUProd_ih(const boost::shared_ptr<bob::learn::misc::GMMStats>& stats);
-    /**
-     * @brief Computes sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i} - U*x_{i,h})
-     * which occurs in the y estimation of the given person
-     */
-    void computeFn_x_ih(const bob::learn::misc::FABase& m,
-      const boost::shared_ptr<bob::learn::misc::GMMStats>& stats, const size_t id);
-    /**
-     * @brief Updates x_ih (of the current person/session) and the
-     * accumulators to compute U with the cache values m_cache_IdPlusVprod_i,
-     * m_VtSigmaInv and m_cache_Fn_y_i
-     */
-    void updateX_ih(const size_t id, const size_t h);
-    /**
-     * @brief Updates x
-     */
-    void updateX(const bob::learn::misc::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
-    /**
-     * @brief Computes the accumulators m_acc_U_A1 and m_acc_U_A2 for U
-     * U = A2 * A1^-1
-     */
-    void computeAccumulatorsU(const bob::learn::misc::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
-    /**
-     * @brief Updates U from the accumulators m_acc_U_A1 and m_acc_U_A2
-     */
-    void updateU(blitz::Array<double,2>& U);
-
-
-    /**** z and D functions ****/
-    /**
-     * @brief Computes diag(D) * diag(sigma)^-1
-     */
-    void computeDtSigmaInv(const bob::learn::misc::FABase& m);
-    /**
-     * @brief Computes Dt_{c} * diag(sigma)^-1 * D_{c} for each Gaussian c
-     */
-    void computeDProd(const bob::learn::misc::FABase& m);
-    /**
-     * @brief Computes (I+diag(d)t*diag(sigma)^-1*Ni*diag(d))^-1 which occurs
-     * in the z estimation for the given person
-     */
-    void computeIdPlusDProd_i(const size_t id);
-    /**
-     * @brief Computes sum_{sessions h}(N_{i,h}*(o_{i,h} - m - V*y_{i} - U*x_{i,h})
-     * which occurs in the y estimation of the given person
-     */
-    void computeFn_z_i(const bob::learn::misc::FABase& m,
-      const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& stats, const size_t id);
-    /**
-     * @brief Updates z_i (of the current person) and the accumulators to
-     * compute D with the cache values m_cache_IdPlusDProd_i, m_VtSigmaInv
-     * and m_cache_Fn_z_i
-     */
-    void updateZ_i(const size_t id);
-    /**
-     * @brief Updates z and the accumulators to compute D
-     */
-    void updateZ(const bob::learn::misc::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
-    /**
-     * @brief Computes the accumulators m_acc_D_A1 and m_acc_D_A2 for d
-     * d = A2 * A1^-1
-     */
-    void computeAccumulatorsD(const bob::learn::misc::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
-    /**
-     * @brief Updates d from the accumulators m_acc_D_A1 and m_acc_D_A2
-     */
-    void updateD(blitz::Array<double,1>& d);
-
-
-    /**
-     * @brief Get the zeroth order statistics
-     */
-    const std::vector<blitz::Array<double,1> >& getNacc() const
-    { return m_Nacc; }
-    /**
-     * @brief Get the first order statistics
-     */
-    const std::vector<blitz::Array<double,1> >& getFacc() const
-    { return m_Facc; }
-    /**
-     * @brief Get the x speaker factors
-     */
-    const std::vector<blitz::Array<double,2> >& getX() const
-    { return m_x; }
-    /**
-     * @brief Get the y speaker factors
-     */
-    const std::vector<blitz::Array<double,1> >& getY() const
-    { return m_y; }
-    /**
-     * @brief Get the z speaker factors
-     */
-    const std::vector<blitz::Array<double,1> >& getZ() const
-    { return m_z; }
-    /**
-     * @brief Set the x speaker factors
-     */
-    void setX(const std::vector<blitz::Array<double,2> >& X)
-    { m_x = X; }
-    /**
-     * @brief Set the y speaker factors
-     */
-    void setY(const std::vector<blitz::Array<double,1> >& y)
-    { m_y = y; }
-    /**
-     * @brief Set the z speaker factors
-     */
-    void setZ(const std::vector<blitz::Array<double,1> >& z)
-    { m_z = z; }
-
-    /**
-     * @brief Initializes the cache to process the given statistics
-     */
-    void initCache();
-
-    /**
-     * @brief Getters for the accumulators
-     */
-    const blitz::Array<double,3>& getAccVA1() const
-    { return m_acc_V_A1; }
-    const blitz::Array<double,2>& getAccVA2() const
-    { return m_acc_V_A2; }
-    const blitz::Array<double,3>& getAccUA1() const
-    { return m_acc_U_A1; }
-    const blitz::Array<double,2>& getAccUA2() const
-    { return m_acc_U_A2; }
-    const blitz::Array<double,1>& getAccDA1() const
-    { return m_acc_D_A1; }
-    const blitz::Array<double,1>& getAccDA2() const
-    { return m_acc_D_A2; }
-
-    /**
-     * @brief Setters for the accumulators, Very useful if the e-Step needs
-     * to be parallelized.
-     */
-    void setAccVA1(const blitz::Array<double,3>& acc)
-    { bob::core::array::assertSameShape(acc, m_acc_V_A1);
-      m_acc_V_A1 = acc; }
-    void setAccVA2(const blitz::Array<double,2>& acc)
-    { bob::core::array::assertSameShape(acc, m_acc_V_A2);
-      m_acc_V_A2 = acc; }
-    void setAccUA1(const blitz::Array<double,3>& acc)
-    { bob::core::array::assertSameShape(acc, m_acc_U_A1);
-      m_acc_U_A1 = acc; }
-    void setAccUA2(const blitz::Array<double,2>& acc)
-    { bob::core::array::assertSameShape(acc, m_acc_U_A2);
-      m_acc_U_A2 = acc; }
-    void setAccDA1(const blitz::Array<double,1>& acc)
-    { bob::core::array::assertSameShape(acc, m_acc_D_A1);
-      m_acc_D_A1 = acc; }
-    void setAccDA2(const blitz::Array<double,1>& acc)
-    { bob::core::array::assertSameShape(acc, m_acc_D_A2);
-      m_acc_D_A2 = acc; }
-
-
-  private:
-    size_t m_Nid; // Number of identities
-    size_t m_dim_C; // Number of Gaussian components of the UBM GMM
-    size_t m_dim_D; // Dimensionality of the feature space
-    size_t m_dim_ru; // Rank of the U subspace
-    size_t m_dim_rv; // Rank of the V subspace
-
-    std::vector<blitz::Array<double,2> > m_x; // matrix x of speaker factors for eigenchannels U, for each client
-    std::vector<blitz::Array<double,1> > m_y; // vector y of spealer factors for eigenvoices V, for each client
-    std::vector<blitz::Array<double,1> > m_z; // vector z of spealer factors for eigenvoices Z, for each client
-
-    std::vector<blitz::Array<double,1> > m_Nacc; // Sum of the zeroth order statistics over the sessions for each client, dimension C
-    std::vector<blitz::Array<double,1> > m_Facc; // Sum of the first order statistics over the sessions for each client, dimension CD
-
-    // Accumulators for the M-step
-    blitz::Array<double,3> m_acc_V_A1;
-    blitz::Array<double,2> m_acc_V_A2;
-    blitz::Array<double,3> m_acc_U_A1;
-    blitz::Array<double,2> m_acc_U_A2;
-    blitz::Array<double,1> m_acc_D_A1;
-    blitz::Array<double,1> m_acc_D_A2;
-
-    // Cache/Precomputation
-    blitz::Array<double,2> m_cache_VtSigmaInv; // Vt * diag(sigma)^-1
-    blitz::Array<double,3> m_cache_VProd; // first dimension is the Gaussian id
-    blitz::Array<double,2> m_cache_IdPlusVProd_i;
-    blitz::Array<double,1> m_cache_Fn_y_i;
-
-    blitz::Array<double,2> m_cache_UtSigmaInv; // Ut * diag(sigma)^-1
-    blitz::Array<double,3> m_cache_UProd; // first dimension is the Gaussian id
-    blitz::Array<double,2> m_cache_IdPlusUProd_ih;
-    blitz::Array<double,1> m_cache_Fn_x_ih;
-
-    blitz::Array<double,1> m_cache_DtSigmaInv; // Dt * diag(sigma)^-1
-    blitz::Array<double,1> m_cache_DProd; // supervector length dimension
-    blitz::Array<double,1> m_cache_IdPlusDProd_i;
-    blitz::Array<double,1> m_cache_Fn_z_i;
-
-    // Working arrays
-    mutable blitz::Array<double,2> m_tmp_ruru;
-    mutable blitz::Array<double,2> m_tmp_ruD;
-    mutable blitz::Array<double,2> m_tmp_rvrv;
-    mutable blitz::Array<double,2> m_tmp_rvD;
-    mutable blitz::Array<double,1> m_tmp_rv;
-    mutable blitz::Array<double,1> m_tmp_ru;
-    mutable blitz::Array<double,1> m_tmp_CD;
-    mutable blitz::Array<double,1> m_tmp_CD_b;
-};
-
-
 class JFATrainer
 {
   public:
@@ -387,14 +67,14 @@ class JFATrainer
     /**
      * @brief Sets the maximum number of EM-like iterations (for each subspace)
      */
-    void setMaxIterations(const size_t max_iterations)
-    { m_max_iterations = max_iterations; }
+    //void setMaxIterations(const size_t max_iterations)
+    //{ m_max_iterations = max_iterations; }
 
     /**
      * @brief Gets the maximum number of EM-like iterations (for each subspace)
      */
-    size_t getMaxIterations() const
-    { return m_max_iterations; }
+    //size_t getMaxIterations() const
+    //{ return m_max_iterations; }
 
     /**
      * @brief This methods performs some initialization before the EM loop.
@@ -454,13 +134,13 @@ class JFATrainer
     /**
      * @brief This methods performs the main loops to train the subspaces U, V and d
      */
-    virtual void train_loop(bob::learn::misc::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
+    //virtual void train_loop(bob::learn::misc::JFABase& machine,
+      //const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
     /**
      * @brief This methods trains the subspaces U, V and d
      */
-    virtual void train(bob::learn::misc::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
+    //virtual void train(bob::learn::misc::JFABase& machine,
+      //const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
 
     /**
      * @brief Enrol a client
@@ -548,139 +228,11 @@ class JFATrainer
 
   private:
     // Attributes
-    size_t m_max_iterations;
+    //size_t m_max_iterations;
     boost::shared_ptr<boost::mt19937> m_rng; ///< The random number generator for the inialization
     bob::learn::misc::FABaseTrainer m_base_trainer;
 };
 
-
-class ISVTrainer: public EMTrainer<bob::learn::misc::ISVBase, std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > >
-{
-  public:
-    /**
-     * @brief Constructor
-     */
-    ISVTrainer(const size_t max_iterations=10, const double relevance_factor=4.);
-
-    /**
-     * @brief Copy onstructor
-     */
-    ISVTrainer(const ISVTrainer& other);
-
-    /**
-     * @brief Destructor
-     */
-    virtual ~ISVTrainer();
-
-    /**
-     * @brief Assignment operator
-     */
-    ISVTrainer& operator=(const ISVTrainer& other);
-
-    /**
-     * @brief Equal to
-     */
-    bool operator==(const ISVTrainer& b) const;
-
-    /**
-     * @brief Not equal to
-     */
-    bool operator!=(const ISVTrainer& b) const;
-
-    /**
-     * @brief Similar to
-     */
-    bool is_similar_to(const ISVTrainer& b, const double r_epsilon=1e-5,
-      const double a_epsilon=1e-8) const;
-
-    /**
-     * @brief This methods performs some initialization before the EM loop.
-     */
-    virtual void initialize(bob::learn::misc::ISVBase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
-    /**
-     * @brief This methods performs some actions after the EM loop.
-     */
-    virtual void finalize(bob::learn::misc::ISVBase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
-
-    /**
-     * @brief Calculates and saves statistics across the dataset
-     * The statistics will be used in the mStep() that follows.
-     */
-    virtual void eStep(bob::learn::misc::ISVBase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
-
-    /**
-     * @brief Performs a maximization step to update the parameters of the
-     * factor analysis model.
-     */
-    virtual void mStep(bob::learn::misc::ISVBase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
-
-    /**
-     * @brief Computes the average log likelihood using the current estimates
-     * of the latent variables.
-     */
-    virtual double computeLikelihood(bob::learn::misc::ISVBase& machine);
-
-    /**
-     * @brief Enrol a client
-     */
-    void enrol(bob::learn::misc::ISVMachine& machine,
-      const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& features,
-      const size_t n_iter);
-
-    /**
-     * @brief Get the x speaker factors
-     */
-    const std::vector<blitz::Array<double,2> >& getX() const
-    { return m_base_trainer.getX(); }
-    /**
-     * @brief Get the z speaker factors
-     */
-    const std::vector<blitz::Array<double,1> >& getZ() const
-    { return m_base_trainer.getZ(); }
-    /**
-     * @brief Set the x speaker factors
-     */
-    void setX(const std::vector<blitz::Array<double,2> >& X)
-    { m_base_trainer.setX(X); }
-    /**
-     * @brief Set the z speaker factors
-     */
-    void setZ(const std::vector<blitz::Array<double,1> >& z)
-    { m_base_trainer.setZ(z); }
-
-    /**
-     * @brief Getters for the accumulators
-     */
-    const blitz::Array<double,3>& getAccUA1() const
-    { return m_base_trainer.getAccUA1(); }
-    const blitz::Array<double,2>& getAccUA2() const
-    { return m_base_trainer.getAccUA2(); }
-
-    /**
-     * @brief Setters for the accumulators, Very useful if the e-Step needs
-     * to be parallelized.
-     */
-    void setAccUA1(const blitz::Array<double,3>& acc)
-    { m_base_trainer.setAccUA1(acc); }
-    void setAccUA2(const blitz::Array<double,2>& acc)
-    { m_base_trainer.setAccUA2(acc); }
-
-
-  private:
-    /**
-     * @brief Initialize D to sqrt(ubm_var/relevance_factor)
-     */
-    void initializeD(bob::learn::misc::ISVBase& machine) const;
-
-    // Attributes
-    bob::learn::misc::FABaseTrainer m_base_trainer;
-    double m_relevance_factor;
-};
-
 } } } // namespaces
 
-#endif /* BOB_LEARN_MISC_FATRAINER_H */
+#endif /* BOB_LEARN_MISC_JFATRAINER_H */
diff --git a/bob/learn/misc/jfa_trainer.cpp b/bob/learn/misc/jfa_trainer.cpp
new file mode 100644
index 0000000..9735aee
--- /dev/null
+++ b/bob/learn/misc/jfa_trainer.cpp
@@ -0,0 +1,873 @@
+/**
+ * @author Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+ * @date Sun 01 Feb 09:40:00 2015
+ *
+ * @brief Python API for bob::learn::em
+ *
+ * Copyright (C) 2011-2014 Idiap Research Institute, Martigny, Switzerland
+ */
+
+#include "main.h"
+#include <boost/make_shared.hpp>
+
+/******************************************************************/
+/************ Constructor Section *********************************/
+/******************************************************************/
+
+/**
+ * @brief Converts a Python list of lists of GMMStats objects into the
+ * nested std::vector representation used by the C++ trainer.
+ * @return 0 on success; -1 with a Python exception set on failure.
+ */
+static int extract_GMMStats(PyObject *list,
+                             std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& training_data)
+{
+
+  for (Py_ssize_t i=0; i<PyList_GET_SIZE(list); i++)
+  {
+    PyObject* another_list;
+    // Each outer element must itself be a list; the original code ignored
+    // this parse result and would dereference garbage on mismatch
+    if (!PyArg_Parse(PyList_GetItem(list, i), "O!", &PyList_Type, &another_list)){
+      PyErr_Format(PyExc_RuntimeError, "Expected a list of lists of GMMStats objects");
+      return -1;
+    }
+
+    std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > another_training_data;
+    for (Py_ssize_t j=0; j<PyList_GET_SIZE(another_list); j++){
+
+      PyBobLearnMiscGMMStatsObject* stats;
+      if (!PyArg_Parse(PyList_GetItem(another_list, j), "O!", &PyBobLearnMiscGMMStats_Type, &stats)){
+        PyErr_Format(PyExc_RuntimeError, "Expected GMMStats objects");
+        return -1;
+      }
+      another_training_data.push_back(stats->cxx);
+    }
+    training_data.push_back(another_training_data);
+  }
+  return 0;
+}
+
+// Converts a std::vector of blitz arrays into a new Python list of numpy
+// arrays. Each array is copied first, so the returned numpy objects are
+// independent of the trainer's internal buffers.
+template <int N>
+static PyObject* vector_as_list(const std::vector<blitz::Array<double,N> >& vec)
+{
+  PyObject* list = PyList_New(vec.size());
+  for(size_t i=0; i<vec.size(); i++){
+    // copy: PyBlitzArrayCxx_AsNumpy takes a non-const array
+    blitz::Array<double,N> numpy_array = vec[i];
+    PyObject* numpy_py_object = PyBlitzArrayCxx_AsNumpy(numpy_array);
+    // PyList_SET_ITEM steals the reference to numpy_py_object
+    // NOTE(review): PyList_New/AsNumpy results are not NULL-checked;
+    // on allocation failure a NULL item would be stored -- confirm.
+    PyList_SET_ITEM(list, i, numpy_py_object);
+  }
+  return list;
+}
+
+
+
+// Class-level documentation object for the JFATrainer binding.
+static auto JFATrainer_doc = bob::extension::ClassDoc(
+  BOB_EXT_MODULE_PREFIX ".JFATrainer",
+  // NOTE(review): these two adjacent literals concatenate with no
+  // separator ("JFATrainerReferences: ...") -- a newline between them
+  // was probably intended; confirm.
+  "JFATrainer"
+  "References: [Vogt2008,McCool2013]",
+  ""
+).add_constructor(
+  bob::extension::FunctionDoc(
+    "__init__",
+    "Constructor. Builds a new JFATrainer",
+    "",
+    true
+  )
+  .add_prototype("other","")
+  .add_prototype("","")
+  .add_parameter("other", ":py:class:`bob.learn.misc.JFATrainer`", "A JFATrainer object to be copied.")
+);
+
+
+// Copy-constructor branch of tp_init: expects a single JFATrainer
+// argument and deep-copies its C++ state. Returns 0 on success, -1 with
+// an exception set on failure.
+static int PyBobLearnMiscJFATrainer_init_copy(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+
+  char** kwlist = JFATrainer_doc.kwlist(0);
+  PyBobLearnMiscJFATrainerObject* o;
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!", kwlist, &PyBobLearnMiscJFATrainer_Type, &o)){
+    JFATrainer_doc.print_usage();
+    return -1;
+  }
+
+  // replace any previously-held trainer with a copy of the argument's
+  self->cxx.reset(new bob::learn::misc::JFATrainer(*o->cxx));
+  return 0;
+}
+
+
+/**
+ * @brief tp_init slot: builds a JFATrainer either with no arguments
+ * (default construction) or from a single JFATrainer to copy.
+ * @return 0 on success, -1 with a Python exception set on failure.
+ */
+static int PyBobLearnMiscJFATrainer_init(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // get the number of command line arguments
+  int nargs = (args?PyTuple_Size(args):0) + (kwargs?PyDict_Size(kwargs):0);
+
+  switch(nargs){
+    case 0:{ // default constructor
+      self->cxx.reset(new bob::learn::misc::JFATrainer());
+      return 0;
+    }
+    case 1:{
+      // If the constructor input is JFATrainer object
+      return PyBobLearnMiscJFATrainer_init_copy(self, args, kwargs);
+    }
+    default:{
+      PyErr_Format(PyExc_RuntimeError, "number of arguments mismatch - %s requires only 0 and 1 argument, but you provided %d (see help)", Py_TYPE(self)->tp_name, nargs);
+      JFATrainer_doc.print_usage();
+      return -1;
+    }
+  }
+  // tp_init must return -1 on error: returning 0 here (as before) would
+  // tell Python that a half-initialized object was built successfully
+  BOB_CATCH_MEMBER("cannot create JFATrainer", -1)
+  return 0;
+}
+
+
+// tp_dealloc slot: releases the shared_ptr to the C++ trainer, then
+// frees the Python object itself.
+static void PyBobLearnMiscJFATrainer_delete(PyBobLearnMiscJFATrainerObject* self) {
+  self->cxx.reset();
+  Py_TYPE(self)->tp_free((PyObject*)self);
+}
+
+
+// Returns non-zero if `o` is a _JFATrainer instance (or subclass).
+int PyBobLearnMiscJFATrainer_Check(PyObject* o) {
+  return PyObject_IsInstance(o, reinterpret_cast<PyObject*>(&PyBobLearnMiscJFATrainer_Type));
+}
+
+
+// tp_richcompare slot: only == and != are supported, delegating to the
+// C++ operator==; other comparisons return NotImplemented.
+static PyObject* PyBobLearnMiscJFATrainer_RichCompare(PyBobLearnMiscJFATrainerObject* self, PyObject* other, int op) {
+  BOB_TRY
+
+  if (!PyBobLearnMiscJFATrainer_Check(other)) {
+    PyErr_Format(PyExc_TypeError, "cannot compare `%s' with `%s'", Py_TYPE(self)->tp_name, Py_TYPE(other)->tp_name);
+    return 0;
+  }
+  auto other_ = reinterpret_cast<PyBobLearnMiscJFATrainerObject*>(other);
+  switch (op) {
+    case Py_EQ:
+      if (*self->cxx==*other_->cxx) Py_RETURN_TRUE; else Py_RETURN_FALSE;
+    case Py_NE:
+      if (*self->cxx==*other_->cxx) Py_RETURN_FALSE; else Py_RETURN_TRUE;
+    default:
+      Py_INCREF(Py_NotImplemented);
+      return Py_NotImplemented;
+  }
+  BOB_CATCH_MEMBER("cannot compare JFATrainer objects", 0)
+}
+
+
+/******************************************************************/
+/************ Variables Section ***********************************/
+/******************************************************************/
+
+// ---- acc_v_a1: V-subspace accumulator (3D), getter/setter pair ----
+static auto acc_v_a1 = bob::extension::VariableDoc(
+  "acc_v_a1",
+  "array_like <float, 3D>",
+  "Accumulator updated during the E-step",
+  ""
+);
+PyObject* PyBobLearnMiscJFATrainer_get_acc_v_a1(PyBobLearnMiscJFATrainerObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccVA1());
+  BOB_CATCH_MEMBER("acc_v_a1 could not be read", 0)
+}
+int PyBobLearnMiscJFATrainer_set_acc_v_a1(PyBobLearnMiscJFATrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 3D array of floats", Py_TYPE(self)->tp_name, acc_v_a1.name());
+    return -1;
+  }
+  // make_safe releases the converter's reference on scope exit
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,3>(o, "acc_v_a1");
+  if (!b) return -1;
+  self->cxx->setAccVA1(*b);
+  return 0;
+  BOB_CATCH_MEMBER("acc_v_a1 could not be set", -1)
+}
+
+
+// ---- acc_v_a2: V-subspace accumulator (2D), getter/setter pair ----
+static auto acc_v_a2 = bob::extension::VariableDoc(
+  "acc_v_a2",
+  "array_like <float, 2D>",
+  "Accumulator updated during the E-step",
+  ""
+);
+PyObject* PyBobLearnMiscJFATrainer_get_acc_v_a2(PyBobLearnMiscJFATrainerObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccVA2());
+  BOB_CATCH_MEMBER("acc_v_a2 could not be read", 0)
+}
+int PyBobLearnMiscJFATrainer_set_acc_v_a2(PyBobLearnMiscJFATrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, acc_v_a2.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "acc_v_a2");
+  if (!b) return -1;
+  self->cxx->setAccVA2(*b);
+  return 0;
+  BOB_CATCH_MEMBER("acc_v_a2 could not be set", -1)
+}
+
+
+// ---- acc_u_a1: U-subspace accumulator (3D), getter/setter pair ----
+static auto acc_u_a1 = bob::extension::VariableDoc(
+  "acc_u_a1",
+  "array_like <float, 3D>",
+  "Accumulator updated during the E-step",
+  ""
+);
+PyObject* PyBobLearnMiscJFATrainer_get_acc_u_a1(PyBobLearnMiscJFATrainerObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccUA1());
+  BOB_CATCH_MEMBER("acc_u_a1 could not be read", 0)
+}
+int PyBobLearnMiscJFATrainer_set_acc_u_a1(PyBobLearnMiscJFATrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 3D array of floats", Py_TYPE(self)->tp_name, acc_u_a1.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,3>(o, "acc_u_a1");
+  if (!b) return -1;
+  self->cxx->setAccUA1(*b);
+  return 0;
+  BOB_CATCH_MEMBER("acc_u_a1 could not be set", -1)
+}
+
+
+// ---- acc_u_a2: U-subspace accumulator (2D), getter/setter pair ----
+static auto acc_u_a2 = bob::extension::VariableDoc(
+  "acc_u_a2",
+  "array_like <float, 2D>",
+  "Accumulator updated during the E-step",
+  ""
+);
+PyObject* PyBobLearnMiscJFATrainer_get_acc_u_a2(PyBobLearnMiscJFATrainerObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccUA2());
+  BOB_CATCH_MEMBER("acc_u_a2 could not be read", 0)
+}
+int PyBobLearnMiscJFATrainer_set_acc_u_a2(PyBobLearnMiscJFATrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 2D array of floats", Py_TYPE(self)->tp_name, acc_u_a2.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,2>(o, "acc_u_a2");
+  if (!b) return -1;
+  self->cxx->setAccUA2(*b);
+  return 0;
+  BOB_CATCH_MEMBER("acc_u_a2 could not be set", -1)
+}
+
+
+// ---- acc_d_a1: d-subspace accumulator (1D), getter/setter pair ----
+static auto acc_d_a1 = bob::extension::VariableDoc(
+  "acc_d_a1",
+  "array_like <float, 1D>",
+  "Accumulator updated during the E-step",
+  ""
+);
+PyObject* PyBobLearnMiscJFATrainer_get_acc_d_a1(PyBobLearnMiscJFATrainerObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccDA1());
+  BOB_CATCH_MEMBER("acc_d_a1 could not be read", 0)
+}
+int PyBobLearnMiscJFATrainer_set_acc_d_a1(PyBobLearnMiscJFATrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, acc_d_a1.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "acc_d_a1");
+  if (!b) return -1;
+  self->cxx->setAccDA1(*b);
+  return 0;
+  BOB_CATCH_MEMBER("acc_d_a1 could not be set", -1)
+}
+
+
+// ---- acc_d_a2: d-subspace accumulator (1D), getter/setter pair ----
+static auto acc_d_a2 = bob::extension::VariableDoc(
+  "acc_d_a2",
+  "array_like <float, 1D>",
+  "Accumulator updated during the E-step",
+  ""
+);
+PyObject* PyBobLearnMiscJFATrainer_get_acc_d_a2(PyBobLearnMiscJFATrainerObject* self, void*){
+  BOB_TRY
+  return PyBlitzArrayCxx_AsConstNumpy(self->cxx->getAccDA2());
+  BOB_CATCH_MEMBER("acc_d_a2 could not be read", 0)
+}
+int PyBobLearnMiscJFATrainer_set_acc_d_a2(PyBobLearnMiscJFATrainerObject* self, PyObject* value, void*){
+  BOB_TRY
+  PyBlitzArrayObject* o;
+  if (!PyBlitzArray_Converter(value, &o)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects a 1D array of floats", Py_TYPE(self)->tp_name, acc_d_a2.name());
+    return -1;
+  }
+  auto o_ = make_safe(o);
+  auto b = PyBlitzArrayCxx_AsBlitz<double,1>(o, "acc_d_a2");
+  if (!b) return -1;
+  self->cxx->setAccDA2(*b);
+  return 0;
+  BOB_CATCH_MEMBER("acc_d_a2 could not be set", -1)
+}
+
+
+// Read-only access to the latent-variable estimates held by the base
+// trainer; each returns a fresh list of copied numpy arrays (see
+// vector_as_list). No setters are installed, so these are read-only
+// from Python.
+static auto __X__ = bob::extension::VariableDoc(
+  "__X__",
+  "list",
+  "",
+  ""
+);
+PyObject* PyBobLearnMiscJFATrainer_get_X(PyBobLearnMiscJFATrainerObject* self, void*){
+  BOB_TRY
+  return vector_as_list(self->cxx->getX());
+  BOB_CATCH_MEMBER("__X__ could not be read", 0)
+}
+
+
+static auto __Y__ = bob::extension::VariableDoc(
+  "__Y__",
+  "list",
+  "",
+  ""
+);
+PyObject* PyBobLearnMiscJFATrainer_get_Y(PyBobLearnMiscJFATrainerObject* self, void*){
+  BOB_TRY
+  return vector_as_list(self->cxx->getY());
+  BOB_CATCH_MEMBER("__Y__ could not be read", 0)
+}
+
+
+static auto __Z__ = bob::extension::VariableDoc(
+  "__Z__",
+  "list",
+  "",
+  ""
+);
+PyObject* PyBobLearnMiscJFATrainer_get_Z(PyBobLearnMiscJFATrainerObject* self, void*){
+  BOB_TRY
+  return vector_as_list(self->cxx->getZ());
+  BOB_CATCH_MEMBER("__Z__ could not be read", 0)
+}
+
+
+
+/***** rng *****/
+static auto rng = bob::extension::VariableDoc(
+  "rng",
+  "str",
+  "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.",
+  ""
+);
+// Getter: wraps the trainer's RNG in a new PyBoostMt19937 object.
+PyObject* PyBobLearnMiscJFATrainer_getRng(PyBobLearnMiscJFATrainerObject* self, void*) {
+  BOB_TRY
+  //Allocating the correspondent python object
+
+  PyBoostMt19937Object* retval =
+    (PyBoostMt19937Object*)PyBoostMt19937_Type.tp_alloc(&PyBoostMt19937_Type, 0);
+
+  // NOTE(review): this aliases the raw pointer owned by the trainer's
+  // shared_ptr -- confirm PyBoostMt19937 does not delete it on dealloc.
+  retval->rng = self->cxx->getRng().get();
+  // tp_alloc already produced a new reference; return it directly.
+  // The previous Py_BuildValue("O", retval) added a second reference
+  // and leaked the object.
+  return reinterpret_cast<PyObject*>(retval);
+  BOB_CATCH_MEMBER("Rng method could not be read", 0)
+}
+// Setter: installs the given PyBoostMt19937's generator on the trainer.
+int PyBobLearnMiscJFATrainer_setRng(PyBobLearnMiscJFATrainerObject* self, PyObject* value, void*) {
+  BOB_TRY
+
+  if (!PyBoostMt19937_Check(value)){
+    PyErr_Format(PyExc_RuntimeError, "%s %s expects an PyBoostMt19937_Check", Py_TYPE(self)->tp_name, rng.name());
+    return -1;
+  }
+
+  PyBoostMt19937Object* boostObject = 0;
+  PyBoostMt19937_Converter(value, &boostObject);
+  // NOTE(review): wrapping the raw pointer in a fresh shared_ptr takes
+  // ownership -- confirm the Python object does not also delete it.
+  self->cxx->setRng((boost::shared_ptr<boost::mt19937>)boostObject->rng);
+
+  return 0;
+  // setters must report failure with -1 (the previous 0 signalled success)
+  BOB_CATCH_MEMBER("Rng could not be set", -1)
+}
+
+// Property table. Each accumulator entry installs its matching *_set_*
+// function in the setter slot; the original table re-used the getter
+// there (cast to setter), so every `trainer.acc_* = value` assignment
+// invoked the getter with a bogus argument and the dedicated setters
+// were dead code. __X__/__Y__/__Z__ are intentionally read-only.
+static PyGetSetDef PyBobLearnMiscJFATrainer_getseters[] = {
+  {
+   acc_v_a1.name(),
+   (getter)PyBobLearnMiscJFATrainer_get_acc_v_a1,
+   (setter)PyBobLearnMiscJFATrainer_set_acc_v_a1,
+   acc_v_a1.doc(),
+   0
+  },
+  {
+   acc_v_a2.name(),
+   (getter)PyBobLearnMiscJFATrainer_get_acc_v_a2,
+   (setter)PyBobLearnMiscJFATrainer_set_acc_v_a2,
+   acc_v_a2.doc(),
+   0
+  },
+  {
+   acc_u_a1.name(),
+   (getter)PyBobLearnMiscJFATrainer_get_acc_u_a1,
+   (setter)PyBobLearnMiscJFATrainer_set_acc_u_a1,
+   acc_u_a1.doc(),
+   0
+  },
+  {
+   acc_u_a2.name(),
+   (getter)PyBobLearnMiscJFATrainer_get_acc_u_a2,
+   (setter)PyBobLearnMiscJFATrainer_set_acc_u_a2,
+   acc_u_a2.doc(),
+   0
+  },
+  {
+   acc_d_a1.name(),
+   (getter)PyBobLearnMiscJFATrainer_get_acc_d_a1,
+   (setter)PyBobLearnMiscJFATrainer_set_acc_d_a1,
+   acc_d_a1.doc(),
+   0
+  },
+  {
+   acc_d_a2.name(),
+   (getter)PyBobLearnMiscJFATrainer_get_acc_d_a2,
+   (setter)PyBobLearnMiscJFATrainer_set_acc_d_a2,
+   acc_d_a2.doc(),
+   0
+  },
+  {
+   rng.name(),
+   (getter)PyBobLearnMiscJFATrainer_getRng,
+   (setter)PyBobLearnMiscJFATrainer_setRng,
+   rng.doc(),
+   0
+  },
+  {
+   __X__.name(),
+   (getter)PyBobLearnMiscJFATrainer_get_X,
+   0,
+   __X__.doc(),
+   0
+  },
+  {
+   __Y__.name(),
+   (getter)PyBobLearnMiscJFATrainer_get_Y,
+   0,
+   __Y__.doc(),
+   0
+  },
+  {
+   __Z__.name(),
+   (getter)PyBobLearnMiscJFATrainer_get_Z,
+   0,
+   __Z__.doc(),
+   0
+  },
+
+  {0}  // Sentinel
+};
+
+
+/******************************************************************/
+/************ Functions Section ***********************************/
+/******************************************************************/
+
+/*** initialize ***/
+static auto initialize = bob::extension::FunctionDoc(
+  "initialize",
+  "Initialization before the EM steps",
+  "",
+  true
+)
+.add_prototype("jfa_base,stats")
+.add_parameter("jfa_base", ":py:class:`bob.learn.misc.JFABase`", "JFABase Object")
+.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "GMMStats Object");
+static PyObject* PyBobLearnMiscJFATrainer_initialize(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  /* Parses input arguments in a single shot */
+  char** kwlist = initialize.kwlist(0);
+
+  PyBobLearnMiscJFABaseObject* jfa_base = 0;
+  PyObject* stats = 0;
+
+  // On parse failure an exception is already set: return 0 to propagate
+  // it (returning None with an exception pending raises SystemError)
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscJFABase_Type, &jfa_base,
+                                                                 &PyList_Type, &stats)) return 0;
+
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
+  if(extract_GMMStats(stats ,training_data)!=0)
+    return 0; // exception set by extract_GMMStats
+  self->cxx->initialize(*jfa_base->cxx, training_data);
+
+  BOB_CATCH_MEMBER("cannot perform the initialize method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** e_step1 ***/
+static auto e_step1 = bob::extension::FunctionDoc(
+  "e_step1",
+  "Call the 1st e-step procedure (for the V subspace).",
+  "",
+  true
+)
+.add_prototype("jfa_base,stats")
+.add_parameter("jfa_base", ":py:class:`bob.learn.misc.JFABase`", "JFABase Object")
+.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "GMMStats Object");
+static PyObject* PyBobLearnMiscJFATrainer_e_step1(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  //Parses input arguments in a single shot
+  char** kwlist = e_step1.kwlist(0);
+
+  PyBobLearnMiscJFABaseObject* jfa_base = 0;
+  PyObject* stats = 0;
+
+  // return 0 on parse failure so the pending TypeError propagates
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscJFABase_Type, &jfa_base,
+                                                                 &PyList_Type, &stats)) return 0;
+
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
+  if(extract_GMMStats(stats ,training_data)!=0)
+    return 0; // exception set by extract_GMMStats
+  self->cxx->eStep1(*jfa_base->cxx, training_data);
+
+  BOB_CATCH_MEMBER("cannot perform the e_step1 method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** m_step1 ***/
+static auto m_step1 = bob::extension::FunctionDoc(
+  "m_step1",
+  "Call the 1st m-step procedure (for the V subspace).",
+  "",
+  true
+)
+.add_prototype("jfa_base,stats")
+.add_parameter("jfa_base", ":py:class:`bob.learn.misc.JFABase`", "JFABase Object")
+.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "GMMStats Object");
+static PyObject* PyBobLearnMiscJFATrainer_m_step1(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // Parses input arguments in a single shot
+  char** kwlist = m_step1.kwlist(0);
+
+  PyBobLearnMiscJFABaseObject* jfa_base = 0;
+  PyObject* stats = 0;
+
+  // return 0 on parse failure so the pending TypeError propagates
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscJFABase_Type, &jfa_base,
+                                                                 &PyList_Type, &stats)) return 0;
+
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
+  if(extract_GMMStats(stats ,training_data)!=0)
+    return 0; // exception set by extract_GMMStats
+  self->cxx->mStep1(*jfa_base->cxx, training_data);
+
+  BOB_CATCH_MEMBER("cannot perform the m_step1 method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** finalize1 ***/
+static auto finalize1 = bob::extension::FunctionDoc(
+  "finalize1",
+  "Call the 1st finalize procedure (for the V subspace).",
+  "",
+  true
+)
+.add_prototype("jfa_base,stats")
+.add_parameter("jfa_base", ":py:class:`bob.learn.misc.JFABase`", "JFABase Object")
+.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "GMMStats Object");
+static PyObject* PyBobLearnMiscJFATrainer_finalize1(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  //Parses input arguments in a single shot
+  char** kwlist = finalize1.kwlist(0);
+
+  PyBobLearnMiscJFABaseObject* jfa_base = 0;
+  PyObject* stats = 0;
+
+  // return 0 on parse failure so the pending TypeError propagates
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscJFABase_Type, &jfa_base,
+                                                                 &PyList_Type, &stats)) return 0;
+
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
+  if(extract_GMMStats(stats ,training_data)!=0)
+    return 0; // exception set by extract_GMMStats
+  self->cxx->finalize1(*jfa_base->cxx, training_data);
+
+  BOB_CATCH_MEMBER("cannot perform the finalize1 method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** e_step2 ***/
+static auto e_step2 = bob::extension::FunctionDoc(
+  "e_step2",
+  "Call the 2nd e-step procedure (for the U subspace).",
+  "",
+  true
+)
+.add_prototype("jfa_base,stats")
+.add_parameter("jfa_base", ":py:class:`bob.learn.misc.JFABase`", "JFABase Object")
+.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "GMMStats Object");
+static PyObject* PyBobLearnMiscJFATrainer_e_step2(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // Parses input arguments in a single shot
+  char** kwlist = e_step2.kwlist(0);
+
+  PyBobLearnMiscJFABaseObject* jfa_base = 0;
+  PyObject* stats = 0;
+
+  // return 0 on parse failure so the pending TypeError propagates
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscJFABase_Type, &jfa_base,
+                                                                 &PyList_Type, &stats)) return 0;
+
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
+  if(extract_GMMStats(stats ,training_data)!=0)
+    return 0; // exception set by extract_GMMStats
+  self->cxx->eStep2(*jfa_base->cxx, training_data);
+
+  BOB_CATCH_MEMBER("cannot perform the e_step2 method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** m_step2 ***/
+static auto m_step2 = bob::extension::FunctionDoc(
+  "m_step2",
+  "Call the 2nd m-step procedure (for the U subspace).",
+  "",
+  true
+)
+.add_prototype("jfa_base,stats")
+.add_parameter("jfa_base", ":py:class:`bob.learn.misc.JFABase`", "JFABase Object")
+.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "GMMStats Object");
+static PyObject* PyBobLearnMiscJFATrainer_m_step2(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // Parses input arguments in a single shot
+  char** kwlist = m_step2.kwlist(0);
+
+  PyBobLearnMiscJFABaseObject* jfa_base = 0;
+  PyObject* stats = 0;
+
+  // return 0 on parse failure so the pending TypeError propagates
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscJFABase_Type, &jfa_base,
+                                                                 &PyList_Type, &stats)) return 0;
+
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
+  if(extract_GMMStats(stats ,training_data)!=0)
+    return 0; // exception set by extract_GMMStats
+  self->cxx->mStep2(*jfa_base->cxx, training_data);
+
+  BOB_CATCH_MEMBER("cannot perform the m_step2 method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** finalize2 ***/
+static auto finalize2 = bob::extension::FunctionDoc(
+  "finalize2",
+  "Call the 2nd finalize procedure (for the U subspace).",
+  "",
+  true
+)
+.add_prototype("jfa_base,stats")
+.add_parameter("jfa_base", ":py:class:`bob.learn.misc.JFABase`", "JFABase Object")
+.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "GMMStats Object");
+static PyObject* PyBobLearnMiscJFATrainer_finalize2(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // Parses input arguments in a single shot
+  char** kwlist = finalize2.kwlist(0);
+
+  PyBobLearnMiscJFABaseObject* jfa_base = 0;
+  PyObject* stats = 0;
+
+  // return 0 on parse failure so the pending TypeError propagates
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscJFABase_Type, &jfa_base,
+                                                                 &PyList_Type, &stats)) return 0;
+
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
+  if(extract_GMMStats(stats ,training_data)!=0)
+    return 0; // exception set by extract_GMMStats
+  self->cxx->finalize2(*jfa_base->cxx, training_data);
+
+  BOB_CATCH_MEMBER("cannot perform the finalize2 method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** e_step3 ***/
+static auto e_step3 = bob::extension::FunctionDoc(
+  "e_step3",
+  "Call the 3rd e-step procedure (for the d subspace).",
+  "",
+  true
+)
+.add_prototype("jfa_base,stats")
+.add_parameter("jfa_base", ":py:class:`bob.learn.misc.JFABase`", "JFABase Object")
+.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "GMMStats Object");
+static PyObject* PyBobLearnMiscJFATrainer_e_step3(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // Parses input arguments in a single shot
+  char** kwlist = e_step3.kwlist(0);
+
+  PyBobLearnMiscJFABaseObject* jfa_base = 0;
+  PyObject* stats = 0;
+
+  // return 0 on parse failure so the pending TypeError propagates
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscJFABase_Type, &jfa_base,
+                                                                 &PyList_Type, &stats)) return 0;
+
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
+  if(extract_GMMStats(stats ,training_data)!=0)
+    return 0; // exception set by extract_GMMStats
+  self->cxx->eStep3(*jfa_base->cxx, training_data);
+
+  BOB_CATCH_MEMBER("cannot perform the e_step3 method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** m_step3 ***/
+static auto m_step3 = bob::extension::FunctionDoc(
+  "m_step3",
+  "Call the 3rd m-step procedure (for the d subspace).",
+  "",
+  true
+)
+.add_prototype("jfa_base,stats")
+.add_parameter("jfa_base", ":py:class:`bob.learn.misc.JFABase`", "JFABase Object")
+.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "GMMStats Object");
+static PyObject* PyBobLearnMiscJFATrainer_m_step3(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // Parses input arguments in a single shot
+  char** kwlist = m_step3.kwlist(0);
+
+  PyBobLearnMiscJFABaseObject* jfa_base = 0;
+  PyObject* stats = 0;
+
+  // return 0 on parse failure so the pending TypeError propagates
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscJFABase_Type, &jfa_base,
+                                                                 &PyList_Type, &stats)) return 0;
+
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
+  if(extract_GMMStats(stats ,training_data)!=0)
+    return 0; // exception set by extract_GMMStats
+  self->cxx->mStep3(*jfa_base->cxx, training_data);
+
+  BOB_CATCH_MEMBER("cannot perform the m_step3 method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+/*** finalize3 ***/
+static auto finalize3 = bob::extension::FunctionDoc(
+  "finalize3",
+  "Call the 3rd finalize procedure (for the d subspace).",
+  "",
+  true
+)
+.add_prototype("jfa_base,stats")
+.add_parameter("jfa_base", ":py:class:`bob.learn.misc.JFABase`", "JFABase Object")
+.add_parameter("stats", ":py:class:`bob.learn.misc.GMMStats`", "GMMStats Object");
+static PyObject* PyBobLearnMiscJFATrainer_finalize3(PyBobLearnMiscJFATrainerObject* self, PyObject* args, PyObject* kwargs) {
+  BOB_TRY
+
+  // Parses input arguments in a single shot
+  char** kwlist = finalize3.kwlist(0);
+
+  PyBobLearnMiscJFABaseObject* jfa_base = 0;
+  PyObject* stats = 0;
+
+  // return 0 on parse failure so the pending TypeError propagates
+  if (!PyArg_ParseTupleAndKeywords(args, kwargs, "O!O!", kwlist, &PyBobLearnMiscJFABase_Type, &jfa_base,
+                                                                 &PyList_Type, &stats)) return 0;
+
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
+  if(extract_GMMStats(stats ,training_data)!=0)
+    return 0; // exception set by extract_GMMStats
+  self->cxx->finalize3(*jfa_base->cxx, training_data);
+
+  BOB_CATCH_MEMBER("cannot perform the finalize3 method", 0)
+
+  Py_RETURN_NONE;
+}
+
+
+
+
+static PyMethodDef PyBobLearnMiscJFATrainer_methods[] = {
+  {
+    initialize.name(),
+    (PyCFunction)PyBobLearnMiscJFATrainer_initialize,
+    METH_VARARGS|METH_KEYWORDS,
+    initialize.doc()
+  },
+  {
+    e_step1.name(),
+    (PyCFunction)PyBobLearnMiscJFATrainer_e_step1,
+    METH_VARARGS|METH_KEYWORDS,
+    e_step1.doc()
+  },
+  {
+    e_step2.name(),
+    (PyCFunction)PyBobLearnMiscJFATrainer_e_step2,
+    METH_VARARGS|METH_KEYWORDS,
+    e_step2.doc()
+  },
+  {
+    e_step3.name(),
+    (PyCFunction)PyBobLearnMiscJFATrainer_e_step3,
+    METH_VARARGS|METH_KEYWORDS,
+    e_step3.doc()
+  },
+  {
+    m_step1.name(),
+    (PyCFunction)PyBobLearnMiscJFATrainer_m_step1,
+    METH_VARARGS|METH_KEYWORDS,
+    m_step1.doc()
+  },
+  {
+    m_step2.name(),
+    (PyCFunction)PyBobLearnMiscJFATrainer_m_step2,
+    METH_VARARGS|METH_KEYWORDS,
+    m_step2.doc()
+  },
+  {
+    m_step3.name(),
+    (PyCFunction)PyBobLearnMiscJFATrainer_m_step3,
+    METH_VARARGS|METH_KEYWORDS,
+    m_step3.doc()
+  },
+  // finalize1/2/3 were defined above but never registered, leaving them
+  // unreachable from Python; expose them alongside the e/m-steps.
+  {
+    finalize1.name(),
+    (PyCFunction)PyBobLearnMiscJFATrainer_finalize1,
+    METH_VARARGS|METH_KEYWORDS,
+    finalize1.doc()
+  },
+  {
+    finalize2.name(),
+    (PyCFunction)PyBobLearnMiscJFATrainer_finalize2,
+    METH_VARARGS|METH_KEYWORDS,
+    finalize2.doc()
+  },
+  {
+    finalize3.name(),
+    (PyCFunction)PyBobLearnMiscJFATrainer_finalize3,
+    METH_VARARGS|METH_KEYWORDS,
+    finalize3.doc()
+  },
+
+  {0} /* Sentinel */
+};
+
+
+/******************************************************************/
+/************ Module Section **************************************/
+/******************************************************************/
+
+// Define the JFATrainer type struct; will be initialized later
+PyTypeObject PyBobLearnMiscJFATrainer_Type = {
+  PyVarObject_HEAD_INIT(0,0)
+  0
+};
+
+// Fills the JFATrainer type struct, readies it and registers it on the
+// module as "_JFATrainer". Returns false on any failure.
+bool init_BobLearnMiscJFATrainer(PyObject* module)
+{
+  // initialize the type struct
+  PyBobLearnMiscJFATrainer_Type.tp_name      = JFATrainer_doc.name();
+  PyBobLearnMiscJFATrainer_Type.tp_basicsize = sizeof(PyBobLearnMiscJFATrainerObject);
+  // Py_TPFLAGS_BASETYPE is required: the pure-Python JFATrainer wrapper
+  // subclasses _JFATrainer, which fails unless the type allows subtyping
+  PyBobLearnMiscJFATrainer_Type.tp_flags     = Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE;
+  PyBobLearnMiscJFATrainer_Type.tp_doc       = JFATrainer_doc.doc();
+
+  // set the functions
+  PyBobLearnMiscJFATrainer_Type.tp_new          = PyType_GenericNew;
+  PyBobLearnMiscJFATrainer_Type.tp_init         = reinterpret_cast<initproc>(PyBobLearnMiscJFATrainer_init);
+  PyBobLearnMiscJFATrainer_Type.tp_dealloc      = reinterpret_cast<destructor>(PyBobLearnMiscJFATrainer_delete);
+  PyBobLearnMiscJFATrainer_Type.tp_richcompare  = reinterpret_cast<richcmpfunc>(PyBobLearnMiscJFATrainer_RichCompare);
+  PyBobLearnMiscJFATrainer_Type.tp_methods      = PyBobLearnMiscJFATrainer_methods;
+  PyBobLearnMiscJFATrainer_Type.tp_getset       = PyBobLearnMiscJFATrainer_getseters;
+  //PyBobLearnMiscJFATrainer_Type.tp_call       = reinterpret_cast<ternaryfunc>(PyBobLearnMiscJFATrainer_compute_likelihood);
+
+
+  // check that everything is fine
+  if (PyType_Ready(&PyBobLearnMiscJFATrainer_Type) < 0) return false;
+
+  // add the type to the module
+  Py_INCREF(&PyBobLearnMiscJFATrainer_Type);
+  return PyModule_AddObject(module, "_JFATrainer", (PyObject*)&PyBobLearnMiscJFATrainer_Type) >= 0;
+}
+
diff --git a/bob/learn/misc/main.cpp b/bob/learn/misc/main.cpp
index 0bb13e2..b574888 100644
--- a/bob/learn/misc/main.cpp
+++ b/bob/learn/misc/main.cpp
@@ -68,16 +68,19 @@ static PyObject* create_module (void) {
   if (!init_BobLearnMiscGMMBaseTrainer(module)) return 0;
   if (!init_BobLearnMiscMLGMMTrainer(module)) return 0;  
   if (!init_BobLearnMiscMAPGMMTrainer(module)) return 0;
-  if (!init_BobLearnMiscJFABase(module)) return 0;
-  if (!init_BobLearnMiscISVBase(module)) return 0;
 
+  if (!init_BobLearnMiscJFABase(module)) return 0;
   if (!init_BobLearnMiscJFAMachine(module)) return 0;
+  if (!init_BobLearnMiscJFATrainer(module)) return 0;
+  if (!init_BobLearnMiscISVBase(module)) return 0;
   if (!init_BobLearnMiscISVMachine(module)) return 0;
+
   if (!init_BobLearnMiscIVectorMachine(module)) return 0;
   if (!init_BobLearnMiscPLDABase(module)) return 0;
   if (!init_BobLearnMiscPLDAMachine(module)) return 0;  
 
 
+
   static void* PyBobLearnMisc_API[PyBobLearnMisc_API_pointers];
 
   /* exhaustive list of C APIs */
diff --git a/bob/learn/misc/main.h b/bob/learn/misc/main.h
index a500855..d6049de 100644
--- a/bob/learn/misc/main.h
+++ b/bob/learn/misc/main.h
@@ -28,14 +28,17 @@
 #include <bob.learn.misc/MAP_GMMTrainer.h>
 
 #include <bob.learn.misc/JFABase.h>
-#include <bob.learn.misc/ISVBase.h>
-
 #include <bob.learn.misc/JFAMachine.h>
+#include <bob.learn.misc/JFATrainer.h>
+#include <bob.learn.misc/ISVBase.h>
 #include <bob.learn.misc/ISVMachine.h>
+
+
 #include <bob.learn.misc/IVectorMachine.h>
 #include <bob.learn.misc/PLDAMachine.h>
 #include <bob.learn.misc/ZTNorm.h>
 
+
 #include "ztnorm.cpp"
 
 
@@ -197,6 +200,16 @@ extern PyTypeObject PyBobLearnMiscJFAMachine_Type;
 bool init_BobLearnMiscJFAMachine(PyObject* module);
 int PyBobLearnMiscJFAMachine_Check(PyObject* o);
 
+// JFATrainer
+typedef struct {
+  PyObject_HEAD
+  boost::shared_ptr<bob::learn::misc::JFATrainer> cxx;
+} PyBobLearnMiscJFATrainerObject;
+
+
+extern PyTypeObject PyBobLearnMiscJFATrainer_Type;
+bool init_BobLearnMiscJFATrainer(PyObject* module);
+int PyBobLearnMiscJFATrainer_Check(PyObject* o);
 
 // ISVMachine
 typedef struct {
diff --git a/setup.py b/setup.py
index 903a69c..f0d5bcf 100644
--- a/setup.py
+++ b/setup.py
@@ -69,10 +69,13 @@ setup(
           "bob/learn/misc/cpp/JFAMachine.cpp",
           "bob/learn/misc/cpp/ISVMachine.cpp",
 
+          "bob/learn/misc/cpp/FABaseTrainer.cpp",
+          "bob/learn/misc/cpp/JFATrainer.cpp",
+          #"bob/learn/misc/cpp/ISVTrainer.cpp",
+
           #"bob/learn/misc/cpp/EMPCATrainer.cpp",
           "bob/learn/misc/cpp/GMMBaseTrainer.cpp",
           #"bob/learn/misc/cpp/IVectorTrainer.cpp",
-          #"bob/learn/misc/cpp/JFATrainer.cpp",
           "bob/learn/misc/cpp/KMeansTrainer.cpp",
           "bob/learn/misc/cpp/MAP_GMMTrainer.cpp",
           "bob/learn/misc/cpp/ML_GMMTrainer.cpp",
@@ -113,9 +116,11 @@ setup(
           "bob/learn/misc/gmm_base_trainer.cpp",
           "bob/learn/misc/ML_gmm_trainer.cpp",
           "bob/learn/misc/MAP_gmm_trainer.cpp",
+
           "bob/learn/misc/jfa_base.cpp",
-          "bob/learn/misc/isv_base.cpp",
           "bob/learn/misc/jfa_machine.cpp",
+          "bob/learn/misc/jfa_trainer.cpp",
+          "bob/learn/misc/isv_base.cpp",
           "bob/learn/misc/isv_machine.cpp",
           
           "bob/learn/misc/ivector_machine.cpp",
-- 
GitLab