From 9c239142676ac44663f75fd08e3e2b59af5efb11 Mon Sep 17 00:00:00 2001
From: Manuel Guenther <manuel.guenther@idiap.ch>
Date: Fri, 22 Aug 2014 15:38:50 +0200
Subject: [PATCH] Changed namespace of C++ code to bob::learn::misc.

---
 bob/learn/misc/cpp/BICMachine.cpp             |  28 +-
 bob/learn/misc/cpp/BICTrainer.cpp             |   2 +-
 bob/learn/misc/cpp/EMPCATrainer.cpp           |  52 ++--
 bob/learn/misc/cpp/GMMMachine.cpp             | 122 ++++----
 bob/learn/misc/cpp/GMMStats.cpp               |  54 ++--
 bob/learn/misc/cpp/GMMTrainer.cpp             |  42 +--
 bob/learn/misc/cpp/Gaussian.cpp               |  60 ++--
 bob/learn/misc/cpp/IVectorMachine.cpp         |  56 ++--
 bob/learn/misc/cpp/IVectorTrainer.cpp         |  72 ++---
 bob/learn/misc/cpp/JFAMachine.cpp             | 178 ++++++------
 bob/learn/misc/cpp/JFATrainer.cpp             | 254 ++++++++---------
 bob/learn/misc/cpp/KMeansMachine.cpp          |  66 +++--
 bob/learn/misc/cpp/KMeansTrainer.cpp          |  36 +--
 bob/learn/misc/cpp/LinearScoring.cpp          | 145 +++++-----
 bob/learn/misc/cpp/MAP_GMMTrainer.cpp         |  40 +--
 bob/learn/misc/cpp/ML_GMMTrainer.cpp          |  38 +--
 bob/learn/misc/cpp/PLDAMachine.cpp            | 176 ++++++------
 bob/learn/misc/cpp/PLDATrainer.cpp            |  84 +++---
 bob/learn/misc/cpp/WienerMachine.cpp          |  40 +--
 bob/learn/misc/cpp/WienerTrainer.cpp          |  24 +-
 bob/learn/misc/cpp/ZTNorm.cpp                 | 263 +++++++++---------
 .../misc/include/bob.learn.misc/BICMachine.h  |  18 +-
 .../misc/include/bob.learn.misc/BICTrainer.h  |  21 +-
 .../include/bob.learn.misc/EMPCATrainer.h     |  17 +-
 .../misc/include/bob.learn.misc/EMTrainer.h   |  18 +-
 .../misc/include/bob.learn.misc/GMMMachine.h  |  21 +-
 .../misc/include/bob.learn.misc/GMMStats.h    |  17 +-
 .../misc/include/bob.learn.misc/GMMTrainer.h  |  33 +--
 .../misc/include/bob.learn.misc/Gaussian.h    |  20 +-
 .../include/bob.learn.misc/IVectorMachine.h   |  35 +--
 .../include/bob.learn.misc/IVectorTrainer.h   |  37 +--
 .../misc/include/bob.learn.misc/JFAMachine.h  |  96 +++----
 .../misc/include/bob.learn.misc/JFATrainer.h  | 157 ++++++-----
 .../include/bob.learn.misc/KMeansMachine.h    |  18 +-
 .../include/bob.learn.misc/KMeansTrainer.h    |  31 +--
 .../include/bob.learn.misc/LinearScoring.h    |  35 +--
 .../include/bob.learn.misc/MAP_GMMTrainer.h   |  25 +-
 .../include/bob.learn.misc/ML_GMMTrainer.h    |  21 +-
 .../misc/include/bob.learn.misc/Machine.h     |  22 +-
 .../misc/include/bob.learn.misc/PLDAMachine.h |  23 +-
 .../misc/include/bob.learn.misc/PLDATrainer.h |  49 ++--
 .../misc/include/bob.learn.misc/Trainer.h     |  21 +-
 .../include/bob.learn.misc/WienerMachine.h    |  17 +-
 .../include/bob.learn.misc/WienerTrainer.h    |  21 +-
 .../misc/include/bob.learn.misc/ZTNorm.h      |  19 +-
 bob/learn/misc/old/bic.cc                     |  18 +-
 bob/learn/misc/old/bic_trainer.cc             |   6 +-
 bob/learn/misc/old/empca_trainer.cc           |  10 +-
 bob/learn/misc/old/gaussian.cc                |  40 +--
 bob/learn/misc/old/gmm.cc                     | 106 +++----
 bob/learn/misc/old/gmm_trainer.cc             |  26 +-
 bob/learn/misc/old/ivector.cc                 |  64 ++---
 bob/learn/misc/old/ivector_trainer.cc         |  54 ++--
 bob/learn/misc/old/jfa.cc                     | 196 ++++++-------
 bob/learn/misc/old/jfa_trainer.cc             | 158 +++++------
 bob/learn/misc/old/kmeans.cc                  |  48 ++--
 bob/learn/misc/old/kmeans_trainer.cc          |  38 +--
 bob/learn/misc/old/linearscoring.cc           |  28 +-
 bob/learn/misc/old/machine.cc                 |   6 +-
 bob/learn/misc/old/plda.cc                    | 132 ++++-----
 bob/learn/misc/old/plda_trainer.cc            |  62 ++---
 bob/learn/misc/old/wiener.cc                  |  38 +--
 bob/learn/misc/old/wiener_trainer.cc          |  14 +-
 bob/learn/misc/old/ztnorm.cc                  |   8 +-
 64 files changed, 1743 insertions(+), 1933 deletions(-)

diff --git a/bob/learn/misc/cpp/BICMachine.cpp b/bob/learn/misc/cpp/BICMachine.cpp
index e877b07..4bf047f 100644
--- a/bob/learn/misc/cpp/BICMachine.cpp
+++ b/bob/learn/misc/cpp/BICMachine.cpp
@@ -24,7 +24,7 @@
  *
  * @param use_DFFS  Add the Distance From Feature Space during score computation?
  */
-bob::machine::BICMachine::BICMachine(bool use_DFFS)
+bob::learn::misc::BICMachine::BICMachine(bool use_DFFS)
 :
   m_project_data(use_DFFS),
   m_use_DFFS(use_DFFS)
@@ -36,7 +36,7 @@ bob::machine::BICMachine::BICMachine(bool use_DFFS)
  * @param  other  The other BICMachine to get a shallow copy of
  * @return a reference to *this
  */
-bob::machine::BICMachine::BICMachine(const BICMachine& other)
+bob::learn::misc::BICMachine::BICMachine(const BICMachine& other)
 :
   m_project_data(other.m_project_data),
   m_use_DFFS(other.m_use_DFFS)
@@ -56,7 +56,7 @@ bob::machine::BICMachine::BICMachine(const BICMachine& other)
  * @param  other  The other BICMachine to get a deep copy of
  * @return a reference to *this
  */
-bob::machine::BICMachine& bob::machine::BICMachine::operator=(const BICMachine& other)
+bob::learn::misc::BICMachine& bob::learn::misc::BICMachine::operator=(const BICMachine& other)
 {
   if (this != &other)
   {
@@ -79,7 +79,7 @@ bob::machine::BICMachine& bob::machine::BICMachine::operator=(const BICMachine&
  * @param  other  The BICMachine to compare with
  * @return true if both machines are identical, i.e., have exactly the same parameters, otherwise false
  */
-bool bob::machine::BICMachine::operator==(const BICMachine& other) const
+bool bob::learn::misc::BICMachine::operator==(const BICMachine& other) const
 {
   return (m_project_data == other.m_project_data &&
           (!m_project_data || m_use_DFFS == other.m_use_DFFS) &&
@@ -99,7 +99,7 @@ bool bob::machine::BICMachine::operator==(const BICMachine& other) const
  * @param  other  The BICMachine to compare with
  * @return false if both machines are identical, i.e., have exactly the same parameters, otherwise true
  */
-bool bob::machine::BICMachine::operator!=(const BICMachine& other) const
+bool bob::learn::misc::BICMachine::operator!=(const BICMachine& other) const
 {
   return !(this->operator==(other));
 }
@@ -113,7 +113,7 @@ bool bob::machine::BICMachine::operator!=(const BICMachine& other) const
 
  * @return true if both machines are approximately equal, otherwise false
  */
-bool bob::machine::BICMachine::is_similar_to(const BICMachine& other,
+bool bob::learn::misc::BICMachine::is_similar_to(const BICMachine& other,
   const double r_epsilon, const double a_epsilon) const
 {
   if (m_project_data){
@@ -150,7 +150,7 @@ bool bob::machine::BICMachine::is_similar_to(const BICMachine& other,
 
 
 
-void bob::machine::BICMachine::initialize(bool clazz, int input_length, int projected_length){
+void bob::learn::misc::BICMachine::initialize(bool clazz, int input_length, int projected_length){
   blitz::Array<double,1>& diff = clazz ? m_diff_E : m_diff_I;
   blitz::Array<double,1>& proj = clazz ? m_proj_E : m_proj_I;
   diff.resize(input_length);
@@ -165,7 +165,7 @@ void bob::machine::BICMachine::initialize(bool clazz, int input_length, int proj
  * @param  variances  The variances of the training data
  * @param  copy_data  If true, makes a deep copy of the matrices, otherwise it just references it (the default)
  */
-void bob::machine::BICMachine::setIEC(
+void bob::learn::misc::BICMachine::setIEC(
     bool clazz,
     const blitz::Array<double,1>& mean,
     const blitz::Array<double,1>& variances,
@@ -198,7 +198,7 @@ void bob::machine::BICMachine::setIEC(
  * @param  rho     The residual eigenvalues, used for DFFS calculation
  * @param  copy_data  If true, makes a deep copy of the matrices, otherwise it just references it (the default)
  */
-void bob::machine::BICMachine::setBIC(
+void bob::learn::misc::BICMachine::setBIC(
     bool clazz,
     const blitz::Array<double,1>& mean,
     const blitz::Array<double,1>& variances,
@@ -240,7 +240,7 @@ void bob::machine::BICMachine::setBIC(
  *
  * @param use_DFFS The new value of use_DFFS
  */
-void bob::machine::BICMachine::use_DFFS(bool use_DFFS){
+void bob::learn::misc::BICMachine::use_DFFS(bool use_DFFS){
   m_use_DFFS = use_DFFS;
   if (m_project_data && m_use_DFFS && (m_rho_E < 1e-12 || m_rho_I < 1e-12)) std::runtime_error("The average eigenvalue (rho) is too close to zero, so using DFFS will not work");
 }
@@ -250,7 +250,7 @@ void bob::machine::BICMachine::use_DFFS(bool use_DFFS){
  *
  * @param  config  The hdf5 file containing the required information.
  */
-void bob::machine::BICMachine::load(bob::io::base::HDF5File& config){
+void bob::learn::misc::BICMachine::load(bob::io::base::HDF5File& config){
   //reads all data directly into the member variables
   m_project_data = config.read<bool>("project_data");
   m_mu_I.reference(config.readArray<double,1>("intra_mean"));
@@ -279,7 +279,7 @@ void bob::machine::BICMachine::load(bob::io::base::HDF5File& config){
  *
  * @param  config  The hdf5 file to write the configuration into.
  */
-void bob::machine::BICMachine::save(bob::io::base::HDF5File& config) const{
+void bob::learn::misc::BICMachine::save(bob::io::base::HDF5File& config) const{
   config.set("project_data", m_project_data);
   config.setArray("intra_mean", m_mu_I);
   config.setArray("intra_variance", m_lambda_I);
@@ -305,7 +305,7 @@ void bob::machine::BICMachine::save(bob::io::base::HDF5File& config) const{
  * @param  input  A vector (of difference values) to compute the BIC or IEC score for.
  * @param  output The one-element array that will contain the score afterwards.
  */
-void bob::machine::BICMachine::forward_(const blitz::Array<double,1>& input, double& output) const{
+void bob::learn::misc::BICMachine::forward_(const blitz::Array<double,1>& input, double& output) const{
   if (m_project_data){
     // subtract mean
     m_diff_I = input - m_mu_I;
@@ -338,7 +338,7 @@ void bob::machine::BICMachine::forward_(const blitz::Array<double,1>& input, dou
  * @param  input  A vector (of difference values) to compute the BIC or IEC score for.
  * @param  output The one-element array that will contain the score afterwards.
  */
-void bob::machine::BICMachine::forward(const blitz::Array<double,1>& input, double& output) const{
+void bob::learn::misc::BICMachine::forward(const blitz::Array<double,1>& input, double& output) const{
   // perform some checks
   bob::core::array::assertSameShape(input, m_mu_E);
 
diff --git a/bob/learn/misc/cpp/BICTrainer.cpp b/bob/learn/misc/cpp/BICTrainer.cpp
index 5c52a28..475fad9 100644
--- a/bob/learn/misc/cpp/BICTrainer.cpp
+++ b/bob/learn/misc/cpp/BICTrainer.cpp
@@ -21,7 +21,7 @@ static double sqr(const double& x){
  * @param  machine  The machine to be trained.
  * @param  differences  A set of (intra/extra)-personal difference vectors that should be trained.
  */
-void bob::trainer::BICTrainer::train_single(bool clazz, bob::machine::BICMachine& machine, const blitz::Array<double,2>& differences) const {
+void bob::learn::misc::BICTrainer::train_single(bool clazz, bob::learn::misc::BICMachine& machine, const blitz::Array<double,2>& differences) const {
   int subspace_dim = clazz ? m_M_E : m_M_I;
   int input_dim = differences.extent(1);
   int data_count = differences.extent(0);
diff --git a/bob/learn/misc/cpp/EMPCATrainer.cpp b/bob/learn/misc/cpp/EMPCATrainer.cpp
index e988145..6149393 100644
--- a/bob/learn/misc/cpp/EMPCATrainer.cpp
+++ b/bob/learn/misc/cpp/EMPCATrainer.cpp
@@ -18,7 +18,7 @@
 #include <bob.math/inv.h>
 #include <bob.math/stats.h>
 
-bob::trainer::EMPCATrainer::EMPCATrainer(double convergence_threshold,
+bob::learn::misc::EMPCATrainer::EMPCATrainer(double convergence_threshold,
     size_t max_iterations, bool compute_likelihood):
   EMTrainer<bob::learn::linear::Machine, blitz::Array<double,2> >(convergence_threshold,
     max_iterations, compute_likelihood),
@@ -32,7 +32,7 @@ bob::trainer::EMPCATrainer::EMPCATrainer(double convergence_threshold,
 {
 }
 
-bob::trainer::EMPCATrainer::EMPCATrainer(const bob::trainer::EMPCATrainer& other):
+bob::learn::misc::EMPCATrainer::EMPCATrainer(const bob::learn::misc::EMPCATrainer& other):
   EMTrainer<bob::learn::linear::Machine, blitz::Array<double,2> >(other.m_convergence_threshold,
     other.m_max_iterations, other.m_compute_likelihood),
   m_S(bob::core::array::ccopy(other.m_S)),
@@ -53,16 +53,16 @@ bob::trainer::EMPCATrainer::EMPCATrainer(const bob::trainer::EMPCATrainer& other
 {
 }
 
-bob::trainer::EMPCATrainer::~EMPCATrainer()
+bob::learn::misc::EMPCATrainer::~EMPCATrainer()
 {
 }
 
-bob::trainer::EMPCATrainer& bob::trainer::EMPCATrainer::operator=
-  (const bob::trainer::EMPCATrainer& other)
+bob::learn::misc::EMPCATrainer& bob::learn::misc::EMPCATrainer::operator=
+  (const bob::learn::misc::EMPCATrainer& other)
 {
   if (this != &other)
   {
-    bob::trainer::EMTrainer<bob::learn::linear::Machine,
+    bob::learn::misc::EMTrainer<bob::learn::linear::Machine,
       blitz::Array<double,2> >::operator=(other);
     m_S = bob::core::array::ccopy(other.m_S);
     m_z_first_order = bob::core::array::ccopy(other.m_z_first_order);
@@ -84,10 +84,10 @@ bob::trainer::EMPCATrainer& bob::trainer::EMPCATrainer::operator=
   return *this;
 }
 
-bool bob::trainer::EMPCATrainer::operator==
-  (const bob::trainer::EMPCATrainer &other) const
+bool bob::learn::misc::EMPCATrainer::operator==
+  (const bob::learn::misc::EMPCATrainer &other) const
 {
-  return bob::trainer::EMTrainer<bob::learn::linear::Machine,
+  return bob::learn::misc::EMTrainer<bob::learn::linear::Machine,
            blitz::Array<double,2> >::operator==(other) &&
         bob::core::array::isEqual(m_S, other.m_S) &&
         bob::core::array::isEqual(m_z_first_order, other.m_z_first_order) &&
@@ -98,17 +98,17 @@ bool bob::trainer::EMPCATrainer::operator==
         m_f_log2pi == other.m_f_log2pi;
 }
 
-bool bob::trainer::EMPCATrainer::operator!=
-  (const bob::trainer::EMPCATrainer &other) const
+bool bob::learn::misc::EMPCATrainer::operator!=
+  (const bob::learn::misc::EMPCATrainer &other) const
 {
   return !(this->operator==(other));
 }
 
-bool bob::trainer::EMPCATrainer::is_similar_to
-  (const bob::trainer::EMPCATrainer &other, const double r_epsilon,
+bool bob::learn::misc::EMPCATrainer::is_similar_to
+  (const bob::learn::misc::EMPCATrainer &other, const double r_epsilon,
    const double a_epsilon) const
 {
-  return bob::trainer::EMTrainer<bob::learn::linear::Machine,
+  return bob::learn::misc::EMTrainer<bob::learn::linear::Machine,
            blitz::Array<double,2> >::is_similar_to(other, r_epsilon, a_epsilon) &&
         bob::core::array::isClose(m_S, other.m_S, r_epsilon, a_epsilon) &&
         bob::core::array::isClose(m_z_first_order, other.m_z_first_order, r_epsilon, a_epsilon) &&
@@ -119,7 +119,7 @@ bool bob::trainer::EMPCATrainer::is_similar_to
         bob::core::isClose(m_f_log2pi, other.m_f_log2pi, r_epsilon, a_epsilon);
 }
 
-void bob::trainer::EMPCATrainer::initialize(bob::learn::linear::Machine& machine,
+void bob::learn::misc::EMPCATrainer::initialize(bob::learn::linear::Machine& machine,
   const blitz::Array<double,2>& ar)
 {
   // reinitializes array members and checks dimensionality
@@ -137,12 +137,12 @@ void bob::trainer::EMPCATrainer::initialize(bob::learn::linear::Machine& machine
   computeInvM();
 }
 
-void bob::trainer::EMPCATrainer::finalize(bob::learn::linear::Machine& machine,
+void bob::learn::misc::EMPCATrainer::finalize(bob::learn::linear::Machine& machine,
   const blitz::Array<double,2>& ar)
 {
 }
 
-void bob::trainer::EMPCATrainer::initMembers(
+void bob::learn::misc::EMPCATrainer::initMembers(
   const bob::learn::linear::Machine& machine,
   const blitz::Array<double,2>& ar)
 {
@@ -195,7 +195,7 @@ void bob::trainer::EMPCATrainer::initMembers(
   }
 }
 
-void bob::trainer::EMPCATrainer::computeMeanVariance(bob::learn::linear::Machine& machine,
+void bob::learn::misc::EMPCATrainer::computeMeanVariance(bob::learn::linear::Machine& machine,
   const blitz::Array<double,2>& ar)
 {
   size_t n_samples = ar.extent(0);
@@ -218,7 +218,7 @@ void bob::trainer::EMPCATrainer::computeMeanVariance(bob::learn::linear::Machine
   }
 }
 
-void bob::trainer::EMPCATrainer::initRandomWSigma2(bob::learn::linear::Machine& machine)
+void bob::learn::misc::EMPCATrainer::initRandomWSigma2(bob::learn::linear::Machine& machine)
 {
   // Initializes the random number generator
   boost::uniform_01<> range01;
@@ -234,14 +234,14 @@ void bob::trainer::EMPCATrainer::initRandomWSigma2(bob::learn::linear::Machine&
   m_sigma2 = die() * ratio;
 }
 
-void bob::trainer::EMPCATrainer::computeWtW(bob::learn::linear::Machine& machine)
+void bob::learn::misc::EMPCATrainer::computeWtW(bob::learn::linear::Machine& machine)
 {
   const blitz::Array<double,2> W = machine.getWeights();
   const blitz::Array<double,2> Wt = W.transpose(1,0);
   bob::math::prod(Wt, W, m_inW);
 }
 
-void bob::trainer::EMPCATrainer::computeInvM()
+void bob::learn::misc::EMPCATrainer::computeInvM()
 {
   // Compute inverse(M), where M = W^T * W + sigma2 * Id
   bob::math::eye(m_tmp_dxd_1); // m_tmp_dxd_1 = Id
@@ -252,7 +252,7 @@ void bob::trainer::EMPCATrainer::computeInvM()
 
 
 
-void bob::trainer::EMPCATrainer::eStep(bob::learn::linear::Machine& machine, const blitz::Array<double,2>& ar)
+void bob::learn::misc::EMPCATrainer::eStep(bob::learn::linear::Machine& machine, const blitz::Array<double,2>& ar)
 {
   // Gets mu and W from the machine
   const blitz::Array<double,1>& mu = machine.getInputSubtraction();
@@ -285,7 +285,7 @@ void bob::trainer::EMPCATrainer::eStep(bob::learn::linear::Machine& machine, con
   }
 }
 
-void bob::trainer::EMPCATrainer::mStep(bob::learn::linear::Machine& machine, const blitz::Array<double,2>& ar)
+void bob::learn::misc::EMPCATrainer::mStep(bob::learn::linear::Machine& machine, const blitz::Array<double,2>& ar)
 {
   // 1/ New estimate of W
   updateW(machine, ar);
@@ -297,7 +297,7 @@ void bob::trainer::EMPCATrainer::mStep(bob::learn::linear::Machine& machine, con
   computeInvM();
 }
 
-void bob::trainer::EMPCATrainer::updateW(bob::learn::linear::Machine& machine, const blitz::Array<double,2>& ar) {
+void bob::learn::misc::EMPCATrainer::updateW(bob::learn::linear::Machine& machine, const blitz::Array<double,2>& ar) {
   // Get the mean mu and the projection matrix W
   const blitz::Array<double,1>& mu = machine.getInputSubtraction();
   blitz::Array<double,2>& W = machine.updateWeights();
@@ -330,7 +330,7 @@ void bob::trainer::EMPCATrainer::updateW(bob::learn::linear::Machine& machine, c
   bob::math::prod(Wt, W, m_inW);
 }
 
-void bob::trainer::EMPCATrainer::updateSigma2(bob::learn::linear::Machine& machine, const blitz::Array<double,2>& ar) {
+void bob::learn::misc::EMPCATrainer::updateSigma2(bob::learn::linear::Machine& machine, const blitz::Array<double,2>& ar) {
   // Get the mean mu and the projection matrix W
   const blitz::Array<double,1>& mu = machine.getInputSubtraction();
   const blitz::Array<double,2>& W = machine.getWeights();
@@ -366,7 +366,7 @@ void bob::trainer::EMPCATrainer::updateSigma2(bob::learn::linear::Machine& machi
   m_sigma2 /= (static_cast<double>(ar.extent(0)) * mu.extent(0));
 }
 
-double bob::trainer::EMPCATrainer::computeLikelihood(bob::learn::linear::Machine& machine)
+double bob::learn::misc::EMPCATrainer::computeLikelihood(bob::learn::linear::Machine& machine)
 {
   // Get W projection matrix
   const blitz::Array<double,2>& W = machine.getWeights();
diff --git a/bob/learn/misc/cpp/GMMMachine.cpp b/bob/learn/misc/cpp/GMMMachine.cpp
index 727e690..2d81331 100644
--- a/bob/learn/misc/cpp/GMMMachine.cpp
+++ b/bob/learn/misc/cpp/GMMMachine.cpp
@@ -10,29 +10,29 @@
 #include <bob.core/assert.h>
 #include <bob.math/log.h>
 
-bob::machine::GMMMachine::GMMMachine(): m_gaussians(0) {
+bob::learn::misc::GMMMachine::GMMMachine(): m_gaussians(0) {
   resize(0,0);
 }
 
-bob::machine::GMMMachine::GMMMachine(const size_t n_gaussians, const size_t n_inputs):
+bob::learn::misc::GMMMachine::GMMMachine(const size_t n_gaussians, const size_t n_inputs):
   m_gaussians(0)
 {
   resize(n_gaussians,n_inputs);
 }
 
-bob::machine::GMMMachine::GMMMachine(bob::io::base::HDF5File& config):
+bob::learn::misc::GMMMachine::GMMMachine(bob::io::base::HDF5File& config):
   m_gaussians(0)
 {
   load(config);
 }
 
-bob::machine::GMMMachine::GMMMachine(const GMMMachine& other):
+bob::learn::misc::GMMMachine::GMMMachine(const GMMMachine& other):
   Machine<blitz::Array<double,1>, double>(other), m_gaussians(0)
 {
   copy(other);
 }
 
-bob::machine::GMMMachine& bob::machine::GMMMachine::operator=(const bob::machine::GMMMachine &other) {
+bob::learn::misc::GMMMachine& bob::learn::misc::GMMMachine::operator=(const bob::learn::misc::GMMMachine &other) {
   // protect against invalid self-assignment
   if (this != &other)
     copy(other);
@@ -41,7 +41,7 @@ bob::machine::GMMMachine& bob::machine::GMMMachine::operator=(const bob::machine
   return *this;
 }
 
-bool bob::machine::GMMMachine::operator==(const bob::machine::GMMMachine& b) const
+bool bob::learn::misc::GMMMachine::operator==(const bob::learn::misc::GMMMachine& b) const
 {
   if (m_n_gaussians != b.m_n_gaussians || m_n_inputs != b.m_n_inputs ||
       !bob::core::array::isEqual(m_weights, b.m_weights))
@@ -55,11 +55,11 @@ bool bob::machine::GMMMachine::operator==(const bob::machine::GMMMachine& b) con
   return true;
 }
 
-bool bob::machine::GMMMachine::operator!=(const bob::machine::GMMMachine& b) const {
+bool bob::learn::misc::GMMMachine::operator!=(const bob::learn::misc::GMMMachine& b) const {
   return !(this->operator==(b));
 }
 
-bool bob::machine::GMMMachine::is_similar_to(const bob::machine::GMMMachine& b,
+bool bob::learn::misc::GMMMachine::is_similar_to(const bob::learn::misc::GMMMachine& b,
   const double r_epsilon, const double a_epsilon) const
 {
   if (m_n_gaussians != b.m_n_gaussians || m_n_inputs != b.m_n_inputs ||
@@ -73,7 +73,7 @@ bool bob::machine::GMMMachine::is_similar_to(const bob::machine::GMMMachine& b,
   return true;
 }
 
-void bob::machine::GMMMachine::copy(const GMMMachine& other) {
+void bob::learn::misc::GMMMachine::copy(const GMMMachine& other) {
   m_n_gaussians = other.m_n_gaussians;
   m_n_inputs = other.m_n_inputs;
 
@@ -84,7 +84,7 @@ void bob::machine::GMMMachine::copy(const GMMMachine& other) {
   // Initialise Gaussians
   m_gaussians.clear();
   for(size_t i=0; i<m_n_gaussians; ++i) {
-    boost::shared_ptr<bob::machine::Gaussian> g(new bob::machine::Gaussian(*(other.m_gaussians[i])));
+    boost::shared_ptr<bob::learn::misc::Gaussian> g(new bob::learn::misc::Gaussian(*(other.m_gaussians[i])));
     m_gaussians.push_back(g);
   }
 
@@ -92,13 +92,13 @@ void bob::machine::GMMMachine::copy(const GMMMachine& other) {
   initCache();
 }
 
-bob::machine::GMMMachine::~GMMMachine() { }
+bob::learn::misc::GMMMachine::~GMMMachine() { }
 
-void bob::machine::GMMMachine::setNInputs(const size_t n_inputs) {
+void bob::learn::misc::GMMMachine::setNInputs(const size_t n_inputs) {
   resize(m_n_gaussians,n_inputs);
 }
 
-void bob::machine::GMMMachine::resize(const size_t n_gaussians, const size_t n_inputs) {
+void bob::learn::misc::GMMMachine::resize(const size_t n_gaussians, const size_t n_inputs) {
   m_n_gaussians = n_gaussians;
   m_n_inputs = n_inputs;
 
@@ -109,25 +109,25 @@ void bob::machine::GMMMachine::resize(const size_t n_gaussians, const size_t n_i
   // Initialise Gaussians
   m_gaussians.clear();
   for(size_t i=0; i<m_n_gaussians; ++i)
-    m_gaussians.push_back(boost::shared_ptr<bob::machine::Gaussian>(new bob::machine::Gaussian(n_inputs)));
+    m_gaussians.push_back(boost::shared_ptr<bob::learn::misc::Gaussian>(new bob::learn::misc::Gaussian(n_inputs)));
 
   // Initialise cache arrays
   initCache();
 }
 
 
-void bob::machine::GMMMachine::setWeights(const blitz::Array<double,1> &weights) {
+void bob::learn::misc::GMMMachine::setWeights(const blitz::Array<double,1> &weights) {
   bob::core::array::assertSameShape(weights, m_weights);
   m_weights = weights;
   recomputeLogWeights();
 }
 
-void bob::machine::GMMMachine::recomputeLogWeights() const
+void bob::learn::misc::GMMMachine::recomputeLogWeights() const
 {
   m_cache_log_weights = blitz::log(m_weights);
 }
 
-void bob::machine::GMMMachine::setMeans(const blitz::Array<double,2> &means) {
+void bob::learn::misc::GMMMachine::setMeans(const blitz::Array<double,2> &means) {
   bob::core::array::assertSameDimensionLength(means.extent(0), m_n_gaussians);
   bob::core::array::assertSameDimensionLength(means.extent(1), m_n_inputs);
   for(size_t i=0; i<m_n_gaussians; ++i)
@@ -135,27 +135,27 @@ void bob::machine::GMMMachine::setMeans(const blitz::Array<double,2> &means) {
   m_cache_supervector = false;
 }
 
-void bob::machine::GMMMachine::getMeans(blitz::Array<double,2> &means) const {
+void bob::learn::misc::GMMMachine::getMeans(blitz::Array<double,2> &means) const {
   bob::core::array::assertSameDimensionLength(means.extent(0), m_n_gaussians);
   bob::core::array::assertSameDimensionLength(means.extent(1), m_n_inputs);
   for(size_t i=0; i<m_n_gaussians; ++i)
     means(i,blitz::Range::all()) = m_gaussians[i]->getMean();
 }
 
-void bob::machine::GMMMachine::setMeanSupervector(const blitz::Array<double,1> &mean_supervector) {
+void bob::learn::misc::GMMMachine::setMeanSupervector(const blitz::Array<double,1> &mean_supervector) {
   bob::core::array::assertSameDimensionLength(mean_supervector.extent(0), m_n_gaussians*m_n_inputs);
   for(size_t i=0; i<m_n_gaussians; ++i)
     m_gaussians[i]->updateMean() = mean_supervector(blitz::Range(i*m_n_inputs, (i+1)*m_n_inputs-1));
   m_cache_supervector = false;
 }
 
-void bob::machine::GMMMachine::getMeanSupervector(blitz::Array<double,1> &mean_supervector) const {
+void bob::learn::misc::GMMMachine::getMeanSupervector(blitz::Array<double,1> &mean_supervector) const {
   bob::core::array::assertSameDimensionLength(mean_supervector.extent(0), m_n_gaussians*m_n_inputs);
   for(size_t i=0; i<m_n_gaussians; ++i)
     mean_supervector(blitz::Range(i*m_n_inputs, (i+1)*m_n_inputs-1)) = m_gaussians[i]->getMean();
 }
 
-void bob::machine::GMMMachine::setVariances(const blitz::Array<double, 2 >& variances) {
+void bob::learn::misc::GMMMachine::setVariances(const blitz::Array<double, 2 >& variances) {
   bob::core::array::assertSameDimensionLength(variances.extent(0), m_n_gaussians);
   bob::core::array::assertSameDimensionLength(variances.extent(1), m_n_inputs);
   for(size_t i=0; i<m_n_gaussians; ++i) {
@@ -165,14 +165,14 @@ void bob::machine::GMMMachine::setVariances(const blitz::Array<double, 2 >& vari
   m_cache_supervector = false;
 }
 
-void bob::machine::GMMMachine::getVariances(blitz::Array<double, 2 >& variances) const {
+void bob::learn::misc::GMMMachine::getVariances(blitz::Array<double, 2 >& variances) const {
   bob::core::array::assertSameDimensionLength(variances.extent(0), m_n_gaussians);
   bob::core::array::assertSameDimensionLength(variances.extent(1), m_n_inputs);
   for(size_t i=0; i<m_n_gaussians; ++i)
     variances(i,blitz::Range::all()) = m_gaussians[i]->getVariance();
 }
 
-void bob::machine::GMMMachine::setVarianceSupervector(const blitz::Array<double,1> &variance_supervector) {
+void bob::learn::misc::GMMMachine::setVarianceSupervector(const blitz::Array<double,1> &variance_supervector) {
   bob::core::array::assertSameDimensionLength(variance_supervector.extent(0), m_n_gaussians*m_n_inputs);
   for(size_t i=0; i<m_n_gaussians; ++i) {
     m_gaussians[i]->updateVariance() = variance_supervector(blitz::Range(i*m_n_inputs, (i+1)*m_n_inputs-1));
@@ -181,27 +181,27 @@ void bob::machine::GMMMachine::setVarianceSupervector(const blitz::Array<double,
   m_cache_supervector = false;
 }
 
-void bob::machine::GMMMachine::getVarianceSupervector(blitz::Array<double,1> &variance_supervector) const {
+void bob::learn::misc::GMMMachine::getVarianceSupervector(blitz::Array<double,1> &variance_supervector) const {
   bob::core::array::assertSameDimensionLength(variance_supervector.extent(0), m_n_gaussians*m_n_inputs);
   for(size_t i=0; i<m_n_gaussians; ++i) {
     variance_supervector(blitz::Range(i*m_n_inputs, (i+1)*m_n_inputs-1)) = m_gaussians[i]->getVariance();
   }
 }
 
-void bob::machine::GMMMachine::setVarianceThresholds(const double value) {
+void bob::learn::misc::GMMMachine::setVarianceThresholds(const double value) {
   for(size_t i=0; i<m_n_gaussians; ++i)
     m_gaussians[i]->setVarianceThresholds(value);
   m_cache_supervector = false;
 }
 
-void bob::machine::GMMMachine::setVarianceThresholds(blitz::Array<double, 1> variance_thresholds) {
+void bob::learn::misc::GMMMachine::setVarianceThresholds(blitz::Array<double, 1> variance_thresholds) {
   bob::core::array::assertSameDimensionLength(variance_thresholds.extent(0), m_n_inputs);
   for(size_t i=0; i<m_n_gaussians; ++i)
     m_gaussians[i]->setVarianceThresholds(variance_thresholds);
   m_cache_supervector = false;
 }
 
-void bob::machine::GMMMachine::setVarianceThresholds(const blitz::Array<double, 2>& variance_thresholds) {
+void bob::learn::misc::GMMMachine::setVarianceThresholds(const blitz::Array<double, 2>& variance_thresholds) {
   bob::core::array::assertSameDimensionLength(variance_thresholds.extent(0), m_n_gaussians);
   bob::core::array::assertSameDimensionLength(variance_thresholds.extent(1), m_n_inputs);
   for(size_t i=0; i<m_n_gaussians; ++i)
@@ -209,14 +209,14 @@ void bob::machine::GMMMachine::setVarianceThresholds(const blitz::Array<double,
   m_cache_supervector = false;
 }
 
-void bob::machine::GMMMachine::getVarianceThresholds(blitz::Array<double, 2>& variance_thresholds) const {
+void bob::learn::misc::GMMMachine::getVarianceThresholds(blitz::Array<double, 2>& variance_thresholds) const {
   bob::core::array::assertSameDimensionLength(variance_thresholds.extent(0), m_n_gaussians);
   bob::core::array::assertSameDimensionLength(variance_thresholds.extent(1), m_n_inputs);
   for(size_t i=0; i<m_n_gaussians; ++i)
     variance_thresholds(i,blitz::Range::all()) = m_gaussians[i]->getVarianceThresholds();
 }
 
-double bob::machine::GMMMachine::logLikelihood(const blitz::Array<double, 1> &x,
+double bob::learn::misc::GMMMachine::logLikelihood(const blitz::Array<double, 1> &x,
   blitz::Array<double,1> &log_weighted_gaussian_likelihoods) const
 {
   // Check dimension
@@ -225,7 +225,7 @@ double bob::machine::GMMMachine::logLikelihood(const blitz::Array<double, 1> &x,
   return logLikelihood_(x,log_weighted_gaussian_likelihoods);
 }
 
-double bob::machine::GMMMachine::logLikelihood_(const blitz::Array<double, 1> &x,
+double bob::learn::misc::GMMMachine::logLikelihood_(const blitz::Array<double, 1> &x,
   blitz::Array<double,1> &log_weighted_gaussian_likelihoods) const
 {
   // Initialise variables
@@ -242,7 +242,7 @@ double bob::machine::GMMMachine::logLikelihood_(const blitz::Array<double, 1> &x
   return log_likelihood;
 }
 
-double bob::machine::GMMMachine::logLikelihood(const blitz::Array<double, 1> &x) const {
+double bob::learn::misc::GMMMachine::logLikelihood(const blitz::Array<double, 1> &x) const {
   // Check dimension
   bob::core::array::assertSameDimensionLength(x.extent(0), m_n_inputs);
   // Call the other logLikelihood_ (overloaded) function
@@ -250,13 +250,13 @@ double bob::machine::GMMMachine::logLikelihood(const blitz::Array<double, 1> &x)
   return logLikelihood_(x,m_cache_log_weighted_gaussian_likelihoods);
 }
 
-double bob::machine::GMMMachine::logLikelihood_(const blitz::Array<double, 1> &x) const {
+double bob::learn::misc::GMMMachine::logLikelihood_(const blitz::Array<double, 1> &x) const {
   // Call the other logLikelihood (overloaded) function
   // (log_weighted_gaussian_likelihoods will be discarded)
   return logLikelihood_(x,m_cache_log_weighted_gaussian_likelihoods);
 }
 
-void bob::machine::GMMMachine::forward(const blitz::Array<double,1>& input, double& output) const {
+void bob::learn::misc::GMMMachine::forward(const blitz::Array<double,1>& input, double& output) const {
   if(static_cast<size_t>(input.extent(0)) != m_n_inputs) {
     boost::format m("expected input size (%u) does not match the size of input array (%d)");
     m % m_n_inputs % input.extent(0);
@@ -266,13 +266,13 @@ void bob::machine::GMMMachine::forward(const blitz::Array<double,1>& input, doub
   forward_(input,output);
 }
 
-void bob::machine::GMMMachine::forward_(const blitz::Array<double,1>& input,
+void bob::learn::misc::GMMMachine::forward_(const blitz::Array<double,1>& input,
     double& output) const {
   output = logLikelihood(input);
 }
 
-void bob::machine::GMMMachine::accStatistics(const blitz::Array<double,2>& input,
-    bob::machine::GMMStats& stats) const {
+void bob::learn::misc::GMMMachine::accStatistics(const blitz::Array<double,2>& input,
+    bob::learn::misc::GMMStats& stats) const {
   // iterate over data
   blitz::Range a = blitz::Range::all();
   for(int i=0; i<input.extent(0); ++i) {
@@ -283,7 +283,7 @@ void bob::machine::GMMMachine::accStatistics(const blitz::Array<double,2>& input
   }
 }
 
-void bob::machine::GMMMachine::accStatistics_(const blitz::Array<double,2>& input, bob::machine::GMMStats& stats) const {
+void bob::learn::misc::GMMMachine::accStatistics_(const blitz::Array<double,2>& input, bob::learn::misc::GMMStats& stats) const {
   // iterate over data
   blitz::Range a = blitz::Range::all();
   for(int i=0; i<input.extent(0); ++i) {
@@ -294,7 +294,7 @@ void bob::machine::GMMMachine::accStatistics_(const blitz::Array<double,2>& inpu
   }
 }
 
-void bob::machine::GMMMachine::accStatistics(const blitz::Array<double, 1>& x, bob::machine::GMMStats& stats) const {
+void bob::learn::misc::GMMMachine::accStatistics(const blitz::Array<double, 1>& x, bob::learn::misc::GMMStats& stats) const {
   // check GMMStats size
   bob::core::array::assertSameDimensionLength(stats.sumPx.extent(0), m_n_gaussians);
   bob::core::array::assertSameDimensionLength(stats.sumPx.extent(1), m_n_inputs);
@@ -307,7 +307,7 @@ void bob::machine::GMMMachine::accStatistics(const blitz::Array<double, 1>& x, b
   accStatisticsInternal(x, stats, log_likelihood);
 }
 
-void bob::machine::GMMMachine::accStatistics_(const blitz::Array<double, 1>& x, bob::machine::GMMStats& stats) const {
+void bob::learn::misc::GMMMachine::accStatistics_(const blitz::Array<double, 1>& x, bob::learn::misc::GMMStats& stats) const {
   // Calculate Gaussian and GMM likelihoods
   // - m_cache_log_weighted_gaussian_likelihoods(i) = log(weight_i*p(x|gaussian_i))
   // - log_likelihood = log(sum_i(weight_i*p(x|gaussian_i)))
@@ -316,8 +316,8 @@ void bob::machine::GMMMachine::accStatistics_(const blitz::Array<double, 1>& x,
   accStatisticsInternal(x, stats, log_likelihood);
 }
 
-void bob::machine::GMMMachine::accStatisticsInternal(const blitz::Array<double, 1>& x,
-  bob::machine::GMMStats& stats, const double log_likelihood) const
+void bob::learn::misc::GMMMachine::accStatisticsInternal(const blitz::Array<double, 1>& x,
+  bob::learn::misc::GMMStats& stats, const double log_likelihood) const
 {
   // Calculate responsibilities
   m_cache_P = blitz::exp(m_cache_log_weighted_gaussian_likelihoods - log_likelihood);
@@ -344,22 +344,22 @@ void bob::machine::GMMMachine::accStatisticsInternal(const blitz::Array<double,
   stats.sumPxx += (m_cache_Px(i,j) * x(j));
 }
 
-boost::shared_ptr<const bob::machine::Gaussian> bob::machine::GMMMachine::getGaussian(const size_t i) const {
+boost::shared_ptr<const bob::learn::misc::Gaussian> bob::learn::misc::GMMMachine::getGaussian(const size_t i) const {
   if (i>=m_n_gaussians) {
     throw std::runtime_error("getGaussian(): index out of bounds");
   }
-  boost::shared_ptr<const bob::machine::Gaussian> res = m_gaussians[i];
+  boost::shared_ptr<const bob::learn::misc::Gaussian> res = m_gaussians[i];
   return res;
 }
 
-boost::shared_ptr<bob::machine::Gaussian> bob::machine::GMMMachine::updateGaussian(const size_t i) {
+boost::shared_ptr<bob::learn::misc::Gaussian> bob::learn::misc::GMMMachine::updateGaussian(const size_t i) {
   if (i>=m_n_gaussians) {
     throw std::runtime_error("updateGaussian(): index out of bounds");
   }
   return m_gaussians[i];
 }
 
-void bob::machine::GMMMachine::save(bob::io::base::HDF5File& config) const {
+void bob::learn::misc::GMMMachine::save(bob::io::base::HDF5File& config) const {
   int64_t v = static_cast<int64_t>(m_n_gaussians);
   config.set("m_n_gaussians", v);
   v = static_cast<int64_t>(m_n_inputs);
@@ -378,7 +378,7 @@ void bob::machine::GMMMachine::save(bob::io::base::HDF5File& config) const {
   config.setArray("m_weights", m_weights);
 }
 
-void bob::machine::GMMMachine::load(bob::io::base::HDF5File& config) {
+void bob::learn::misc::GMMMachine::load(bob::io::base::HDF5File& config) {
   int64_t v;
   v = config.read<int64_t>("m_n_gaussians");
   m_n_gaussians = static_cast<size_t>(v);
@@ -387,7 +387,7 @@ void bob::machine::GMMMachine::load(bob::io::base::HDF5File& config) {
 
   m_gaussians.clear();
   for(size_t i=0; i<m_n_gaussians; ++i) {
-    m_gaussians.push_back(boost::shared_ptr<bob::machine::Gaussian>(new bob::machine::Gaussian(m_n_inputs)));
+    m_gaussians.push_back(boost::shared_ptr<bob::learn::misc::Gaussian>(new bob::learn::misc::Gaussian(m_n_inputs)));
     std::ostringstream oss;
     oss << "m_gaussians" << i;
     config.cd(oss.str());
@@ -402,7 +402,7 @@ void bob::machine::GMMMachine::load(bob::io::base::HDF5File& config) {
   initCache();
 }
 
-void bob::machine::GMMMachine::updateCacheSupervectors() const
+void bob::learn::misc::GMMMachine::updateCacheSupervectors() const
 {
   m_cache_mean_supervector.resize(m_n_gaussians*m_n_inputs);
   m_cache_variance_supervector.resize(m_n_gaussians*m_n_inputs);
@@ -415,7 +415,7 @@ void bob::machine::GMMMachine::updateCacheSupervectors() const
   m_cache_supervector = true;
 }
 
-void bob::machine::GMMMachine::initCache() const {
+void bob::learn::misc::GMMMachine::initCache() const {
   // Initialise cache arrays
   m_cache_log_weights.resize(m_n_gaussians);
   recomputeLogWeights();
@@ -425,32 +425,30 @@ void bob::machine::GMMMachine::initCache() const {
   m_cache_supervector = false;
 }
 
-void bob::machine::GMMMachine::reloadCacheSupervectors() const {
+void bob::learn::misc::GMMMachine::reloadCacheSupervectors() const {
   if(!m_cache_supervector)
     updateCacheSupervectors();
 }
 
-const blitz::Array<double,1>& bob::machine::GMMMachine::getMeanSupervector() const {
+const blitz::Array<double,1>& bob::learn::misc::GMMMachine::getMeanSupervector() const {
   if(!m_cache_supervector)
     updateCacheSupervectors();
   return m_cache_mean_supervector;
 }
 
-const blitz::Array<double,1>& bob::machine::GMMMachine::getVarianceSupervector() const {
+const blitz::Array<double,1>& bob::learn::misc::GMMMachine::getVarianceSupervector() const {
   if(!m_cache_supervector)
     updateCacheSupervectors();
   return m_cache_variance_supervector;
 }
 
-namespace bob {
-  namespace machine {
-    std::ostream& operator<<(std::ostream& os, const GMMMachine& machine) {
-      os << "Weights = " << machine.m_weights << std::endl;
-      for(size_t i=0; i < machine.m_n_gaussians; ++i) {
-        os << "Gaussian " << i << ": " << std::endl << *(machine.m_gaussians[i]);
-      }
-
-      return os;
+namespace bob { namespace learn { namespace misc {
+  std::ostream& operator<<(std::ostream& os, const GMMMachine& machine) {
+    os << "Weights = " << machine.m_weights << std::endl;
+    for(size_t i=0; i < machine.m_n_gaussians; ++i) {
+      os << "Gaussian " << i << ": " << std::endl << *(machine.m_gaussians[i]);
     }
+
+    return os;
   }
-}
+} } }
diff --git a/bob/learn/misc/cpp/GMMStats.cpp b/bob/learn/misc/cpp/GMMStats.cpp
index 5695cea..c0c25df 100644
--- a/bob/learn/misc/cpp/GMMStats.cpp
+++ b/bob/learn/misc/cpp/GMMStats.cpp
@@ -10,27 +10,27 @@
 #include <bob.core/logging.h>
 #include <bob.core/check.h>
 
-bob::machine::GMMStats::GMMStats() {
+bob::learn::misc::GMMStats::GMMStats() {
   resize(0,0);
 }
 
-bob::machine::GMMStats::GMMStats(const size_t n_gaussians, const size_t n_inputs) {
+bob::learn::misc::GMMStats::GMMStats(const size_t n_gaussians, const size_t n_inputs) {
   resize(n_gaussians,n_inputs);
 }
 
-bob::machine::GMMStats::GMMStats(bob::io::base::HDF5File& config) {
+bob::learn::misc::GMMStats::GMMStats(bob::io::base::HDF5File& config) {
   load(config);
 }
 
-bob::machine::GMMStats::GMMStats(const bob::machine::GMMStats& other) {
+bob::learn::misc::GMMStats::GMMStats(const bob::learn::misc::GMMStats& other) {
   copy(other);
 }
 
-bob::machine::GMMStats::~GMMStats() {
+bob::learn::misc::GMMStats::~GMMStats() {
 }
 
-bob::machine::GMMStats&
-bob::machine::GMMStats::operator=(const bob::machine::GMMStats& other) {
+bob::learn::misc::GMMStats&
+bob::learn::misc::GMMStats::operator=(const bob::learn::misc::GMMStats& other) {
   // protect against invalid self-assignment
   if (this != &other)
     copy(other);
@@ -39,7 +39,7 @@ bob::machine::GMMStats::operator=(const bob::machine::GMMStats& other) {
   return *this;
 }
 
-bool bob::machine::GMMStats::operator==(const bob::machine::GMMStats& b) const
+bool bob::learn::misc::GMMStats::operator==(const bob::learn::misc::GMMStats& b) const
 {
   return (T == b.T && log_likelihood == b.log_likelihood &&
           bob::core::array::isEqual(n, b.n) &&
@@ -48,12 +48,12 @@ bool bob::machine::GMMStats::operator==(const bob::machine::GMMStats& b) const
 }
 
 bool
-bob::machine::GMMStats::operator!=(const bob::machine::GMMStats& b) const
+bob::learn::misc::GMMStats::operator!=(const bob::learn::misc::GMMStats& b) const
 {
   return !(this->operator==(b));
 }
 
-bool bob::machine::GMMStats::is_similar_to(const bob::machine::GMMStats& b,
+bool bob::learn::misc::GMMStats::is_similar_to(const bob::learn::misc::GMMStats& b,
   const double r_epsilon, const double a_epsilon) const
 {
   return (T == b.T &&
@@ -64,7 +64,7 @@ bool bob::machine::GMMStats::is_similar_to(const bob::machine::GMMStats& b,
 }
 
 
-void bob::machine::GMMStats::operator+=(const bob::machine::GMMStats& b) {
+void bob::learn::misc::GMMStats::operator+=(const bob::learn::misc::GMMStats& b) {
   // Check dimensions
   if(n.extent(0) != b.n.extent(0) ||
       sumPx.extent(0) != b.sumPx.extent(0) || sumPx.extent(1) != b.sumPx.extent(1) ||
@@ -80,7 +80,7 @@ void bob::machine::GMMStats::operator+=(const bob::machine::GMMStats& b) {
   sumPxx += b.sumPxx;
 }
 
-void bob::machine::GMMStats::copy(const GMMStats& other) {
+void bob::learn::misc::GMMStats::copy(const GMMStats& other) {
   // Resize arrays
   resize(other.sumPx.extent(0),other.sumPx.extent(1));
   // Copy content
@@ -91,14 +91,14 @@ void bob::machine::GMMStats::copy(const GMMStats& other) {
   sumPxx = other.sumPxx;
 }
 
-void bob::machine::GMMStats::resize(const size_t n_gaussians, const size_t n_inputs) {
+void bob::learn::misc::GMMStats::resize(const size_t n_gaussians, const size_t n_inputs) {
   n.resize(n_gaussians);
   sumPx.resize(n_gaussians, n_inputs);
   sumPxx.resize(n_gaussians, n_inputs);
   init();
 }
 
-void bob::machine::GMMStats::init() {
+void bob::learn::misc::GMMStats::init() {
   log_likelihood = 0;
   T = 0;
   n = 0.0;
@@ -106,7 +106,7 @@ void bob::machine::GMMStats::init() {
   sumPxx = 0.0;
 }
 
-void bob::machine::GMMStats::save(bob::io::base::HDF5File& config) const {
+void bob::learn::misc::GMMStats::save(bob::io::base::HDF5File& config) const {
   //please note we fix the output values to be of a precise type so they can be
   //retrieved at any platform with the exact same precision.
   // TODO: add versioning, replace int64_t by uint64_t and log_liklihood by log_likelihood
@@ -121,7 +121,7 @@ void bob::machine::GMMStats::save(bob::io::base::HDF5File& config) const {
   config.setArray("sumPxx", sumPxx); //Array2d
 }
 
-void bob::machine::GMMStats::load(bob::io::base::HDF5File& config) {
+void bob::learn::misc::GMMStats::load(bob::io::base::HDF5File& config) {
   log_likelihood = config.read<double>("log_liklihood");
   int64_t n_gaussians = config.read<int64_t>("n_gaussians");
   int64_t n_inputs = config.read<int64_t>("n_inputs");
@@ -138,16 +138,14 @@ void bob::machine::GMMStats::load(bob::io::base::HDF5File& config) {
   config.readArray("sumPxx", sumPxx);
 }
 
-namespace bob {
-  namespace machine {
-    std::ostream& operator<<(std::ostream& os, const GMMStats& g) {
-      os << "log_likelihood = " << g.log_likelihood << std::endl;
-      os << "T = " << g.T << std::endl;
-      os << "n = " << g.n;
-      os << "sumPx = " << g.sumPx;
-      os << "sumPxx = " << g.sumPxx;
-
-      return os;
-    }
+namespace bob { namespace learn { namespace misc {
+  std::ostream& operator<<(std::ostream& os, const GMMStats& g) {
+    os << "log_likelihood = " << g.log_likelihood << std::endl;
+    os << "T = " << g.T << std::endl;
+    os << "n = " << g.n;
+    os << "sumPx = " << g.sumPx;
+    os << "sumPxx = " << g.sumPxx;
+
+    return os;
   }
-}
+} } }
diff --git a/bob/learn/misc/cpp/GMMTrainer.cpp b/bob/learn/misc/cpp/GMMTrainer.cpp
index 818dc65..e1eb8db 100644
--- a/bob/learn/misc/cpp/GMMTrainer.cpp
+++ b/bob/learn/misc/cpp/GMMTrainer.cpp
@@ -9,35 +9,35 @@
 #include <bob.core/assert.h>
 #include <bob.core/check.h>
 
-bob::trainer::GMMTrainer::GMMTrainer(const bool update_means,
+bob::learn::misc::GMMTrainer::GMMTrainer(const bool update_means,
     const bool update_variances, const bool update_weights,
     const double mean_var_update_responsibilities_threshold):
-  bob::trainer::EMTrainer<bob::machine::GMMMachine, blitz::Array<double,2> >(),
+  bob::learn::misc::EMTrainer<bob::learn::misc::GMMMachine, blitz::Array<double,2> >(),
   m_update_means(update_means), m_update_variances(update_variances),
   m_update_weights(update_weights),
   m_mean_var_update_responsibilities_threshold(mean_var_update_responsibilities_threshold)
 {
 }
 
-bob::trainer::GMMTrainer::GMMTrainer(const bob::trainer::GMMTrainer& b):
-  bob::trainer::EMTrainer<bob::machine::GMMMachine, blitz::Array<double,2> >(b),
+bob::learn::misc::GMMTrainer::GMMTrainer(const bob::learn::misc::GMMTrainer& b):
+  bob::learn::misc::EMTrainer<bob::learn::misc::GMMMachine, blitz::Array<double,2> >(b),
   m_update_means(b.m_update_means), m_update_variances(b.m_update_variances),
   m_mean_var_update_responsibilities_threshold(b.m_mean_var_update_responsibilities_threshold)
 {
 }
 
-bob::trainer::GMMTrainer::~GMMTrainer()
+bob::learn::misc::GMMTrainer::~GMMTrainer()
 {
 }
 
-void bob::trainer::GMMTrainer::initialize(bob::machine::GMMMachine& gmm,
+void bob::learn::misc::GMMTrainer::initialize(bob::learn::misc::GMMMachine& gmm,
   const blitz::Array<double,2>& data)
 {
   // Allocate memory for the sufficient statistics and initialise
   m_ss.resize(gmm.getNGaussians(),gmm.getNInputs());
 }
 
-void bob::trainer::GMMTrainer::eStep(bob::machine::GMMMachine& gmm,
+void bob::learn::misc::GMMTrainer::eStep(bob::learn::misc::GMMMachine& gmm,
   const blitz::Array<double,2>& data)
 {
   m_ss.init();
@@ -45,22 +45,22 @@ void bob::trainer::GMMTrainer::eStep(bob::machine::GMMMachine& gmm,
   gmm.accStatistics(data, m_ss);
 }
 
-double bob::trainer::GMMTrainer::computeLikelihood(bob::machine::GMMMachine& gmm)
+double bob::learn::misc::GMMTrainer::computeLikelihood(bob::learn::misc::GMMMachine& gmm)
 {
   return m_ss.log_likelihood / m_ss.T;
 }
 
-void bob::trainer::GMMTrainer::finalize(bob::machine::GMMMachine& gmm,
+void bob::learn::misc::GMMTrainer::finalize(bob::learn::misc::GMMMachine& gmm,
   const blitz::Array<double,2>& data)
 {
 }
 
-bob::trainer::GMMTrainer& bob::trainer::GMMTrainer::operator=
-  (const bob::trainer::GMMTrainer &other)
+bob::learn::misc::GMMTrainer& bob::learn::misc::GMMTrainer::operator=
+  (const bob::learn::misc::GMMTrainer &other)
 {
   if (this != &other)
   {
-    bob::trainer::EMTrainer<bob::machine::GMMMachine,
+    bob::learn::misc::EMTrainer<bob::learn::misc::GMMMachine,
       blitz::Array<double,2> >::operator=(other);
     m_ss = other.m_ss;
     m_update_means = other.m_update_means;
@@ -71,10 +71,10 @@ bob::trainer::GMMTrainer& bob::trainer::GMMTrainer::operator=
   return *this;
 }
 
-bool bob::trainer::GMMTrainer::operator==
-  (const bob::trainer::GMMTrainer &other) const
+bool bob::learn::misc::GMMTrainer::operator==
+  (const bob::learn::misc::GMMTrainer &other) const
 {
-  return bob::trainer::EMTrainer<bob::machine::GMMMachine,
+  return bob::learn::misc::EMTrainer<bob::learn::misc::GMMMachine,
            blitz::Array<double,2> >::operator==(other) &&
          m_ss == other.m_ss &&
          m_update_means == other.m_update_means &&
@@ -83,17 +83,17 @@ bool bob::trainer::GMMTrainer::operator==
          m_mean_var_update_responsibilities_threshold == other.m_mean_var_update_responsibilities_threshold;
 }
 
-bool bob::trainer::GMMTrainer::operator!=
-  (const bob::trainer::GMMTrainer &other) const
+bool bob::learn::misc::GMMTrainer::operator!=
+  (const bob::learn::misc::GMMTrainer &other) const
 {
   return !(this->operator==(other));
 }
 
-bool bob::trainer::GMMTrainer::is_similar_to
-  (const bob::trainer::GMMTrainer &other, const double r_epsilon,
+bool bob::learn::misc::GMMTrainer::is_similar_to
+  (const bob::learn::misc::GMMTrainer &other, const double r_epsilon,
    const double a_epsilon) const
 {
-  return bob::trainer::EMTrainer<bob::machine::GMMMachine,
+  return bob::learn::misc::EMTrainer<bob::learn::misc::GMMMachine,
            blitz::Array<double,2> >::operator==(other) &&
   // TODO: use is similar to method for the accumulator m_ss
          m_ss == other.m_ss &&
@@ -104,7 +104,7 @@ bool bob::trainer::GMMTrainer::is_similar_to
           other.m_mean_var_update_responsibilities_threshold, r_epsilon, a_epsilon);
 }
 
-void bob::trainer::GMMTrainer::setGMMStats(const bob::machine::GMMStats& stats)
+void bob::learn::misc::GMMTrainer::setGMMStats(const bob::learn::misc::GMMStats& stats)
 {
   bob::core::array::assertSameShape(m_ss.sumPx, stats.sumPx);
   m_ss = stats;
diff --git a/bob/learn/misc/cpp/Gaussian.cpp b/bob/learn/misc/cpp/Gaussian.cpp
index 7169c5f..75219d1 100644
--- a/bob/learn/misc/cpp/Gaussian.cpp
+++ b/bob/learn/misc/cpp/Gaussian.cpp
@@ -11,44 +11,44 @@
 #include <bob.core/assert.h>
 #include <bob.math/log.h>
 
-bob::machine::Gaussian::Gaussian() {
+bob::learn::misc::Gaussian::Gaussian() {
   resize(0);
 }
 
-bob::machine::Gaussian::Gaussian(const size_t n_inputs) {
+bob::learn::misc::Gaussian::Gaussian(const size_t n_inputs) {
   resize(n_inputs);
 }
 
-bob::machine::Gaussian::Gaussian(const bob::machine::Gaussian& other) {
+bob::learn::misc::Gaussian::Gaussian(const bob::learn::misc::Gaussian& other) {
   copy(other);
 }
 
-bob::machine::Gaussian::Gaussian(bob::io::base::HDF5File& config) {
+bob::learn::misc::Gaussian::Gaussian(bob::io::base::HDF5File& config) {
   load(config);
 }
 
-bob::machine::Gaussian::~Gaussian() {
+bob::learn::misc::Gaussian::~Gaussian() {
 }
 
-bob::machine::Gaussian& bob::machine::Gaussian::operator=(const bob::machine::Gaussian &other) {
+bob::learn::misc::Gaussian& bob::learn::misc::Gaussian::operator=(const bob::learn::misc::Gaussian &other) {
   if(this != &other)
     copy(other);
 
   return *this;
 }
 
-bool bob::machine::Gaussian::operator==(const bob::machine::Gaussian& b) const
+bool bob::learn::misc::Gaussian::operator==(const bob::learn::misc::Gaussian& b) const
 {
   return (bob::core::array::isEqual(m_mean, b.m_mean) &&
           bob::core::array::isEqual(m_variance, b.m_variance) &&
           bob::core::array::isEqual(m_variance_thresholds, b.m_variance_thresholds));
 }
 
-bool bob::machine::Gaussian::operator!=(const bob::machine::Gaussian& b) const {
+bool bob::learn::misc::Gaussian::operator!=(const bob::learn::misc::Gaussian& b) const {
   return !(this->operator==(b));
 }
 
-bool bob::machine::Gaussian::is_similar_to(const bob::machine::Gaussian& b,
+bool bob::learn::misc::Gaussian::is_similar_to(const bob::learn::misc::Gaussian& b,
   const double r_epsilon, const double a_epsilon) const
 {
   return (bob::core::array::isClose(m_mean, b.m_mean, r_epsilon, a_epsilon) &&
@@ -56,7 +56,7 @@ bool bob::machine::Gaussian::is_similar_to(const bob::machine::Gaussian& b,
           bob::core::array::isClose(m_variance_thresholds, b.m_variance_thresholds, r_epsilon, a_epsilon));
 }
 
-void bob::machine::Gaussian::copy(const bob::machine::Gaussian& other) {
+void bob::learn::misc::Gaussian::copy(const bob::learn::misc::Gaussian& other) {
   m_n_inputs = other.m_n_inputs;
 
   m_mean.resize(m_n_inputs);
@@ -73,11 +73,11 @@ void bob::machine::Gaussian::copy(const bob::machine::Gaussian& other) {
 }
 
 
-void bob::machine::Gaussian::setNInputs(const size_t n_inputs) {
+void bob::learn::misc::Gaussian::setNInputs(const size_t n_inputs) {
   resize(n_inputs);
 }
 
-void bob::machine::Gaussian::resize(const size_t n_inputs) {
+void bob::learn::misc::Gaussian::resize(const size_t n_inputs) {
   m_n_inputs = n_inputs;
   m_mean.resize(m_n_inputs);
   m_mean = 0;
@@ -92,13 +92,13 @@ void bob::machine::Gaussian::resize(const size_t n_inputs) {
   preComputeConstants();
 }
 
-void bob::machine::Gaussian::setMean(const blitz::Array<double,1> &mean) {
+void bob::learn::misc::Gaussian::setMean(const blitz::Array<double,1> &mean) {
   // Check and set
   bob::core::array::assertSameShape(m_mean, mean);
   m_mean = mean;
 }
 
-void bob::machine::Gaussian::setVariance(const blitz::Array<double,1> &variance) {
+void bob::learn::misc::Gaussian::setVariance(const blitz::Array<double,1> &variance) {
   // Check and set
   bob::core::array::assertSameShape(m_variance, variance);
   m_variance = variance;
@@ -107,7 +107,7 @@ void bob::machine::Gaussian::setVariance(const blitz::Array<double,1> &variance)
   applyVarianceThresholds();
 }
 
-void bob::machine::Gaussian::setVarianceThresholds(const blitz::Array<double,1> &variance_thresholds) {
+void bob::learn::misc::Gaussian::setVarianceThresholds(const blitz::Array<double,1> &variance_thresholds) {
   // Check and set
   bob::core::array::assertSameShape(m_variance_thresholds, variance_thresholds);
   m_variance_thresholds = variance_thresholds;
@@ -116,13 +116,13 @@ void bob::machine::Gaussian::setVarianceThresholds(const blitz::Array<double,1>
   applyVarianceThresholds();
 }
 
-void bob::machine::Gaussian::setVarianceThresholds(const double value) {
+void bob::learn::misc::Gaussian::setVarianceThresholds(const double value) {
   blitz::Array<double,1> variance_thresholds(m_n_inputs);
   variance_thresholds = value;
   setVarianceThresholds(variance_thresholds);
 }
 
-void bob::machine::Gaussian::applyVarianceThresholds() {
+void bob::learn::misc::Gaussian::applyVarianceThresholds() {
    // Apply variance flooring threshold
   m_variance = blitz::where( m_variance < m_variance_thresholds, m_variance_thresholds, m_variance);
 
@@ -130,27 +130,27 @@ void bob::machine::Gaussian::applyVarianceThresholds() {
   preComputeConstants();
 }
 
-double bob::machine::Gaussian::logLikelihood(const blitz::Array<double,1> &x) const {
+double bob::learn::misc::Gaussian::logLikelihood(const blitz::Array<double,1> &x) const {
   // Check
   bob::core::array::assertSameShape(x, m_mean);
   return logLikelihood_(x);
 }
 
-double bob::machine::Gaussian::logLikelihood_(const blitz::Array<double,1> &x) const {
+double bob::learn::misc::Gaussian::logLikelihood_(const blitz::Array<double,1> &x) const {
   double z = blitz::sum(blitz::pow2(x - m_mean) / m_variance);
   // Log Likelihood
   return (-0.5 * (m_g_norm + z));
 }
 
-void bob::machine::Gaussian::preComputeNLog2Pi() {
+void bob::learn::misc::Gaussian::preComputeNLog2Pi() {
   m_n_log2pi = m_n_inputs * bob::math::Log::Log2Pi;
 }
 
-void bob::machine::Gaussian::preComputeConstants() {
+void bob::learn::misc::Gaussian::preComputeConstants() {
   m_g_norm = m_n_log2pi + blitz::sum(blitz::log(m_variance));
 }
 
-void bob::machine::Gaussian::save(bob::io::base::HDF5File& config) const {
+void bob::learn::misc::Gaussian::save(bob::io::base::HDF5File& config) const {
   config.setArray("m_mean", m_mean);
   config.setArray("m_variance", m_variance);
   config.setArray("m_variance_thresholds", m_variance_thresholds);
@@ -159,7 +159,7 @@ void bob::machine::Gaussian::save(bob::io::base::HDF5File& config) const {
   config.set("m_n_inputs", v);
 }
 
-void bob::machine::Gaussian::load(bob::io::base::HDF5File& config) {
+void bob::learn::misc::Gaussian::load(bob::io::base::HDF5File& config) {
   int64_t v = config.read<int64_t>("m_n_inputs");
   m_n_inputs = static_cast<size_t>(v);
 
@@ -175,12 +175,10 @@ void bob::machine::Gaussian::load(bob::io::base::HDF5File& config) {
   m_g_norm = config.read<double>("g_norm");
 }
 
-namespace bob{
-  namespace machine{
-    std::ostream& operator<<(std::ostream& os, const Gaussian& g) {
-      os << "Mean = " << g.m_mean << std::endl;
-      os << "Variance = " << g.m_variance << std::endl;
-      return os;
-    }
+namespace bob { namespace learn { namespace misc {
+  std::ostream& operator<<(std::ostream& os, const Gaussian& g) {
+    os << "Mean = " << g.m_mean << std::endl;
+    os << "Variance = " << g.m_variance << std::endl;
+    return os;
   }
-}
+} } }
diff --git a/bob/learn/misc/cpp/IVectorMachine.cpp b/bob/learn/misc/cpp/IVectorMachine.cpp
index c022a80..b717c70 100644
--- a/bob/learn/misc/cpp/IVectorMachine.cpp
+++ b/bob/learn/misc/cpp/IVectorMachine.cpp
@@ -11,11 +11,11 @@
 #include <bob.math/linear.h>
 #include <bob.math/linsolve.h>
 
-bob::machine::IVectorMachine::IVectorMachine()
+bob::learn::misc::IVectorMachine::IVectorMachine()
 {
 }
 
-bob::machine::IVectorMachine::IVectorMachine(const boost::shared_ptr<bob::machine::GMMMachine> ubm,
+bob::learn::misc::IVectorMachine::IVectorMachine(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm,
     const size_t rt, const double variance_threshold):
   m_ubm(ubm), m_rt(rt),
   m_T(getDimCD(),rt), m_sigma(getDimCD()),
@@ -24,7 +24,7 @@ bob::machine::IVectorMachine::IVectorMachine(const boost::shared_ptr<bob::machin
   resizePrecompute();
 }
 
-bob::machine::IVectorMachine::IVectorMachine(const bob::machine::IVectorMachine& other):
+bob::learn::misc::IVectorMachine::IVectorMachine(const bob::learn::misc::IVectorMachine& other):
   m_ubm(other.m_ubm), m_rt(other.m_rt),
   m_T(bob::core::array::ccopy(other.m_T)),
   m_sigma(bob::core::array::ccopy(other.m_sigma)),
@@ -33,22 +33,22 @@ bob::machine::IVectorMachine::IVectorMachine(const bob::machine::IVectorMachine&
   resizePrecompute();
 }
 
-bob::machine::IVectorMachine::IVectorMachine(bob::io::base::HDF5File& config)
+bob::learn::misc::IVectorMachine::IVectorMachine(bob::io::base::HDF5File& config)
 {
   load(config);
 }
 
-bob::machine::IVectorMachine::~IVectorMachine() {
+bob::learn::misc::IVectorMachine::~IVectorMachine() {
 }
 
-void bob::machine::IVectorMachine::save(bob::io::base::HDF5File& config) const
+void bob::learn::misc::IVectorMachine::save(bob::io::base::HDF5File& config) const
 {
   config.setArray("m_T", m_T);
   config.setArray("m_sigma", m_sigma);
   config.set("m_variance_threshold", m_variance_threshold);
 }
 
-void bob::machine::IVectorMachine::load(bob::io::base::HDF5File& config)
+void bob::learn::misc::IVectorMachine::load(bob::io::base::HDF5File& config)
 {
   //reads all data directly into the member variables
   m_T.reference(config.readArray<double,2>("m_T"));
@@ -58,15 +58,15 @@ void bob::machine::IVectorMachine::load(bob::io::base::HDF5File& config)
   resizePrecompute();
 }
 
-void bob::machine::IVectorMachine::resize(const size_t rt)
+void bob::learn::misc::IVectorMachine::resize(const size_t rt)
 {
   m_rt = rt;
   m_T.resizeAndPreserve(m_T.extent(0), rt);
   resizePrecompute();
 }
 
-bob::machine::IVectorMachine&
-bob::machine::IVectorMachine::operator=(const bob::machine::IVectorMachine& other)
+bob::learn::misc::IVectorMachine&
+bob::learn::misc::IVectorMachine::operator=(const bob::learn::misc::IVectorMachine& other)
 {
   if (this != &other)
   {
@@ -80,7 +80,7 @@ bob::machine::IVectorMachine::operator=(const bob::machine::IVectorMachine& othe
   return *this;
 }
 
-bool bob::machine::IVectorMachine::operator==(const IVectorMachine& b) const
+bool bob::learn::misc::IVectorMachine::operator==(const IVectorMachine& b) const
 {
   return (((m_ubm && b.m_ubm) && *m_ubm == *(b.m_ubm)) || (!m_ubm && !b.m_ubm)) &&
          m_rt == b.m_rt &&
@@ -89,12 +89,12 @@ bool bob::machine::IVectorMachine::operator==(const IVectorMachine& b) const
          m_variance_threshold == b.m_variance_threshold;
 }
 
-bool bob::machine::IVectorMachine::operator!=(const bob::machine::IVectorMachine& b) const
+bool bob::learn::misc::IVectorMachine::operator!=(const bob::learn::misc::IVectorMachine& b) const
 {
   return !(this->operator==(b));
 }
 
-bool bob::machine::IVectorMachine::is_similar_to(const IVectorMachine& b,
+bool bob::learn::misc::IVectorMachine::is_similar_to(const IVectorMachine& b,
   const double r_epsilon, const double a_epsilon) const
 {
   // TODO: update with new is_similar_to method
@@ -105,13 +105,13 @@ bool bob::machine::IVectorMachine::is_similar_to(const IVectorMachine& b,
           bob::core::isClose(m_variance_threshold, b.m_variance_threshold, r_epsilon, a_epsilon);
 }
 
-void bob::machine::IVectorMachine::setUbm(const boost::shared_ptr<bob::machine::GMMMachine> ubm)
+void bob::learn::misc::IVectorMachine::setUbm(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm)
 {
   m_ubm = ubm;
   resizePrecompute();
 }
 
-void bob::machine::IVectorMachine::setT(const blitz::Array<double,2>& T)
+void bob::learn::misc::IVectorMachine::setT(const blitz::Array<double,2>& T)
 {
   bob::core::array::assertSameShape(m_T, T);
   m_T = T;
@@ -119,7 +119,7 @@ void bob::machine::IVectorMachine::setT(const blitz::Array<double,2>& T)
   precompute();
 }
 
-void bob::machine::IVectorMachine::setSigma(const blitz::Array<double,1>& sigma)
+void bob::learn::misc::IVectorMachine::setSigma(const blitz::Array<double,1>& sigma)
 {
   bob::core::array::assertSameShape(m_sigma, sigma);
   m_sigma = sigma;
@@ -128,20 +128,20 @@ void bob::machine::IVectorMachine::setSigma(const blitz::Array<double,1>& sigma)
 }
 
 
-void bob::machine::IVectorMachine::setVarianceThreshold(const double thd)
+void bob::learn::misc::IVectorMachine::setVarianceThreshold(const double thd)
 {
   m_variance_threshold = thd;
   // Update cache
   precompute();
 }
 
-void bob::machine::IVectorMachine::applyVarianceThreshold()
+void bob::learn::misc::IVectorMachine::applyVarianceThreshold()
 {
   // Apply variance flooring threshold
   m_sigma = blitz::where(m_sigma < m_variance_threshold, m_variance_threshold, m_sigma);
 }
 
-void bob::machine::IVectorMachine::precompute()
+void bob::learn::misc::IVectorMachine::precompute()
 {
   if (m_ubm)
   {
@@ -174,14 +174,14 @@ void bob::machine::IVectorMachine::precompute()
   }
 }
 
-void bob::machine::IVectorMachine::resizePrecompute()
+void bob::learn::misc::IVectorMachine::resizePrecompute()
 {
   resizeCache();
   resizeTmp();
   precompute();
 }
 
-void bob::machine::IVectorMachine::resizeCache()
+void bob::learn::misc::IVectorMachine::resizeCache()
 {
   if (m_ubm)
   {
@@ -192,7 +192,7 @@ void bob::machine::IVectorMachine::resizeCache()
   }
 }
 
-void bob::machine::IVectorMachine::resizeTmp()
+void bob::learn::misc::IVectorMachine::resizeTmp()
 {
   if (m_ubm)
     m_tmp_d.resize(m_ubm->getNInputs());
@@ -201,15 +201,15 @@ void bob::machine::IVectorMachine::resizeTmp()
   m_tmp_tt.resize(m_rt, m_rt);
 }
 
-void bob::machine::IVectorMachine::forward(const bob::machine::GMMStats& gs,
+void bob::learn::misc::IVectorMachine::forward(const bob::learn::misc::GMMStats& gs,
   blitz::Array<double,1>& ivector) const
 {
   bob::core::array::assertSameDimensionLength(ivector.extent(0), (int)m_rt);
   forward_(gs, ivector);
 }
 
-void bob::machine::IVectorMachine::computeIdTtSigmaInvT(
-  const bob::machine::GMMStats& gs, blitz::Array<double,2>& output) const
+void bob::learn::misc::IVectorMachine::computeIdTtSigmaInvT(
+  const bob::learn::misc::GMMStats& gs, blitz::Array<double,2>& output) const
 {
   // Computes \f$(Id + \sum_{c=1}^{C} N_{i,j,c} T^{T} \Sigma_{c}^{-1} T)\f$
   blitz::Range rall = blitz::Range::all();
@@ -218,8 +218,8 @@ void bob::machine::IVectorMachine::computeIdTtSigmaInvT(
     output += gs.n(c) * m_cache_Tct_sigmacInv_Tc(c, rall, rall);
 }
 
-void bob::machine::IVectorMachine::computeTtSigmaInvFnorm(
-  const bob::machine::GMMStats& gs, blitz::Array<double,1>& output) const
+void bob::learn::misc::IVectorMachine::computeTtSigmaInvFnorm(
+  const bob::learn::misc::GMMStats& gs, blitz::Array<double,1>& output) const
 {
   // Computes \f$T^{T} \Sigma^{-1} \sum_{c=1}^{C} (F_c - N_c ubmmean_{c})\f$
   blitz::Range rall = blitz::Range::all();
@@ -233,7 +233,7 @@ void bob::machine::IVectorMachine::computeTtSigmaInvFnorm(
   }
 }
 
-void bob::machine::IVectorMachine::forward_(const bob::machine::GMMStats& gs,
+void bob::learn::misc::IVectorMachine::forward_(const bob::learn::misc::GMMStats& gs,
   blitz::Array<double,1>& ivector) const
 {
   // Computes \f$(Id + \sum_{c=1}^{C} N_{i,j,c} T^{T} \Sigma_{c}^{-1} T)\f$
diff --git a/bob/learn/misc/cpp/IVectorTrainer.cpp b/bob/learn/misc/cpp/IVectorTrainer.cpp
index 538f587..be843b2 100644
--- a/bob/learn/misc/cpp/IVectorTrainer.cpp
+++ b/bob/learn/misc/cpp/IVectorTrainer.cpp
@@ -17,19 +17,19 @@
 #include <boost/shared_ptr.hpp>
 #include <boost/random.hpp>
 
-bob::trainer::IVectorTrainer::IVectorTrainer(const bool update_sigma,
+bob::learn::misc::IVectorTrainer::IVectorTrainer(const bool update_sigma,
     const double convergence_threshold,
     const size_t max_iterations, bool compute_likelihood):
-  bob::trainer::EMTrainer<bob::machine::IVectorMachine,
-    std::vector<bob::machine::GMMStats> >(convergence_threshold,
+  bob::learn::misc::EMTrainer<bob::learn::misc::IVectorMachine,
+    std::vector<bob::learn::misc::GMMStats> >(convergence_threshold,
       max_iterations, compute_likelihood),
   m_update_sigma(update_sigma)
 {
 }
 
-bob::trainer::IVectorTrainer::IVectorTrainer(const bob::trainer::IVectorTrainer& other):
-  bob::trainer::EMTrainer<bob::machine::IVectorMachine,
-    std::vector<bob::machine::GMMStats> >(other),
+bob::learn::misc::IVectorTrainer::IVectorTrainer(const bob::learn::misc::IVectorTrainer& other):
+  bob::learn::misc::EMTrainer<bob::learn::misc::IVectorMachine,
+    std::vector<bob::learn::misc::GMMStats> >(other),
   m_update_sigma(other.m_update_sigma)
 {
   m_acc_Nij_wij2.reference(bob::core::array::ccopy(other.m_acc_Nij_wij2));
@@ -47,13 +47,13 @@ bob::trainer::IVectorTrainer::IVectorTrainer(const bob::trainer::IVectorTrainer&
   m_tmp_tt2.reference(bob::core::array::ccopy(other.m_tmp_tt2));
 }
 
-bob::trainer::IVectorTrainer::~IVectorTrainer()
+bob::learn::misc::IVectorTrainer::~IVectorTrainer()
 {
 }
 
-void bob::trainer::IVectorTrainer::initialize(
-  bob::machine::IVectorMachine& machine,
-  const std::vector<bob::machine::GMMStats>& data)
+void bob::learn::misc::IVectorTrainer::initialize(
+  bob::learn::misc::IVectorMachine& machine,
+  const std::vector<bob::learn::misc::GMMStats>& data)
 {
   const int C = machine.getDimC();
   const int D = machine.getDimD();
@@ -87,9 +87,9 @@ void bob::trainer::IVectorTrainer::initialize(
   machine.precompute();
 }
 
-void bob::trainer::IVectorTrainer::eStep(
-  bob::machine::IVectorMachine& machine,
-  const std::vector<bob::machine::GMMStats>& data)
+void bob::learn::misc::IVectorTrainer::eStep(
+  bob::learn::misc::IVectorMachine& machine,
+  const std::vector<bob::learn::misc::GMMStats>& data)
 {
   blitz::Range rall = blitz::Range::all();
   const int C = machine.getDimC();
@@ -102,7 +102,7 @@ void bob::trainer::IVectorTrainer::eStep(
     m_acc_Nij = 0.;
     m_acc_Snormij = 0.;
   }
-  for (std::vector<bob::machine::GMMStats>::const_iterator it = data.begin();
+  for (std::vector<bob::learn::misc::GMMStats>::const_iterator it = data.begin();
        it != data.end(); ++it)
   {
     // Computes E{wij} and E{wij.wij^{T}}
@@ -144,9 +144,9 @@ void bob::trainer::IVectorTrainer::eStep(
   }
 }
 
-void bob::trainer::IVectorTrainer::mStep(
-  bob::machine::IVectorMachine& machine,
-  const std::vector<bob::machine::GMMStats>& data)
+void bob::learn::misc::IVectorTrainer::mStep(
+  bob::learn::misc::IVectorMachine& machine,
+  const std::vector<bob::learn::misc::GMMStats>& data)
 {
   blitz::Range rall = blitz::Range::all();
   blitz::Array<double,2>& T = machine.updateT();
@@ -179,26 +179,26 @@ void bob::trainer::IVectorTrainer::mStep(
 }
 
 
-double bob::trainer::IVectorTrainer::computeLikelihood(
-  bob::machine::IVectorMachine& machine)
+double bob::learn::misc::IVectorTrainer::computeLikelihood(
+  bob::learn::misc::IVectorMachine& machine)
 {
   // TODO: implementation
   return 0;
 }
 
-void bob::trainer::IVectorTrainer::finalize(
-  bob::machine::IVectorMachine& machine,
-  const std::vector<bob::machine::GMMStats>& data)
+void bob::learn::misc::IVectorTrainer::finalize(
+  bob::learn::misc::IVectorMachine& machine,
+  const std::vector<bob::learn::misc::GMMStats>& data)
 {
 }
 
-bob::trainer::IVectorTrainer& bob::trainer::IVectorTrainer::operator=
-  (const bob::trainer::IVectorTrainer &other)
+bob::learn::misc::IVectorTrainer& bob::learn::misc::IVectorTrainer::operator=
+  (const bob::learn::misc::IVectorTrainer &other)
 {
   if (this != &other)
   {
-    bob::trainer::EMTrainer<bob::machine::IVectorMachine,
-      std::vector<bob::machine::GMMStats> >::operator=(other);
+    bob::learn::misc::EMTrainer<bob::learn::misc::IVectorMachine,
+      std::vector<bob::learn::misc::GMMStats> >::operator=(other);
     m_update_sigma = other.m_update_sigma;
 
     m_acc_Nij_wij2.reference(bob::core::array::ccopy(other.m_acc_Nij_wij2));
@@ -218,11 +218,11 @@ bob::trainer::IVectorTrainer& bob::trainer::IVectorTrainer::operator=
   return *this;
 }
 
-bool bob::trainer::IVectorTrainer::operator==
-  (const bob::trainer::IVectorTrainer &other) const
+bool bob::learn::misc::IVectorTrainer::operator==
+  (const bob::learn::misc::IVectorTrainer &other) const
 {
-  return bob::trainer::EMTrainer<bob::machine::IVectorMachine,
-           std::vector<bob::machine::GMMStats> >::operator==(other) &&
+  return bob::learn::misc::EMTrainer<bob::learn::misc::IVectorMachine,
+           std::vector<bob::learn::misc::GMMStats> >::operator==(other) &&
         m_update_sigma == other.m_update_sigma &&
         bob::core::array::isEqual(m_acc_Nij_wij2, other.m_acc_Nij_wij2) &&
         bob::core::array::isEqual(m_acc_Fnormij_wij, other.m_acc_Fnormij_wij) &&
@@ -230,18 +230,18 @@ bool bob::trainer::IVectorTrainer::operator==
         bob::core::array::isEqual(m_acc_Snormij, other.m_acc_Snormij);
 }
 
-bool bob::trainer::IVectorTrainer::operator!=
-  (const bob::trainer::IVectorTrainer &other) const
+bool bob::learn::misc::IVectorTrainer::operator!=
+  (const bob::learn::misc::IVectorTrainer &other) const
 {
   return !(this->operator==(other));
 }
 
-bool bob::trainer::IVectorTrainer::is_similar_to
-  (const bob::trainer::IVectorTrainer &other, const double r_epsilon,
+bool bob::learn::misc::IVectorTrainer::is_similar_to
+  (const bob::learn::misc::IVectorTrainer &other, const double r_epsilon,
    const double a_epsilon) const
 {
-  return bob::trainer::EMTrainer<bob::machine::IVectorMachine,
-           std::vector<bob::machine::GMMStats> >::is_similar_to(other, r_epsilon, a_epsilon) &&
+  return bob::learn::misc::EMTrainer<bob::learn::misc::IVectorMachine,
+           std::vector<bob::learn::misc::GMMStats> >::is_similar_to(other, r_epsilon, a_epsilon) &&
         m_update_sigma == other.m_update_sigma &&
         bob::core::array::isClose(m_acc_Nij_wij2, other.m_acc_Nij_wij2, r_epsilon, a_epsilon) &&
         bob::core::array::isClose(m_acc_Fnormij_wij, other.m_acc_Fnormij_wij, r_epsilon, a_epsilon) &&
diff --git a/bob/learn/misc/cpp/JFAMachine.cpp b/bob/learn/misc/cpp/JFAMachine.cpp
index 7324cf1..bccbc49 100644
--- a/bob/learn/misc/cpp/JFAMachine.cpp
+++ b/bob/learn/misc/cpp/JFAMachine.cpp
@@ -15,13 +15,13 @@
 
 
 //////////////////// FABase ////////////////////
-bob::machine::FABase::FABase():
-  m_ubm(boost::shared_ptr<bob::machine::GMMMachine>()), m_ru(1), m_rv(1),
+bob::learn::misc::FABase::FABase():
+  m_ubm(boost::shared_ptr<bob::learn::misc::GMMMachine>()), m_ru(1), m_rv(1),
   m_U(0,1), m_V(0,1), m_d(0)
 {
 }
 
-bob::machine::FABase::FABase(const boost::shared_ptr<bob::machine::GMMMachine> ubm,
+bob::learn::misc::FABase::FABase(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm,
     const size_t ru, const size_t rv):
   m_ubm(ubm), m_ru(ru), m_rv(rv),
   m_U(getDimCD(),ru), m_V(getDimCD(),rv), m_d(getDimCD())
@@ -39,7 +39,7 @@ bob::machine::FABase::FABase(const boost::shared_ptr<bob::machine::GMMMachine> u
   updateCache();
 }
 
-bob::machine::FABase::FABase(const bob::machine::FABase& other):
+bob::learn::misc::FABase::FABase(const bob::learn::misc::FABase& other):
   m_ubm(other.m_ubm), m_ru(other.m_ru), m_rv(other.m_rv),
   m_U(bob::core::array::ccopy(other.m_U)),
   m_V(bob::core::array::ccopy(other.m_V)),
@@ -48,11 +48,11 @@ bob::machine::FABase::FABase(const bob::machine::FABase& other):
   updateCache();
 }
 
-bob::machine::FABase::~FABase() {
+bob::learn::misc::FABase::~FABase() {
 }
 
-bob::machine::FABase& bob::machine::FABase::operator=
-(const bob::machine::FABase& other)
+bob::learn::misc::FABase& bob::learn::misc::FABase::operator=
+(const bob::learn::misc::FABase& other)
 {
   if (this != &other)
   {
@@ -68,7 +68,7 @@ bob::machine::FABase& bob::machine::FABase::operator=
   return *this;
 }
 
-bool bob::machine::FABase::operator==(const bob::machine::FABase& b) const
+bool bob::learn::misc::FABase::operator==(const bob::learn::misc::FABase& b) const
 {
   return ( (((m_ubm && b.m_ubm) && *m_ubm == *(b.m_ubm)) || (!m_ubm && !b.m_ubm)) &&
           m_ru == b.m_ru && m_rv == b.m_rv &&
@@ -77,12 +77,12 @@ bool bob::machine::FABase::operator==(const bob::machine::FABase& b) const
           bob::core::array::isEqual(m_d, b.m_d));
 }
 
-bool bob::machine::FABase::operator!=(const bob::machine::FABase& b) const
+bool bob::learn::misc::FABase::operator!=(const bob::learn::misc::FABase& b) const
 {
   return !(this->operator==(b));
 }
 
-bool bob::machine::FABase::is_similar_to(const bob::machine::FABase& b,
+bool bob::learn::misc::FABase::is_similar_to(const bob::learn::misc::FABase& b,
     const double r_epsilon, const double a_epsilon) const
 {
   // TODO: update is_similar_to of the GMMMachine with the 2 epsilon's
@@ -94,7 +94,7 @@ bool bob::machine::FABase::is_similar_to(const bob::machine::FABase& b,
           bob::core::array::isClose(m_d, b.m_d, r_epsilon, a_epsilon));
 }
 
-void bob::machine::FABase::resize(const size_t ru, const size_t rv)
+void bob::learn::misc::FABase::resize(const size_t ru, const size_t rv)
 {
   if (ru < 1) {
     boost::format m("value for parameter `ru' (%lu) cannot be smaller than 1");
@@ -115,7 +115,7 @@ void bob::machine::FABase::resize(const size_t ru, const size_t rv)
   updateCacheUbmUVD();
 }
 
-void bob::machine::FABase::resize(const size_t ru, const size_t rv, const size_t cd)
+void bob::learn::misc::FABase::resize(const size_t ru, const size_t rv, const size_t cd)
 {
   if (ru < 1) {
     boost::format m("value for parameter `ru' (%lu) cannot be smaller than 1");
@@ -145,7 +145,7 @@ void bob::machine::FABase::resize(const size_t ru, const size_t rv, const size_t
   }
 }
 
-void bob::machine::FABase::setUbm(const boost::shared_ptr<bob::machine::GMMMachine> ubm)
+void bob::learn::misc::FABase::setUbm(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm)
 {
   m_ubm = ubm;
   m_U.resizeAndPreserve(getDimCD(), m_ru);
@@ -155,7 +155,7 @@ void bob::machine::FABase::setUbm(const boost::shared_ptr<bob::machine::GMMMachi
   updateCache();
 }
 
-void bob::machine::FABase::setU(const blitz::Array<double,2>& U)
+void bob::learn::misc::FABase::setU(const blitz::Array<double,2>& U)
 {
   if(U.extent(0) != m_U.extent(0)) { //checks dimension
     boost::format m("number of rows in parameter `U' (%d) does not match the expected size (%d)");
@@ -173,7 +173,7 @@ void bob::machine::FABase::setU(const blitz::Array<double,2>& U)
   updateCacheUbmUVD();
 }
 
-void bob::machine::FABase::setV(const blitz::Array<double,2>& V)
+void bob::learn::misc::FABase::setV(const blitz::Array<double,2>& V)
 {
   if(V.extent(0) != m_V.extent(0)) { //checks dimension
     boost::format m("number of rows in parameter `V' (%d) does not match the expected size (%d)");
@@ -188,7 +188,7 @@ void bob::machine::FABase::setV(const blitz::Array<double,2>& V)
   m_V.reference(bob::core::array::ccopy(V));
 }
 
-void bob::machine::FABase::setD(const blitz::Array<double,1>& d)
+void bob::learn::misc::FABase::setD(const blitz::Array<double,1>& d)
 {
   if(d.extent(0) != m_d.extent(0)) { //checks dimension
     boost::format m("size of input vector `d' (%d) does not match the expected size (%d)");
@@ -199,14 +199,14 @@ void bob::machine::FABase::setD(const blitz::Array<double,1>& d)
 }
 
 
-void bob::machine::FABase::updateCache()
+void bob::learn::misc::FABase::updateCache()
 {
   updateCacheUbm();
   updateCacheUbmUVD();
   resizeTmp();
 }
 
-void bob::machine::FABase::resizeTmp()
+void bob::learn::misc::FABase::resizeTmp()
 {
   m_tmp_IdPlusUSProdInv.resize(getDimRu(),getDimRu());
   m_tmp_Fn_x.resize(getDimCD());
@@ -215,7 +215,7 @@ void bob::machine::FABase::resizeTmp()
   m_tmp_ruru.resize(getDimRu(), getDimRu());
 }
 
-void bob::machine::FABase::updateCacheUbm()
+void bob::learn::misc::FABase::updateCacheUbm()
 {
   // Put supervectors in cache
   if (m_ubm)
@@ -227,7 +227,7 @@ void bob::machine::FABase::updateCacheUbm()
   }
 }
 
-void bob::machine::FABase::updateCacheUbmUVD()
+void bob::learn::misc::FABase::updateCacheUbmUVD()
 {
   // Compute and put  U^{T}.diag(sigma)^{-1} in cache
   if (m_ubm)
@@ -239,7 +239,7 @@ void bob::machine::FABase::updateCacheUbmUVD()
   }
 }
 
-void bob::machine::FABase::computeIdPlusUSProdInv(const bob::machine::GMMStats& gmm_stats,
+void bob::learn::misc::FABase::computeIdPlusUSProdInv(const bob::learn::misc::GMMStats& gmm_stats,
   blitz::Array<double,2>& output) const
 {
   // Computes (Id + U^T.Sigma^-1.U.N_{i,h}.U)^-1 =
@@ -273,7 +273,7 @@ void bob::machine::FABase::computeIdPlusUSProdInv(const bob::machine::GMMStats&
 }
 
 
-void bob::machine::FABase::computeFn_x(const bob::machine::GMMStats& gmm_stats,
+void bob::learn::misc::FABase::computeFn_x(const bob::learn::misc::GMMStats& gmm_stats,
   blitz::Array<double,1>& output) const
 {
   // Compute Fn_x = sum_{sessions h}(N*(o - m) (Normalised first order statistics)
@@ -288,7 +288,7 @@ void bob::machine::FABase::computeFn_x(const bob::machine::GMMStats& gmm_stats,
   }
 }
 
-void bob::machine::FABase::estimateX(const blitz::Array<double,2>& IdPlusUSProdInv,
+void bob::learn::misc::FABase::estimateX(const blitz::Array<double,2>& IdPlusUSProdInv,
   const blitz::Array<double,1>& Fn_x, blitz::Array<double,1>& x) const
 {
   // m_tmp_ru = UtSigmaInv * Fn_x = Ut*diag(sigma)^-1 * N*(o - m)
@@ -298,7 +298,7 @@ void bob::machine::FABase::estimateX(const blitz::Array<double,2>& IdPlusUSProdI
 }
 
 
-void bob::machine::FABase::estimateX(const bob::machine::GMMStats& gmm_stats, blitz::Array<double,1>& x) const
+void bob::learn::misc::FABase::estimateX(const bob::learn::misc::GMMStats& gmm_stats, blitz::Array<double,1>& x) const
 {
   if (!m_ubm) throw std::runtime_error("No UBM was set in the JFA machine.");
   computeIdPlusUSProdInv(gmm_stats, m_tmp_IdPlusUSProdInv); // Computes first term
@@ -309,38 +309,38 @@ void bob::machine::FABase::estimateX(const bob::machine::GMMStats& gmm_stats, bl
 
 
 //////////////////// JFABase ////////////////////
-bob::machine::JFABase::JFABase()
+bob::learn::misc::JFABase::JFABase()
 {
 }
 
-bob::machine::JFABase::JFABase(const boost::shared_ptr<bob::machine::GMMMachine> ubm,
+bob::learn::misc::JFABase::JFABase(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm,
     const size_t ru, const size_t rv):
   m_base(ubm, ru, rv)
 {
 }
 
-bob::machine::JFABase::JFABase(const bob::machine::JFABase& other):
+bob::learn::misc::JFABase::JFABase(const bob::learn::misc::JFABase& other):
   m_base(other.m_base)
 {
 }
 
 
-bob::machine::JFABase::JFABase(bob::io::base::HDF5File& config)
+bob::learn::misc::JFABase::JFABase(bob::io::base::HDF5File& config)
 {
   load(config);
 }
 
-bob::machine::JFABase::~JFABase() {
+bob::learn::misc::JFABase::~JFABase() {
 }
 
-void bob::machine::JFABase::save(bob::io::base::HDF5File& config) const
+void bob::learn::misc::JFABase::save(bob::io::base::HDF5File& config) const
 {
   config.setArray("U", m_base.getU());
   config.setArray("V", m_base.getV());
   config.setArray("d", m_base.getD());
 }
 
-void bob::machine::JFABase::load(bob::io::base::HDF5File& config)
+void bob::learn::misc::JFABase::load(bob::io::base::HDF5File& config)
 {
   //reads all data directly into the member variables
   blitz::Array<double,2> U = config.readArray<double,2>("U");
@@ -357,8 +357,8 @@ void bob::machine::JFABase::load(bob::io::base::HDF5File& config)
   m_base.setD(d);
 }
 
-bob::machine::JFABase&
-bob::machine::JFABase::operator=(const bob::machine::JFABase& other)
+bob::learn::misc::JFABase&
+bob::learn::misc::JFABase::operator=(const bob::learn::misc::JFABase& other)
 {
   if (this != &other)
   {
@@ -369,11 +369,11 @@ bob::machine::JFABase::operator=(const bob::machine::JFABase& other)
 
 
 //////////////////// ISVBase ////////////////////
-bob::machine::ISVBase::ISVBase()
+bob::learn::misc::ISVBase::ISVBase()
 {
 }
 
-bob::machine::ISVBase::ISVBase(const boost::shared_ptr<bob::machine::GMMMachine> ubm,
+bob::learn::misc::ISVBase::ISVBase(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm,
     const size_t ru):
   m_base(ubm, ru, 1)
 {
@@ -381,27 +381,27 @@ bob::machine::ISVBase::ISVBase(const boost::shared_ptr<bob::machine::GMMMachine>
   V = 0;
 }
 
-bob::machine::ISVBase::ISVBase(const bob::machine::ISVBase& other):
+bob::learn::misc::ISVBase::ISVBase(const bob::learn::misc::ISVBase& other):
   m_base(other.m_base)
 {
 }
 
 
-bob::machine::ISVBase::ISVBase(bob::io::base::HDF5File& config)
+bob::learn::misc::ISVBase::ISVBase(bob::io::base::HDF5File& config)
 {
   load(config);
 }
 
-bob::machine::ISVBase::~ISVBase() {
+bob::learn::misc::ISVBase::~ISVBase() {
 }
 
-void bob::machine::ISVBase::save(bob::io::base::HDF5File& config) const
+void bob::learn::misc::ISVBase::save(bob::io::base::HDF5File& config) const
 {
   config.setArray("U", m_base.getU());
   config.setArray("d", m_base.getD());
 }
 
-void bob::machine::ISVBase::load(bob::io::base::HDF5File& config)
+void bob::learn::misc::ISVBase::load(bob::io::base::HDF5File& config)
 {
   //reads all data directly into the member variables
   blitz::Array<double,2> U = config.readArray<double,2>("U");
@@ -417,8 +417,8 @@ void bob::machine::ISVBase::load(bob::io::base::HDF5File& config)
   V = 0;
 }
 
-bob::machine::ISVBase&
-bob::machine::ISVBase::operator=(const bob::machine::ISVBase& other)
+bob::learn::misc::ISVBase&
+bob::learn::misc::ISVBase::operator=(const bob::learn::misc::ISVBase& other)
 {
   if (this != &other)
   {
@@ -430,13 +430,13 @@ bob::machine::ISVBase::operator=(const bob::machine::ISVBase& other)
 
 
 //////////////////// JFAMachine ////////////////////
-bob::machine::JFAMachine::JFAMachine():
+bob::learn::misc::JFAMachine::JFAMachine():
   m_y(1), m_z(1)
 {
   resizeTmp();
 }
 
-bob::machine::JFAMachine::JFAMachine(const boost::shared_ptr<bob::machine::JFABase> jfa_base):
+bob::learn::misc::JFAMachine::JFAMachine(const boost::shared_ptr<bob::learn::misc::JFABase> jfa_base):
   m_jfa_base(jfa_base),
   m_y(jfa_base->getDimRv()), m_z(jfa_base->getDimCD())
 {
@@ -446,7 +446,7 @@ bob::machine::JFAMachine::JFAMachine(const boost::shared_ptr<bob::machine::JFABa
 }
 
 
-bob::machine::JFAMachine::JFAMachine(const bob::machine::JFAMachine& other):
+bob::learn::misc::JFAMachine::JFAMachine(const bob::learn::misc::JFAMachine& other):
   m_jfa_base(other.m_jfa_base),
   m_y(bob::core::array::ccopy(other.m_y)),
   m_z(bob::core::array::ccopy(other.m_z))
@@ -455,16 +455,16 @@ bob::machine::JFAMachine::JFAMachine(const bob::machine::JFAMachine& other):
   resizeTmp();
 }
 
-bob::machine::JFAMachine::JFAMachine(bob::io::base::HDF5File& config)
+bob::learn::misc::JFAMachine::JFAMachine(bob::io::base::HDF5File& config)
 {
   load(config);
 }
 
-bob::machine::JFAMachine::~JFAMachine() {
+bob::learn::misc::JFAMachine::~JFAMachine() {
 }
 
-bob::machine::JFAMachine&
-bob::machine::JFAMachine::operator=(const bob::machine::JFAMachine& other)
+bob::learn::misc::JFAMachine&
+bob::learn::misc::JFAMachine::operator=(const bob::learn::misc::JFAMachine& other)
 {
   if (this != &other)
   {
@@ -475,20 +475,20 @@ bob::machine::JFAMachine::operator=(const bob::machine::JFAMachine& other)
   return *this;
 }
 
-bool bob::machine::JFAMachine::operator==(const bob::machine::JFAMachine& other) const
+bool bob::learn::misc::JFAMachine::operator==(const bob::learn::misc::JFAMachine& other) const
 {
   return (*m_jfa_base == *(other.m_jfa_base) &&
           bob::core::array::isEqual(m_y, other.m_y) &&
           bob::core::array::isEqual(m_z, other.m_z));
 }
 
-bool bob::machine::JFAMachine::operator!=(const bob::machine::JFAMachine& b) const
+bool bob::learn::misc::JFAMachine::operator!=(const bob::learn::misc::JFAMachine& b) const
 {
   return !(this->operator==(b));
 }
 
 
-bool bob::machine::JFAMachine::is_similar_to(const bob::machine::JFAMachine& b,
+bool bob::learn::misc::JFAMachine::is_similar_to(const bob::learn::misc::JFAMachine& b,
     const double r_epsilon, const double a_epsilon) const
 {
   return (m_jfa_base->is_similar_to(*(b.m_jfa_base), r_epsilon, a_epsilon) &&
@@ -496,13 +496,13 @@ bool bob::machine::JFAMachine::is_similar_to(const bob::machine::JFAMachine& b,
           bob::core::array::isClose(m_z, b.m_z, r_epsilon, a_epsilon));
 }
 
-void bob::machine::JFAMachine::save(bob::io::base::HDF5File& config) const
+void bob::learn::misc::JFAMachine::save(bob::io::base::HDF5File& config) const
 {
   config.setArray("y", m_y);
   config.setArray("z", m_z);
 }
 
-void bob::machine::JFAMachine::load(bob::io::base::HDF5File& config)
+void bob::learn::misc::JFAMachine::load(bob::io::base::HDF5File& config)
 {
   //reads all data directly into the member variables
   blitz::Array<double,1> y = config.readArray<double,1>("y");
@@ -520,7 +520,7 @@ void bob::machine::JFAMachine::load(bob::io::base::HDF5File& config)
 }
 
 
-void bob::machine::JFAMachine::setY(const blitz::Array<double,1>& y)
+void bob::learn::misc::JFAMachine::setY(const blitz::Array<double,1>& y)
 {
   if(y.extent(0) != m_y.extent(0)) { //checks dimension
     boost::format m("size of input vector `y' (%d) does not match the expected size (%d)");
@@ -532,7 +532,7 @@ void bob::machine::JFAMachine::setY(const blitz::Array<double,1>& y)
   updateCache();
 }
 
-void bob::machine::JFAMachine::setZ(const blitz::Array<double,1>& z)
+void bob::learn::misc::JFAMachine::setZ(const blitz::Array<double,1>& z)
 {
   if(z.extent(0) != m_z.extent(0)) { //checks dimension
     boost::format m("size of input vector `z' (%d) does not match the expected size (%d)");
@@ -544,7 +544,7 @@ void bob::machine::JFAMachine::setZ(const blitz::Array<double,1>& z)
   updateCache();
 }
 
-void bob::machine::JFAMachine::setJFABase(const boost::shared_ptr<bob::machine::JFABase> jfa_base)
+void bob::learn::misc::JFAMachine::setJFABase(const boost::shared_ptr<bob::learn::misc::JFABase> jfa_base)
 {
   if (!jfa_base->getUbm())
     throw std::runtime_error("No UBM was set in the JFA machine.");
@@ -553,7 +553,7 @@ void bob::machine::JFAMachine::setJFABase(const boost::shared_ptr<bob::machine::
   resize();
 }
 
-void bob::machine::JFAMachine::resize()
+void bob::learn::misc::JFAMachine::resize()
 {
   m_y.resizeAndPreserve(getDimRv());
   m_z.resizeAndPreserve(getDimCD());
@@ -561,7 +561,7 @@ void bob::machine::JFAMachine::resize()
   resizeTmp();
 }
 
-void bob::machine::JFAMachine::resizeTmp()
+void bob::learn::misc::JFAMachine::resizeTmp()
 {
   if (m_jfa_base)
   {
@@ -569,7 +569,7 @@ void bob::machine::JFAMachine::resizeTmp()
   }
 }
 
-void bob::machine::JFAMachine::updateCache()
+void bob::learn::misc::JFAMachine::updateCache()
 {
   if (m_jfa_base)
   {
@@ -581,31 +581,31 @@ void bob::machine::JFAMachine::updateCache()
   }
 }
 
-void bob::machine::JFAMachine::estimateUx(const bob::machine::GMMStats& gmm_stats,
+void bob::learn::misc::JFAMachine::estimateUx(const bob::learn::misc::GMMStats& gmm_stats,
   blitz::Array<double,1>& Ux)
 {
   estimateX(gmm_stats, m_cache_x);
   bob::math::prod(m_jfa_base->getU(), m_cache_x, Ux);
 }
 
-void bob::machine::JFAMachine::forward(const bob::machine::GMMStats& input,
+void bob::learn::misc::JFAMachine::forward(const bob::learn::misc::GMMStats& input,
   double& score) const
 {
   forward_(input, score);
 }
 
-void bob::machine::JFAMachine::forward(const bob::machine::GMMStats& gmm_stats,
+void bob::learn::misc::JFAMachine::forward(const bob::learn::misc::GMMStats& gmm_stats,
   const blitz::Array<double,1>& Ux, double& score) const
 {
   // Checks that a Base machine has been set
   if (!m_jfa_base) throw std::runtime_error("No UBM was set in the JFA machine.");
 
-  score = bob::machine::linearScoring(m_cache_mVyDz,
+  score = bob::learn::misc::linearScoring(m_cache_mVyDz,
             m_jfa_base->getUbm()->getMeanSupervector(), m_jfa_base->getUbm()->getVarianceSupervector(),
             gmm_stats, Ux, true);
 }
 
-void bob::machine::JFAMachine::forward_(const bob::machine::GMMStats& input,
+void bob::learn::misc::JFAMachine::forward_(const bob::learn::misc::GMMStats& input,
   double& score) const
 {
   // Checks that a Base machine has been set
@@ -615,7 +615,7 @@ void bob::machine::JFAMachine::forward_(const bob::machine::GMMStats& input,
   estimateX(input, m_cache_x);
   bob::math::prod(m_jfa_base->getU(), m_cache_x, m_tmp_Ux);
 
-  score = bob::machine::linearScoring(m_cache_mVyDz,
+  score = bob::learn::misc::linearScoring(m_cache_mVyDz,
             m_jfa_base->getUbm()->getMeanSupervector(), m_jfa_base->getUbm()->getVarianceSupervector(),
             input, m_tmp_Ux, true);
 }
@@ -623,13 +623,13 @@ void bob::machine::JFAMachine::forward_(const bob::machine::GMMStats& input,
 
 
 //////////////////// ISVMachine ////////////////////
-bob::machine::ISVMachine::ISVMachine():
+bob::learn::misc::ISVMachine::ISVMachine():
   m_z(1)
 {
   resizeTmp();
 }
 
-bob::machine::ISVMachine::ISVMachine(const boost::shared_ptr<bob::machine::ISVBase> isv_base):
+bob::learn::misc::ISVMachine::ISVMachine(const boost::shared_ptr<bob::learn::misc::ISVBase> isv_base):
   m_isv_base(isv_base),
   m_z(isv_base->getDimCD())
 {
@@ -640,7 +640,7 @@ bob::machine::ISVMachine::ISVMachine(const boost::shared_ptr<bob::machine::ISVBa
 }
 
 
-bob::machine::ISVMachine::ISVMachine(const bob::machine::ISVMachine& other):
+bob::learn::misc::ISVMachine::ISVMachine(const bob::learn::misc::ISVMachine& other):
   m_isv_base(other.m_isv_base),
   m_z(bob::core::array::ccopy(other.m_z))
 {
@@ -648,16 +648,16 @@ bob::machine::ISVMachine::ISVMachine(const bob::machine::ISVMachine& other):
   resizeTmp();
 }
 
-bob::machine::ISVMachine::ISVMachine(bob::io::base::HDF5File& config)
+bob::learn::misc::ISVMachine::ISVMachine(bob::io::base::HDF5File& config)
 {
   load(config);
 }
 
-bob::machine::ISVMachine::~ISVMachine() {
+bob::learn::misc::ISVMachine::~ISVMachine() {
 }
 
-bob::machine::ISVMachine&
-bob::machine::ISVMachine::operator=(const bob::machine::ISVMachine& other)
+bob::learn::misc::ISVMachine&
+bob::learn::misc::ISVMachine::operator=(const bob::learn::misc::ISVMachine& other)
 {
   if (this != &other)
   {
@@ -667,31 +667,31 @@ bob::machine::ISVMachine::operator=(const bob::machine::ISVMachine& other)
   return *this;
 }
 
-bool bob::machine::ISVMachine::operator==(const bob::machine::ISVMachine& other) const
+bool bob::learn::misc::ISVMachine::operator==(const bob::learn::misc::ISVMachine& other) const
 {
   return (*m_isv_base == *(other.m_isv_base) &&
           bob::core::array::isEqual(m_z, other.m_z));
 }
 
-bool bob::machine::ISVMachine::operator!=(const bob::machine::ISVMachine& b) const
+bool bob::learn::misc::ISVMachine::operator!=(const bob::learn::misc::ISVMachine& b) const
 {
   return !(this->operator==(b));
 }
 
 
-bool bob::machine::ISVMachine::is_similar_to(const bob::machine::ISVMachine& b,
+bool bob::learn::misc::ISVMachine::is_similar_to(const bob::learn::misc::ISVMachine& b,
     const double r_epsilon, const double a_epsilon) const
 {
   return (m_isv_base->is_similar_to(*(b.m_isv_base), r_epsilon, a_epsilon) &&
           bob::core::array::isClose(m_z, b.m_z, r_epsilon, a_epsilon));
 }
 
-void bob::machine::ISVMachine::save(bob::io::base::HDF5File& config) const
+void bob::learn::misc::ISVMachine::save(bob::io::base::HDF5File& config) const
 {
   config.setArray("z", m_z);
 }
 
-void bob::machine::ISVMachine::load(bob::io::base::HDF5File& config)
+void bob::learn::misc::ISVMachine::load(bob::io::base::HDF5File& config)
 {
   //reads all data directly into the member variables
   blitz::Array<double,1> z = config.readArray<double,1>("z");
@@ -703,7 +703,7 @@ void bob::machine::ISVMachine::load(bob::io::base::HDF5File& config)
   resizeTmp();
 }
 
-void bob::machine::ISVMachine::setZ(const blitz::Array<double,1>& z)
+void bob::learn::misc::ISVMachine::setZ(const blitz::Array<double,1>& z)
 {
   if(z.extent(0) != m_z.extent(0)) { //checks dimension
     boost::format m("size of input vector `z' (%d) does not match the expected size (%d)");
@@ -715,7 +715,7 @@ void bob::machine::ISVMachine::setZ(const blitz::Array<double,1>& z)
   updateCache();
 }
 
-void bob::machine::ISVMachine::setISVBase(const boost::shared_ptr<bob::machine::ISVBase> isv_base)
+void bob::learn::misc::ISVMachine::setISVBase(const boost::shared_ptr<bob::learn::misc::ISVBase> isv_base)
 {
   if (!isv_base->getUbm())
     throw std::runtime_error("No UBM was set in the JFA machine.");
@@ -724,14 +724,14 @@ void bob::machine::ISVMachine::setISVBase(const boost::shared_ptr<bob::machine::
   resize();
 }
 
-void bob::machine::ISVMachine::resize()
+void bob::learn::misc::ISVMachine::resize()
 {
   m_z.resizeAndPreserve(getDimCD());
   updateCache();
   resizeTmp();
 }
 
-void bob::machine::ISVMachine::resizeTmp()
+void bob::learn::misc::ISVMachine::resizeTmp()
 {
   if (m_isv_base)
   {
@@ -739,7 +739,7 @@ void bob::machine::ISVMachine::resizeTmp()
   }
 }
 
-void bob::machine::ISVMachine::updateCache()
+void bob::learn::misc::ISVMachine::updateCache()
 {
   if (m_isv_base)
   {
@@ -750,31 +750,31 @@ void bob::machine::ISVMachine::updateCache()
   }
 }
 
-void bob::machine::ISVMachine::estimateUx(const bob::machine::GMMStats& gmm_stats,
+void bob::learn::misc::ISVMachine::estimateUx(const bob::learn::misc::GMMStats& gmm_stats,
   blitz::Array<double,1>& Ux)
 {
   estimateX(gmm_stats, m_cache_x);
   bob::math::prod(m_isv_base->getU(), m_cache_x, Ux);
 }
 
-void bob::machine::ISVMachine::forward(const bob::machine::GMMStats& input,
+void bob::learn::misc::ISVMachine::forward(const bob::learn::misc::GMMStats& input,
   double& score) const
 {
   forward_(input, score);
 }
 
-void bob::machine::ISVMachine::forward(const bob::machine::GMMStats& gmm_stats,
+void bob::learn::misc::ISVMachine::forward(const bob::learn::misc::GMMStats& gmm_stats,
   const blitz::Array<double,1>& Ux, double& score) const
 {
   // Checks that a Base machine has been set
   if (!m_isv_base) throw std::runtime_error("No UBM was set in the JFA machine.");
 
-  score = bob::machine::linearScoring(m_cache_mDz,
+  score = bob::learn::misc::linearScoring(m_cache_mDz,
             m_isv_base->getUbm()->getMeanSupervector(), m_isv_base->getUbm()->getVarianceSupervector(),
             gmm_stats, Ux, true);
 }
 
-void bob::machine::ISVMachine::forward_(const bob::machine::GMMStats& input,
+void bob::learn::misc::ISVMachine::forward_(const bob::learn::misc::GMMStats& input,
   double& score) const
 {
   // Checks that a Base machine has been set
@@ -784,7 +784,7 @@ void bob::machine::ISVMachine::forward_(const bob::machine::GMMStats& input,
   estimateX(input, m_cache_x);
   bob::math::prod(m_isv_base->getU(), m_cache_x, m_tmp_Ux);
 
-  score = bob::machine::linearScoring(m_cache_mDz,
+  score = bob::learn::misc::linearScoring(m_cache_mDz,
             m_isv_base->getUbm()->getMeanSupervector(), m_isv_base->getUbm()->getVarianceSupervector(),
             input, m_tmp_Ux, true);
 }
diff --git a/bob/learn/misc/cpp/JFATrainer.cpp b/bob/learn/misc/cpp/JFATrainer.cpp
index 28e18c1..b53aa7c 100644
--- a/bob/learn/misc/cpp/JFATrainer.cpp
+++ b/bob/learn/misc/cpp/JFATrainer.cpp
@@ -18,23 +18,23 @@
 #include <algorithm>
 
 
-bob::trainer::FABaseTrainer::FABaseTrainer():
+bob::learn::misc::FABaseTrainer::FABaseTrainer():
   m_Nid(0), m_dim_C(0), m_dim_D(0), m_dim_ru(0), m_dim_rv(0),
   m_x(0), m_y(0), m_z(0), m_Nacc(0), m_Facc(0)
 {
 }
 
-bob::trainer::FABaseTrainer::FABaseTrainer(const bob::trainer::FABaseTrainer& other)
+bob::learn::misc::FABaseTrainer::FABaseTrainer(const bob::learn::misc::FABaseTrainer& other)
 {
 }
 
-bob::trainer::FABaseTrainer::~FABaseTrainer()
+bob::learn::misc::FABaseTrainer::~FABaseTrainer()
 {
 }
 
-void bob::trainer::FABaseTrainer::checkStatistics(
-  const bob::machine::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& stats)
+void bob::learn::misc::FABaseTrainer::checkStatistics(
+  const bob::learn::misc::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
 {
   for (size_t id=0; id<stats.size(); ++id) {
     for (size_t s=0; s<stats[id].size(); ++s) {
@@ -53,12 +53,12 @@ void bob::trainer::FABaseTrainer::checkStatistics(
 }
 
 
-void bob::trainer::FABaseTrainer::initUbmNidSumStatistics(
-  const bob::machine::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& stats)
+void bob::learn::misc::FABaseTrainer::initUbmNidSumStatistics(
+  const bob::learn::misc::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
 {
   m_Nid = stats.size();
-  boost::shared_ptr<bob::machine::GMMMachine> ubm = m.getUbm();
+  boost::shared_ptr<bob::learn::misc::GMMMachine> ubm = m.getUbm();
   // Put UBM in cache
   m_dim_C = ubm->getNGaussians();
   m_dim_D = ubm->getNInputs();
@@ -73,8 +73,8 @@ void bob::trainer::FABaseTrainer::initUbmNidSumStatistics(
   initCache();
 }
 
-void bob::trainer::FABaseTrainer::precomputeSumStatisticsN(
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& stats)
+void bob::learn::misc::FABaseTrainer::precomputeSumStatisticsN(
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
 {
   m_Nacc.clear();
   blitz::Array<double,1> Nsum(m_dim_C);
@@ -87,8 +87,8 @@ void bob::trainer::FABaseTrainer::precomputeSumStatisticsN(
   }
 }
 
-void bob::trainer::FABaseTrainer::precomputeSumStatisticsF(
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& stats)
+void bob::learn::misc::FABaseTrainer::precomputeSumStatisticsF(
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
 {
   m_Facc.clear();
   blitz::Array<double,1> Fsum(m_dim_C*m_dim_D);
@@ -104,7 +104,7 @@ void bob::trainer::FABaseTrainer::precomputeSumStatisticsF(
   }
 }
 
-void bob::trainer::FABaseTrainer::initializeXYZ(const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& vec)
+void bob::learn::misc::FABaseTrainer::initializeXYZ(const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& vec)
 {
   m_x.clear();
   m_y.clear();
@@ -126,7 +126,7 @@ void bob::trainer::FABaseTrainer::initializeXYZ(const std::vector<std::vector<bo
   }
 }
 
-void bob::trainer::FABaseTrainer::resetXYZ()
+void bob::learn::misc::FABaseTrainer::resetXYZ()
 {
   for (size_t i=0; i<m_x.size(); ++i)
   {
@@ -136,7 +136,7 @@ void bob::trainer::FABaseTrainer::resetXYZ()
   }
 }
 
-void bob::trainer::FABaseTrainer::initCache()
+void bob::learn::misc::FABaseTrainer::initCache()
 {
   const size_t dim_CD = m_dim_C*m_dim_D;
   // U
@@ -177,7 +177,7 @@ void bob::trainer::FABaseTrainer::initCache()
 
 
 //////////////////////////// V ///////////////////////////
-void bob::trainer::FABaseTrainer::computeVtSigmaInv(const bob::machine::FABase& m)
+void bob::learn::misc::FABaseTrainer::computeVtSigmaInv(const bob::learn::misc::FABase& m)
 {
   const blitz::Array<double,2>& V = m.getV();
   // Blitz compatibility: ugly fix (const_cast, as old blitz version does not
@@ -189,7 +189,7 @@ void bob::trainer::FABaseTrainer::computeVtSigmaInv(const bob::machine::FABase&
   m_cache_VtSigmaInv = Vt(i,j) / sigma(j); // Vt * diag(sigma)^-1
 }
 
-void bob::trainer::FABaseTrainer::computeVProd(const bob::machine::FABase& m)
+void bob::learn::misc::FABaseTrainer::computeVProd(const bob::learn::misc::FABase& m)
 {
   const blitz::Array<double,2>& V = m.getV();
   blitz::firstIndex i;
@@ -207,7 +207,7 @@ void bob::trainer::FABaseTrainer::computeVProd(const bob::machine::FABase& m)
   }
 }
 
-void bob::trainer::FABaseTrainer::computeIdPlusVProd_i(const size_t id)
+void bob::learn::misc::FABaseTrainer::computeIdPlusVProd_i(const size_t id)
 {
   const blitz::Array<double,1>& Ni = m_Nacc[id];
   bob::math::eye(m_tmp_rvrv); // m_tmp_rvrv = I
@@ -219,8 +219,8 @@ void bob::trainer::FABaseTrainer::computeIdPlusVProd_i(const size_t id)
   bob::math::inv(m_tmp_rvrv, m_cache_IdPlusVProd_i); // m_cache_IdPlusVProd_i = ( I+Vt*diag(sigma)^-1*Ni*V)^-1
 }
 
-void bob::trainer::FABaseTrainer::computeFn_y_i(const bob::machine::FABase& mb,
-  const std::vector<boost::shared_ptr<bob::machine::GMMStats> >& stats, const size_t id)
+void bob::learn::misc::FABaseTrainer::computeFn_y_i(const bob::learn::misc::FABase& mb,
+  const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& stats, const size_t id)
 {
   const blitz::Array<double,2>& U = mb.getU();
   const blitz::Array<double,1>& d = mb.getD();
@@ -243,7 +243,7 @@ void bob::trainer::FABaseTrainer::computeFn_y_i(const bob::machine::FABase& mb,
   // Fn_yi = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i} - U*x_{i,h})
 }
 
-void bob::trainer::FABaseTrainer::updateY_i(const size_t id)
+void bob::learn::misc::FABaseTrainer::updateY_i(const size_t id)
 {
   // Computes yi = Ayi * Cvs * Fn_yi
   blitz::Array<double,1>& y = m_y[id];
@@ -252,8 +252,8 @@ void bob::trainer::FABaseTrainer::updateY_i(const size_t id)
   bob::math::prod(m_cache_IdPlusVProd_i, m_tmp_rv, y);
 }
 
-void bob::trainer::FABaseTrainer::updateY(const bob::machine::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& stats)
+void bob::learn::misc::FABaseTrainer::updateY(const bob::learn::misc::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
 {
   // Precomputation
   computeVtSigmaInv(m);
@@ -266,9 +266,9 @@ void bob::trainer::FABaseTrainer::updateY(const bob::machine::FABase& m,
   }
 }
 
-void bob::trainer::FABaseTrainer::computeAccumulatorsV(
-  const bob::machine::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& stats)
+void bob::learn::misc::FABaseTrainer::computeAccumulatorsV(
+  const bob::learn::misc::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
 {
   // Initializes the cache accumulator
   m_acc_V_A1 = 0.;
@@ -294,7 +294,7 @@ void bob::trainer::FABaseTrainer::computeAccumulatorsV(
   }
 }
 
-void bob::trainer::FABaseTrainer::updateV(blitz::Array<double,2>& V)
+void bob::learn::misc::FABaseTrainer::updateV(blitz::Array<double,2>& V)
 {
   blitz::Range rall = blitz::Range::all();
   for (size_t c=0; c<m_dim_C; ++c)
@@ -309,7 +309,7 @@ void bob::trainer::FABaseTrainer::updateV(blitz::Array<double,2>& V)
 
 
 //////////////////////////// U ///////////////////////////
-void bob::trainer::FABaseTrainer::computeUtSigmaInv(const bob::machine::FABase& m)
+void bob::learn::misc::FABaseTrainer::computeUtSigmaInv(const bob::learn::misc::FABase& m)
 {
   const blitz::Array<double,2>& U = m.getU();
   // Blitz compatibility: ugly fix (const_cast, as old blitz version does not
@@ -321,7 +321,7 @@ void bob::trainer::FABaseTrainer::computeUtSigmaInv(const bob::machine::FABase&
   m_cache_UtSigmaInv = Ut(i,j) / sigma(j); // Ut * diag(sigma)^-1
 }
 
-void bob::trainer::FABaseTrainer::computeUProd(const bob::machine::FABase& m)
+void bob::learn::misc::FABaseTrainer::computeUProd(const bob::learn::misc::FABase& m)
 {
   const blitz::Array<double,2>& U = m.getU();
   blitz::firstIndex i;
@@ -338,8 +338,8 @@ void bob::trainer::FABaseTrainer::computeUProd(const bob::machine::FABase& m)
   }
 }
 
-void bob::trainer::FABaseTrainer::computeIdPlusUProd_ih(
-  const boost::shared_ptr<bob::machine::GMMStats>& stats)
+void bob::learn::misc::FABaseTrainer::computeIdPlusUProd_ih(
+  const boost::shared_ptr<bob::learn::misc::GMMStats>& stats)
 {
   const blitz::Array<double,1>& Nih = stats->n;
   bob::math::eye(m_tmp_ruru); // m_tmp_ruru = I
@@ -350,8 +350,8 @@ void bob::trainer::FABaseTrainer::computeIdPlusUProd_ih(
   bob::math::inv(m_tmp_ruru, m_cache_IdPlusUProd_ih); // m_cache_IdPlusUProd_ih = ( I+Ut*diag(sigma)^-1*Ni*U)^-1
 }
 
-void bob::trainer::FABaseTrainer::computeFn_x_ih(const bob::machine::FABase& mb,
-  const boost::shared_ptr<bob::machine::GMMStats>& stats, const size_t id)
+void bob::learn::misc::FABaseTrainer::computeFn_x_ih(const bob::learn::misc::FABase& mb,
+  const boost::shared_ptr<bob::learn::misc::GMMStats>& stats, const size_t id)
 {
   const blitz::Array<double,2>& V = mb.getV();
   const blitz::Array<double,1>& d =  mb.getD();
@@ -373,7 +373,7 @@ void bob::trainer::FABaseTrainer::computeFn_x_ih(const bob::machine::FABase& mb,
   // Fn_x_ih = N_{i,h}*(o_{i,h} - m - D*z_{i} - V*y_{i})
 }
 
-void bob::trainer::FABaseTrainer::updateX_ih(const size_t id, const size_t h)
+void bob::learn::misc::FABaseTrainer::updateX_ih(const size_t id, const size_t h)
 {
   // Computes xih = Axih * Cus * Fn_x_ih
   blitz::Array<double,1> x = m_x[id](blitz::Range::all(), h);
@@ -382,8 +382,8 @@ void bob::trainer::FABaseTrainer::updateX_ih(const size_t id, const size_t h)
   bob::math::prod(m_cache_IdPlusUProd_ih, m_tmp_ru, x);
 }
 
-void bob::trainer::FABaseTrainer::updateX(const bob::machine::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& stats)
+void bob::learn::misc::FABaseTrainer::updateX(const bob::learn::misc::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
 {
   // Precomputation
   computeUtSigmaInv(m);
@@ -399,9 +399,9 @@ void bob::trainer::FABaseTrainer::updateX(const bob::machine::FABase& m,
   }
 }
 
-void bob::trainer::FABaseTrainer::computeAccumulatorsU(
-  const bob::machine::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& stats)
+void bob::learn::misc::FABaseTrainer::computeAccumulatorsU(
+  const bob::learn::misc::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
 {
   // Initializes the cache accumulator
   m_acc_U_A1 = 0.;
@@ -430,7 +430,7 @@ void bob::trainer::FABaseTrainer::computeAccumulatorsU(
   }
 }
 
-void bob::trainer::FABaseTrainer::updateU(blitz::Array<double,2>& U)
+void bob::learn::misc::FABaseTrainer::updateU(blitz::Array<double,2>& U)
 {
   for (size_t c=0; c<m_dim_C; ++c)
   {
@@ -444,21 +444,21 @@ void bob::trainer::FABaseTrainer::updateU(blitz::Array<double,2>& U)
 
 
 //////////////////////////// D ///////////////////////////
-void bob::trainer::FABaseTrainer::computeDtSigmaInv(const bob::machine::FABase& m)
+void bob::learn::misc::FABaseTrainer::computeDtSigmaInv(const bob::learn::misc::FABase& m)
 {
   const blitz::Array<double,1>& d = m.getD();
   const blitz::Array<double,1>& sigma = m.getUbmVariance();
   m_cache_DtSigmaInv = d / sigma; // Dt * diag(sigma)^-1
 }
 
-void bob::trainer::FABaseTrainer::computeDProd(const bob::machine::FABase& m)
+void bob::learn::misc::FABaseTrainer::computeDProd(const bob::learn::misc::FABase& m)
 {
   const blitz::Array<double,1>& d = m.getD();
   const blitz::Array<double,1>& sigma = m.getUbmVariance();
   m_cache_DProd = d / sigma * d; // Dt * diag(sigma)^-1 * D
 }
 
-void bob::trainer::FABaseTrainer::computeIdPlusDProd_i(const size_t id)
+void bob::learn::misc::FABaseTrainer::computeIdPlusDProd_i(const size_t id)
 {
   const blitz::Array<double,1>& Ni = m_Nacc[id];
   bob::core::array::repelem(Ni, m_tmp_CD); // m_tmp_CD = Ni 'repmat'
@@ -467,9 +467,9 @@ void bob::trainer::FABaseTrainer::computeIdPlusDProd_i(const size_t id)
   m_cache_IdPlusDProd_i = 1 / m_cache_IdPlusDProd_i; // m_cache_IdPlusVProd_i = (I+Dt*diag(sigma)^-1*Ni*D)^-1
 }
 
-void bob::trainer::FABaseTrainer::computeFn_z_i(
-  const bob::machine::FABase& mb,
-  const std::vector<boost::shared_ptr<bob::machine::GMMStats> >& stats, const size_t id)
+void bob::learn::misc::FABaseTrainer::computeFn_z_i(
+  const bob::learn::misc::FABase& mb,
+  const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& stats, const size_t id)
 {
   const blitz::Array<double,2>& U = mb.getU();
   const blitz::Array<double,2>& V = mb.getV();
@@ -494,7 +494,7 @@ void bob::trainer::FABaseTrainer::computeFn_z_i(
   // Fn_z_i = sum_{sessions h}(N_{i,h}*(o_{i,h} - m - V*y_{i} - U*x_{i,h})
 }
 
-void bob::trainer::FABaseTrainer::updateZ_i(const size_t id)
+void bob::learn::misc::FABaseTrainer::updateZ_i(const size_t id)
 {
   // Computes zi = Azi * D^T.Sigma^-1 * Fn_zi
   blitz::Array<double,1>& z = m_z[id];
@@ -502,8 +502,8 @@ void bob::trainer::FABaseTrainer::updateZ_i(const size_t id)
   z = m_cache_IdPlusDProd_i * m_cache_DtSigmaInv * m_cache_Fn_z_i;
 }
 
-void bob::trainer::FABaseTrainer::updateZ(const bob::machine::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& stats)
+void bob::learn::misc::FABaseTrainer::updateZ(const bob::learn::misc::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
 {
   // Precomputation
   computeDtSigmaInv(m);
@@ -516,9 +516,9 @@ void bob::trainer::FABaseTrainer::updateZ(const bob::machine::FABase& m,
   }
 }
 
-void bob::trainer::FABaseTrainer::computeAccumulatorsD(
-  const bob::machine::FABase& m,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& stats)
+void bob::learn::misc::FABaseTrainer::computeAccumulatorsD(
+  const bob::learn::misc::FABase& m,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats)
 {
   // Initializes the cache accumulator
   m_acc_D_A1 = 0.;
@@ -538,7 +538,7 @@ void bob::trainer::FABaseTrainer::computeAccumulatorsD(
   }
 }
 
-void bob::trainer::FABaseTrainer::updateD(blitz::Array<double,1>& d)
+void bob::learn::misc::FABaseTrainer::updateD(blitz::Array<double,1>& d)
 {
   d = m_acc_D_A2 / m_acc_D_A1;
 }
@@ -546,59 +546,59 @@ void bob::trainer::FABaseTrainer::updateD(blitz::Array<double,1>& d)
 
 
 //////////////////////////// ISVTrainer ///////////////////////////
-bob::trainer::ISVTrainer::ISVTrainer(const size_t max_iterations, const double relevance_factor):
-  EMTrainer<bob::machine::ISVBase, std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > >
+bob::learn::misc::ISVTrainer::ISVTrainer(const size_t max_iterations, const double relevance_factor):
+  EMTrainer<bob::learn::misc::ISVBase, std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > >
     (0.001, max_iterations, false),
   m_relevance_factor(relevance_factor)
 {
 }
 
-bob::trainer::ISVTrainer::ISVTrainer(const bob::trainer::ISVTrainer& other):
-  EMTrainer<bob::machine::ISVBase, std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > >
+bob::learn::misc::ISVTrainer::ISVTrainer(const bob::learn::misc::ISVTrainer& other):
+  EMTrainer<bob::learn::misc::ISVBase, std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > >
     (other.m_convergence_threshold, other.m_max_iterations,
      other.m_compute_likelihood),
   m_relevance_factor(other.m_relevance_factor)
 {
 }
 
-bob::trainer::ISVTrainer::~ISVTrainer()
+bob::learn::misc::ISVTrainer::~ISVTrainer()
 {
 }
 
-bob::trainer::ISVTrainer& bob::trainer::ISVTrainer::operator=
-(const bob::trainer::ISVTrainer& other)
+bob::learn::misc::ISVTrainer& bob::learn::misc::ISVTrainer::operator=
+(const bob::learn::misc::ISVTrainer& other)
 {
   if (this != &other)
   {
-    bob::trainer::EMTrainer<bob::machine::ISVBase,
-      std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > >::operator=(other);
+    bob::learn::misc::EMTrainer<bob::learn::misc::ISVBase,
+      std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > >::operator=(other);
     m_relevance_factor = other.m_relevance_factor;
   }
   return *this;
 }
 
-bool bob::trainer::ISVTrainer::operator==(const bob::trainer::ISVTrainer& b) const
+bool bob::learn::misc::ISVTrainer::operator==(const bob::learn::misc::ISVTrainer& b) const
 {
-  return bob::trainer::EMTrainer<bob::machine::ISVBase,
-            std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > >::operator==(b) &&
+  return bob::learn::misc::EMTrainer<bob::learn::misc::ISVBase,
+            std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > >::operator==(b) &&
           m_relevance_factor == b.m_relevance_factor;
 }
 
-bool bob::trainer::ISVTrainer::operator!=(const bob::trainer::ISVTrainer& b) const
+bool bob::learn::misc::ISVTrainer::operator!=(const bob::learn::misc::ISVTrainer& b) const
 {
   return !(this->operator==(b));
 }
 
-bool bob::trainer::ISVTrainer::is_similar_to(const bob::trainer::ISVTrainer& b,
+bool bob::learn::misc::ISVTrainer::is_similar_to(const bob::learn::misc::ISVTrainer& b,
   const double r_epsilon, const double a_epsilon) const
 {
-  return bob::trainer::EMTrainer<bob::machine::ISVBase,
-            std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > >::is_similar_to(b, r_epsilon, a_epsilon) &&
+  return bob::learn::misc::EMTrainer<bob::learn::misc::ISVBase,
+            std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > >::is_similar_to(b, r_epsilon, a_epsilon) &&
           m_relevance_factor == b.m_relevance_factor;
 }
 
-void bob::trainer::ISVTrainer::initialize(bob::machine::ISVBase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar)
+void bob::learn::misc::ISVTrainer::initialize(bob::learn::misc::ISVBase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
 {
   m_base_trainer.initUbmNidSumStatistics(machine.getBase(), ar);
   m_base_trainer.initializeXYZ(ar);
@@ -609,51 +609,51 @@ void bob::trainer::ISVTrainer::initialize(bob::machine::ISVBase& machine,
   machine.precompute();
 }
 
-void bob::trainer::ISVTrainer::initializeD(bob::machine::ISVBase& machine) const
+void bob::learn::misc::ISVTrainer::initializeD(bob::learn::misc::ISVBase& machine) const
 {
   // D = sqrt(variance(UBM) / relevance_factor)
   blitz::Array<double,1> d = machine.updateD();
   d = sqrt(machine.getBase().getUbmVariance() / m_relevance_factor);
 }
 
-void bob::trainer::ISVTrainer::finalize(bob::machine::ISVBase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar)
+void bob::learn::misc::ISVTrainer::finalize(bob::learn::misc::ISVBase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
 {
 }
 
-void bob::trainer::ISVTrainer::eStep(bob::machine::ISVBase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar)
+void bob::learn::misc::ISVTrainer::eStep(bob::learn::misc::ISVBase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
 {
   m_base_trainer.resetXYZ();
 
-  const bob::machine::FABase& base = machine.getBase();
+  const bob::learn::misc::FABase& base = machine.getBase();
   m_base_trainer.updateX(base, ar);
   m_base_trainer.updateZ(base, ar);
   m_base_trainer.computeAccumulatorsU(base, ar);
 }
 
-void bob::trainer::ISVTrainer::mStep(bob::machine::ISVBase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar)
+void bob::learn::misc::ISVTrainer::mStep(bob::learn::misc::ISVBase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
 {
   blitz::Array<double,2>& U = machine.updateU();
   m_base_trainer.updateU(U);
   machine.precompute();
 }
 
-double bob::trainer::ISVTrainer::computeLikelihood(bob::machine::ISVBase& machine)
+double bob::learn::misc::ISVTrainer::computeLikelihood(bob::learn::misc::ISVBase& machine)
 {
   // TODO
   return 0;
 }
 
-void bob::trainer::ISVTrainer::enrol(bob::machine::ISVMachine& machine,
-  const std::vector<boost::shared_ptr<bob::machine::GMMStats> >& ar,
+void bob::learn::misc::ISVTrainer::enrol(bob::learn::misc::ISVMachine& machine,
+  const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& ar,
   const size_t n_iter)
 {
-  std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > vvec;
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > vvec;
   vvec.push_back(ar);
 
-  const bob::machine::FABase& fb = machine.getISVBase()->getBase();
+  const bob::learn::misc::FABase& fb = machine.getISVBase()->getBase();
 
   m_base_trainer.initUbmNidSumStatistics(fb, vvec);
   m_base_trainer.initializeXYZ(vvec);
@@ -670,22 +670,22 @@ void bob::trainer::ISVTrainer::enrol(bob::machine::ISVMachine& machine,
 
 
 //////////////////////////// JFATrainer ///////////////////////////
-bob::trainer::JFATrainer::JFATrainer(const size_t max_iterations):
+bob::learn::misc::JFATrainer::JFATrainer(const size_t max_iterations):
   m_max_iterations(max_iterations), m_rng(new boost::mt19937())
 {
 }
 
-bob::trainer::JFATrainer::JFATrainer(const bob::trainer::JFATrainer& other):
+bob::learn::misc::JFATrainer::JFATrainer(const bob::learn::misc::JFATrainer& other):
   m_max_iterations(other.m_max_iterations), m_rng(other.m_rng)
 {
 }
 
-bob::trainer::JFATrainer::~JFATrainer()
+bob::learn::misc::JFATrainer::~JFATrainer()
 {
 }
 
-bob::trainer::JFATrainer& bob::trainer::JFATrainer::operator=
-(const bob::trainer::JFATrainer& other)
+bob::learn::misc::JFATrainer& bob::learn::misc::JFATrainer::operator=
+(const bob::learn::misc::JFATrainer& other)
 {
   if (this != &other)
   {
@@ -695,24 +695,24 @@ bob::trainer::JFATrainer& bob::trainer::JFATrainer::operator=
   return *this;
 }
 
-bool bob::trainer::JFATrainer::operator==(const bob::trainer::JFATrainer& b) const
+bool bob::learn::misc::JFATrainer::operator==(const bob::learn::misc::JFATrainer& b) const
 {
   return m_max_iterations == b.m_max_iterations && *m_rng == *(b.m_rng);
 }
 
-bool bob::trainer::JFATrainer::operator!=(const bob::trainer::JFATrainer& b) const
+bool bob::learn::misc::JFATrainer::operator!=(const bob::learn::misc::JFATrainer& b) const
 {
   return !(this->operator==(b));
 }
 
-bool bob::trainer::JFATrainer::is_similar_to(const bob::trainer::JFATrainer& b,
+bool bob::learn::misc::JFATrainer::is_similar_to(const bob::learn::misc::JFATrainer& b,
   const double r_epsilon, const double a_epsilon) const
 {
   return m_max_iterations == b.m_max_iterations && *m_rng == *(b.m_rng);
 }
 
-void bob::trainer::JFATrainer::initialize(bob::machine::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar)
+void bob::learn::misc::JFATrainer::initialize(bob::learn::misc::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
 {
   m_base_trainer.initUbmNidSumStatistics(machine.getBase(), ar);
   m_base_trainer.initializeXYZ(ar);
@@ -726,75 +726,75 @@ void bob::trainer::JFATrainer::initialize(bob::machine::JFABase& machine,
   machine.precompute();
 }
 
-void bob::trainer::JFATrainer::eStep1(bob::machine::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar)
+void bob::learn::misc::JFATrainer::eStep1(bob::learn::misc::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
 {
-  const bob::machine::FABase& base = machine.getBase();
+  const bob::learn::misc::FABase& base = machine.getBase();
   m_base_trainer.updateY(base, ar);
   m_base_trainer.computeAccumulatorsV(base, ar);
 }
 
-void bob::trainer::JFATrainer::mStep1(bob::machine::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar)
+void bob::learn::misc::JFATrainer::mStep1(bob::learn::misc::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
 {
   blitz::Array<double,2>& V = machine.updateV();
   m_base_trainer.updateV(V);
 }
 
-void bob::trainer::JFATrainer::finalize1(bob::machine::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar)
+void bob::learn::misc::JFATrainer::finalize1(bob::learn::misc::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
 {
-  const bob::machine::FABase& base = machine.getBase();
+  const bob::learn::misc::FABase& base = machine.getBase();
   m_base_trainer.updateY(base, ar);
 }
 
 
-void bob::trainer::JFATrainer::eStep2(bob::machine::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar)
+void bob::learn::misc::JFATrainer::eStep2(bob::learn::misc::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
 {
-  const bob::machine::FABase& base = machine.getBase();
+  const bob::learn::misc::FABase& base = machine.getBase();
   m_base_trainer.updateX(base, ar);
   m_base_trainer.computeAccumulatorsU(base, ar);
 }
 
-void bob::trainer::JFATrainer::mStep2(bob::machine::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar)
+void bob::learn::misc::JFATrainer::mStep2(bob::learn::misc::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
 {
   blitz::Array<double,2>& U = machine.updateU();
   m_base_trainer.updateU(U);
   machine.precompute();
 }
 
-void bob::trainer::JFATrainer::finalize2(bob::machine::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar)
+void bob::learn::misc::JFATrainer::finalize2(bob::learn::misc::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
 {
-  const bob::machine::FABase& base = machine.getBase();
+  const bob::learn::misc::FABase& base = machine.getBase();
   m_base_trainer.updateX(base, ar);
 }
 
 
-void bob::trainer::JFATrainer::eStep3(bob::machine::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar)
+void bob::learn::misc::JFATrainer::eStep3(bob::learn::misc::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
 {
-  const bob::machine::FABase& base = machine.getBase();
+  const bob::learn::misc::FABase& base = machine.getBase();
   m_base_trainer.updateZ(base, ar);
   m_base_trainer.computeAccumulatorsD(base, ar);
 }
 
-void bob::trainer::JFATrainer::mStep3(bob::machine::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar)
+void bob::learn::misc::JFATrainer::mStep3(bob::learn::misc::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
 {
   blitz::Array<double,1>& d = machine.updateD();
   m_base_trainer.updateD(d);
 }
 
-void bob::trainer::JFATrainer::finalize3(bob::machine::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar)
+void bob::learn::misc::JFATrainer::finalize3(bob::learn::misc::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
 {
 }
 
-void bob::trainer::JFATrainer::train_loop(bob::machine::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar)
+void bob::learn::misc::JFATrainer::train_loop(bob::learn::misc::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
 {
   // V subspace
   for (size_t i=0; i<m_max_iterations; ++i) {
@@ -816,21 +816,21 @@ void bob::trainer::JFATrainer::train_loop(bob::machine::JFABase& machine,
   finalize3(machine, ar);
 }
 
-void bob::trainer::JFATrainer::train(bob::machine::JFABase& machine,
-  const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar)
+void bob::learn::misc::JFATrainer::train(bob::learn::misc::JFABase& machine,
+  const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar)
 {
   initialize(machine, ar);
   train_loop(machine, ar);
 }
 
-void bob::trainer::JFATrainer::enrol(bob::machine::JFAMachine& machine,
-  const std::vector<boost::shared_ptr<bob::machine::GMMStats> >& ar,
+void bob::learn::misc::JFATrainer::enrol(bob::learn::misc::JFAMachine& machine,
+  const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& ar,
   const size_t n_iter)
 {
-  std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > vvec;
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > vvec;
   vvec.push_back(ar);
 
-  const bob::machine::FABase& fb = machine.getJFABase()->getBase();
+  const bob::learn::misc::FABase& fb = machine.getJFABase()->getBase();
 
   m_base_trainer.initUbmNidSumStatistics(fb, vvec);
   m_base_trainer.initializeXYZ(vvec);
diff --git a/bob/learn/misc/cpp/KMeansMachine.cpp b/bob/learn/misc/cpp/KMeansMachine.cpp
index 75a87d0..d30cee4 100644
--- a/bob/learn/misc/cpp/KMeansMachine.cpp
+++ b/bob/learn/misc/cpp/KMeansMachine.cpp
@@ -13,43 +13,43 @@
 #include <bob.core/array_copy.h>
 #include <limits>
 
-bob::machine::KMeansMachine::KMeansMachine():
+bob::learn::misc::KMeansMachine::KMeansMachine():
   m_n_means(0), m_n_inputs(0), m_means(0,0),
   m_cache_means(0,0)
 {
   m_means = 0;
 }
 
-bob::machine::KMeansMachine::KMeansMachine(const size_t n_means, const size_t n_inputs):
+bob::learn::misc::KMeansMachine::KMeansMachine(const size_t n_means, const size_t n_inputs):
   m_n_means(n_means), m_n_inputs(n_inputs), m_means(n_means, n_inputs),
   m_cache_means(n_means, n_inputs)
 {
   m_means = 0;
 }
 
-bob::machine::KMeansMachine::KMeansMachine(const blitz::Array<double,2>& means):
+bob::learn::misc::KMeansMachine::KMeansMachine(const blitz::Array<double,2>& means):
   m_n_means(means.extent(0)), m_n_inputs(means.extent(1)),
   m_means(bob::core::array::ccopy(means)),
   m_cache_means(means.shape())
 {
 }
 
-bob::machine::KMeansMachine::KMeansMachine(const bob::machine::KMeansMachine& other):
+bob::learn::misc::KMeansMachine::KMeansMachine(const bob::learn::misc::KMeansMachine& other):
   m_n_means(other.m_n_means), m_n_inputs(other.m_n_inputs),
   m_means(bob::core::array::ccopy(other.m_means)),
   m_cache_means(other.m_cache_means.shape())
 {
 }
 
-bob::machine::KMeansMachine::KMeansMachine(bob::io::base::HDF5File& config)
+bob::learn::misc::KMeansMachine::KMeansMachine(bob::io::base::HDF5File& config)
 {
   load(config);
 }
 
-bob::machine::KMeansMachine::~KMeansMachine() { }
+bob::learn::misc::KMeansMachine::~KMeansMachine() { }
 
-bob::machine::KMeansMachine& bob::machine::KMeansMachine::operator=
-(const bob::machine::KMeansMachine& other)
+bob::learn::misc::KMeansMachine& bob::learn::misc::KMeansMachine::operator=
+(const bob::learn::misc::KMeansMachine& other)
 {
   if(this != &other)
   {
@@ -61,25 +61,25 @@ bob::machine::KMeansMachine& bob::machine::KMeansMachine::operator=
   return *this;
 }
 
-bool bob::machine::KMeansMachine::operator==(const bob::machine::KMeansMachine& b) const
+bool bob::learn::misc::KMeansMachine::operator==(const bob::learn::misc::KMeansMachine& b) const
 {
   return m_n_inputs == b.m_n_inputs && m_n_means == b.m_n_means &&
          bob::core::array::isEqual(m_means, b.m_means);
 }
 
-bool bob::machine::KMeansMachine::operator!=(const bob::machine::KMeansMachine& b) const
+bool bob::learn::misc::KMeansMachine::operator!=(const bob::learn::misc::KMeansMachine& b) const
 {
   return !(this->operator==(b));
 }
 
-bool bob::machine::KMeansMachine::is_similar_to(const bob::machine::KMeansMachine& b,
+bool bob::learn::misc::KMeansMachine::is_similar_to(const bob::learn::misc::KMeansMachine& b,
   const double r_epsilon, const double a_epsilon) const
 {
   return m_n_inputs == b.m_n_inputs && m_n_means == b.m_n_means &&
          bob::core::array::isClose(m_means, b.m_means, r_epsilon, a_epsilon);
 }
 
-void bob::machine::KMeansMachine::load(bob::io::base::HDF5File& config)
+void bob::learn::misc::KMeansMachine::load(bob::io::base::HDF5File& config)
 {
   //reads all data directly into the member variables
   m_means.reference(config.readArray<double,2>("means"));
@@ -88,18 +88,18 @@ void bob::machine::KMeansMachine::load(bob::io::base::HDF5File& config)
   m_cache_means.resize(m_n_means, m_n_inputs);
 }
 
-void bob::machine::KMeansMachine::save(bob::io::base::HDF5File& config) const
+void bob::learn::misc::KMeansMachine::save(bob::io::base::HDF5File& config) const
 {
   config.setArray("means", m_means);
 }
 
-void bob::machine::KMeansMachine::setMeans(const blitz::Array<double,2> &means)
+void bob::learn::misc::KMeansMachine::setMeans(const blitz::Array<double,2> &means)
 {
   bob::core::array::assertSameShape(means, m_means);
   m_means = means;
 }
 
-void bob::machine::KMeansMachine::setMean(const size_t i, const blitz::Array<double,1> &mean)
+void bob::learn::misc::KMeansMachine::setMean(const size_t i, const blitz::Array<double,1> &mean)
 {
   if(i>=m_n_means) {
     boost::format m("cannot set mean with index %lu: out of bounds [0,%lu[");
@@ -110,7 +110,7 @@ void bob::machine::KMeansMachine::setMean(const size_t i, const blitz::Array<dou
   m_means(i,blitz::Range::all()) = mean;
 }
 
-void bob::machine::KMeansMachine::getMean(const size_t i, blitz::Array<double,1> &mean) const
+void bob::learn::misc::KMeansMachine::getMean(const size_t i, blitz::Array<double,1> &mean) const
 {
   if(i>=m_n_means) {
     boost::format m("cannot get mean with index %lu: out of bounds [0,%lu[");
@@ -121,13 +121,13 @@ void bob::machine::KMeansMachine::getMean(const size_t i, blitz::Array<double,1>
   mean = m_means(i,blitz::Range::all());
 }
 
-double bob::machine::KMeansMachine::getDistanceFromMean(const blitz::Array<double,1> &x,
+double bob::learn::misc::KMeansMachine::getDistanceFromMean(const blitz::Array<double,1> &x,
   const size_t i) const
 {
   return blitz::sum(blitz::pow2(m_means(i,blitz::Range::all()) - x));
 }
 
-void bob::machine::KMeansMachine::getClosestMean(const blitz::Array<double,1> &x,
+void bob::learn::misc::KMeansMachine::getClosestMean(const blitz::Array<double,1> &x,
   size_t &closest_mean, double &min_distance) const
 {
   min_distance = std::numeric_limits<double>::max();
@@ -141,7 +141,7 @@ void bob::machine::KMeansMachine::getClosestMean(const blitz::Array<double,1> &x
   }
 }
 
-double bob::machine::KMeansMachine::getMinDistance(const blitz::Array<double,1>& input) const
+double bob::learn::misc::KMeansMachine::getMinDistance(const blitz::Array<double,1>& input) const
 {
   size_t closest_mean = 0;
   double min_distance = 0;
@@ -149,7 +149,7 @@ double bob::machine::KMeansMachine::getMinDistance(const blitz::Array<double,1>&
   return min_distance;
 }
 
-void bob::machine::KMeansMachine::getVariancesAndWeightsForEachClusterInit(blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const
+void bob::learn::misc::KMeansMachine::getVariancesAndWeightsForEachClusterInit(blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const
 {
   // check arguments
   bob::core::array::assertSameShape(variances, m_means);
@@ -165,7 +165,7 @@ void bob::machine::KMeansMachine::getVariancesAndWeightsForEachClusterInit(blitz
   m_cache_means = 0;
 }
 
-void bob::machine::KMeansMachine::getVariancesAndWeightsForEachClusterAcc(const blitz::Array<double,2>& data, blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const
+void bob::learn::misc::KMeansMachine::getVariancesAndWeightsForEachClusterAcc(const blitz::Array<double,2>& data, blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const
 {
   // check arguments
   bob::core::array::assertSameShape(variances, m_means);
@@ -189,7 +189,7 @@ void bob::machine::KMeansMachine::getVariancesAndWeightsForEachClusterAcc(const
   }
 }
 
-void bob::machine::KMeansMachine::getVariancesAndWeightsForEachClusterFin(blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const
+void bob::learn::misc::KMeansMachine::getVariancesAndWeightsForEachClusterFin(blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const
 {
   // check arguments
   bob::core::array::assertSameShape(variances, m_means);
@@ -210,13 +210,13 @@ void bob::machine::KMeansMachine::getVariancesAndWeightsForEachClusterFin(blitz:
   weights = weights / blitz::sum(weights);
 }
 
-void bob::machine::KMeansMachine::setCacheMeans(const blitz::Array<double,2> &cache_means)
+void bob::learn::misc::KMeansMachine::setCacheMeans(const blitz::Array<double,2> &cache_means)
 {
   bob::core::array::assertSameShape(cache_means, m_cache_means);
   m_cache_means = cache_means;
 }
 
-void bob::machine::KMeansMachine::getVariancesAndWeightsForEachCluster(const blitz::Array<double,2>& data, blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const
+void bob::learn::misc::KMeansMachine::getVariancesAndWeightsForEachCluster(const blitz::Array<double,2>& data, blitz::Array<double,2>& variances, blitz::Array<double,1>& weights) const
 {
   // initialise
   getVariancesAndWeightsForEachClusterInit(variances, weights);
@@ -226,7 +226,7 @@ void bob::machine::KMeansMachine::getVariancesAndWeightsForEachCluster(const bli
   getVariancesAndWeightsForEachClusterFin(variances, weights);
 }
 
-void bob::machine::KMeansMachine::forward(const blitz::Array<double,1>& input, double& output) const
+void bob::learn::misc::KMeansMachine::forward(const blitz::Array<double,1>& input, double& output) const
 {
   if(static_cast<size_t>(input.extent(0)) != m_n_inputs) {
     boost::format m("machine input size (%u) does not match the size of input array (%d)");
@@ -236,12 +236,12 @@ void bob::machine::KMeansMachine::forward(const blitz::Array<double,1>& input, d
   forward_(input,output);
 }
 
-void bob::machine::KMeansMachine::forward_(const blitz::Array<double,1>& input, double& output) const
+void bob::learn::misc::KMeansMachine::forward_(const blitz::Array<double,1>& input, double& output) const
 {
   output = getMinDistance(input);
 }
 
-void bob::machine::KMeansMachine::resize(const size_t n_means, const size_t n_inputs)
+void bob::learn::misc::KMeansMachine::resize(const size_t n_means, const size_t n_inputs)
 {
   m_n_means = n_means;
   m_n_inputs = n_inputs;
@@ -249,11 +249,9 @@ void bob::machine::KMeansMachine::resize(const size_t n_means, const size_t n_in
   m_cache_means.resizeAndPreserve(n_means, n_inputs);
 }
 
-namespace bob{
-  namespace machine{
-    std::ostream& operator<<(std::ostream& os, const KMeansMachine& km) {
-      os << "Means = " << km.m_means << std::endl;
-      return os;
-    }
+namespace bob { namespace learn { namespace misc {
+  std::ostream& operator<<(std::ostream& os, const KMeansMachine& km) {
+    os << "Means = " << km.m_means << std::endl;
+    return os;
   }
-}
+} } }
diff --git a/bob/learn/misc/cpp/KMeansTrainer.cpp b/bob/learn/misc/cpp/KMeansTrainer.cpp
index d902b23..ae69ee7 100644
--- a/bob/learn/misc/cpp/KMeansTrainer.cpp
+++ b/bob/learn/misc/cpp/KMeansTrainer.cpp
@@ -14,9 +14,9 @@
 #include <boost/random/discrete_distribution.hpp>
 #endif
 
-bob::trainer::KMeansTrainer::KMeansTrainer(double convergence_threshold,
+bob::learn::misc::KMeansTrainer::KMeansTrainer(double convergence_threshold,
     size_t max_iterations, bool compute_likelihood, InitializationMethod i_m):
-  bob::trainer::EMTrainer<bob::machine::KMeansMachine, blitz::Array<double,2> >(
+  bob::learn::misc::EMTrainer<bob::learn::misc::KMeansMachine, blitz::Array<double,2> >(
     convergence_threshold, max_iterations, compute_likelihood),
   m_initialization_method(i_m),
   m_rng(new boost::mt19937()), m_average_min_distance(0),
@@ -24,8 +24,8 @@ bob::trainer::KMeansTrainer::KMeansTrainer(double convergence_threshold,
 {
 }
 
-bob::trainer::KMeansTrainer::KMeansTrainer(const bob::trainer::KMeansTrainer& other):
-  bob::trainer::EMTrainer<bob::machine::KMeansMachine, blitz::Array<double,2> >(
+bob::learn::misc::KMeansTrainer::KMeansTrainer(const bob::learn::misc::KMeansTrainer& other):
+  bob::learn::misc::EMTrainer<bob::learn::misc::KMeansMachine, blitz::Array<double,2> >(
     other.m_convergence_threshold, other.m_max_iterations, other.m_compute_likelihood),
   m_initialization_method(other.m_initialization_method),
   m_rng(other.m_rng), m_average_min_distance(other.m_average_min_distance),
@@ -34,12 +34,12 @@ bob::trainer::KMeansTrainer::KMeansTrainer(const bob::trainer::KMeansTrainer& ot
 {
 }
 
-bob::trainer::KMeansTrainer& bob::trainer::KMeansTrainer::operator=
-(const bob::trainer::KMeansTrainer& other)
+bob::learn::misc::KMeansTrainer& bob::learn::misc::KMeansTrainer::operator=
+(const bob::learn::misc::KMeansTrainer& other)
 {
   if(this != &other)
   {
-    EMTrainer<bob::machine::KMeansMachine, blitz::Array<double,2> >::operator=(other);
+    EMTrainer<bob::learn::misc::KMeansMachine, blitz::Array<double,2> >::operator=(other);
     m_initialization_method = other.m_initialization_method;
     m_rng = other.m_rng;
     m_average_min_distance = other.m_average_min_distance;
@@ -49,8 +49,8 @@ bob::trainer::KMeansTrainer& bob::trainer::KMeansTrainer::operator=
   return *this;
 }
 
-bool bob::trainer::KMeansTrainer::operator==(const bob::trainer::KMeansTrainer& b) const {
-  return EMTrainer<bob::machine::KMeansMachine, blitz::Array<double,2> >::operator==(b) &&
+bool bob::learn::misc::KMeansTrainer::operator==(const bob::learn::misc::KMeansTrainer& b) const {
+  return EMTrainer<bob::learn::misc::KMeansMachine, blitz::Array<double,2> >::operator==(b) &&
          m_initialization_method == b.m_initialization_method &&
          *m_rng == *(b.m_rng) && m_average_min_distance == b.m_average_min_distance &&
          bob::core::array::hasSameShape(m_zeroethOrderStats, b.m_zeroethOrderStats) &&
@@ -59,11 +59,11 @@ bool bob::trainer::KMeansTrainer::operator==(const bob::trainer::KMeansTrainer&
          blitz::all(m_firstOrderStats == b.m_firstOrderStats);
 }
 
-bool bob::trainer::KMeansTrainer::operator!=(const bob::trainer::KMeansTrainer& b) const {
+bool bob::learn::misc::KMeansTrainer::operator!=(const bob::learn::misc::KMeansTrainer& b) const {
   return !(this->operator==(b));
 }
 
-void bob::trainer::KMeansTrainer::initialize(bob::machine::KMeansMachine& kmeans,
+void bob::learn::misc::KMeansTrainer::initialize(bob::learn::misc::KMeansMachine& kmeans,
   const blitz::Array<double,2>& ar)
 {
   // split data into as many chunks as there are means
@@ -171,7 +171,7 @@ void bob::trainer::KMeansTrainer::initialize(bob::machine::KMeansMachine& kmeans
   m_firstOrderStats.resize(kmeans.getNMeans(), kmeans.getNInputs());
 }
 
-void bob::trainer::KMeansTrainer::eStep(bob::machine::KMeansMachine& kmeans,
+void bob::learn::misc::KMeansTrainer::eStep(bob::learn::misc::KMeansMachine& kmeans,
   const blitz::Array<double,2>& ar)
 {
   // initialise the accumulators
@@ -196,7 +196,7 @@ void bob::trainer::KMeansTrainer::eStep(bob::machine::KMeansMachine& kmeans,
   m_average_min_distance /= static_cast<double>(ar.extent(0));
 }
 
-void bob::trainer::KMeansTrainer::mStep(bob::machine::KMeansMachine& kmeans,
+void bob::learn::misc::KMeansTrainer::mStep(bob::learn::misc::KMeansMachine& kmeans,
   const blitz::Array<double,2>&)
 {
   blitz::Array<double,2>& means = kmeans.updateMeans();
@@ -207,17 +207,17 @@ void bob::trainer::KMeansTrainer::mStep(bob::machine::KMeansMachine& kmeans,
   }
 }
 
-double bob::trainer::KMeansTrainer::computeLikelihood(bob::machine::KMeansMachine& kmeans)
+double bob::learn::misc::KMeansTrainer::computeLikelihood(bob::learn::misc::KMeansMachine& kmeans)
 {
   return m_average_min_distance;
 }
 
-void bob::trainer::KMeansTrainer::finalize(bob::machine::KMeansMachine& kmeans,
+void bob::learn::misc::KMeansTrainer::finalize(bob::learn::misc::KMeansMachine& kmeans,
   const blitz::Array<double,2>& ar)
 {
 }
 
-bool bob::trainer::KMeansTrainer::resetAccumulators(bob::machine::KMeansMachine& kmeans)
+bool bob::learn::misc::KMeansTrainer::resetAccumulators(bob::learn::misc::KMeansMachine& kmeans)
 {
   m_average_min_distance = 0;
   m_zeroethOrderStats = 0;
@@ -225,13 +225,13 @@ bool bob::trainer::KMeansTrainer::resetAccumulators(bob::machine::KMeansMachine&
   return true;
 }
 
-void bob::trainer::KMeansTrainer::setZeroethOrderStats(const blitz::Array<double,1>& zeroethOrderStats)
+void bob::learn::misc::KMeansTrainer::setZeroethOrderStats(const blitz::Array<double,1>& zeroethOrderStats)
 {
   bob::core::array::assertSameShape(m_zeroethOrderStats, zeroethOrderStats);
   m_zeroethOrderStats = zeroethOrderStats;
 }
 
-void bob::trainer::KMeansTrainer::setFirstOrderStats(const blitz::Array<double,2>& firstOrderStats)
+void bob::learn::misc::KMeansTrainer::setFirstOrderStats(const blitz::Array<double,2>& firstOrderStats)
 {
   bob::core::array::assertSameShape(m_firstOrderStats, firstOrderStats);
   m_firstOrderStats = firstOrderStats;
diff --git a/bob/learn/misc/cpp/LinearScoring.cpp b/bob/learn/misc/cpp/LinearScoring.cpp
index 6914488..e9b2365 100644
--- a/bob/learn/misc/cpp/LinearScoring.cpp
+++ b/bob/learn/misc/cpp/LinearScoring.cpp
@@ -7,94 +7,90 @@
 #include <bob.learn.misc/LinearScoring.h>
 #include <bob.math/linear.h>
 
-namespace bob { namespace machine {
-
-namespace detail {
-
-  void linearScoring(const std::vector<blitz::Array<double,1> >& models,
-                     const blitz::Array<double,1>& ubm_mean,
-                     const blitz::Array<double,1>& ubm_variance,
-                     const std::vector<boost::shared_ptr<const bob::machine::GMMStats> >& test_stats,
-                     const std::vector<blitz::Array<double,1> >* test_channelOffset,
-                     const bool frame_length_normalisation,
-                     blitz::Array<double,2>& scores)
-  {
-    int C = test_stats[0]->sumPx.extent(0);
-    int D = test_stats[0]->sumPx.extent(1);
-    int CD = C*D;
-    int Tt = test_stats.size();
-    int Tm = models.size();
-
-    // Check output size
-    bob::core::array::assertSameDimensionLength(scores.extent(0), models.size());
-    bob::core::array::assertSameDimensionLength(scores.extent(1), test_stats.size());
-
-    blitz::Array<double,2> A(Tm, CD);
-    blitz::Array<double,2> B(CD, Tt);
-
-    // 1) Compute A
-    for(int t=0; t<Tm; ++t) {
-      blitz::Array<double, 1> tmp = A(t, blitz::Range::all());
-      tmp = (models[t] - ubm_mean) / ubm_variance;
-    }
 
-    // 2) Compute B
-    if(test_channelOffset == 0) {
-      for(int t=0; t<Tt; ++t)
-        for(int s=0; s<CD; ++s)
-          B(s, t) = test_stats[t]->sumPx(s/D, s%D) - (ubm_mean(s) * test_stats[t]->n(s/D));
-    }
-    else {
-      bob::core::array::assertSameDimensionLength((*test_channelOffset).size(), Tt);
-
-      for(int t=0; t<Tt; ++t) {
-        bob::core::array::assertSameDimensionLength((*test_channelOffset)[t].extent(0), CD);
-        for(int s=0; s<CD; ++s)
-          B(s, t) = test_stats[t]->sumPx(s/D, s%D) - (test_stats[t]->n(s/D) * (ubm_mean(s) + (*test_channelOffset)[t](s)));
-      }
-    }
+static void _linearScoring(const std::vector<blitz::Array<double,1> >& models,
+                   const blitz::Array<double,1>& ubm_mean,
+                   const blitz::Array<double,1>& ubm_variance,
+                   const std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> >& test_stats,
+                   const std::vector<blitz::Array<double,1> >* test_channelOffset,
+                   const bool frame_length_normalisation,
+                   blitz::Array<double,2>& scores)
+{
+  int C = test_stats[0]->sumPx.extent(0);
+  int D = test_stats[0]->sumPx.extent(1);
+  int CD = C*D;
+  int Tt = test_stats.size();
+  int Tm = models.size();
+
+  // Check output size
+  bob::core::array::assertSameDimensionLength(scores.extent(0), models.size());
+  bob::core::array::assertSameDimensionLength(scores.extent(1), test_stats.size());
+
+  blitz::Array<double,2> A(Tm, CD);
+  blitz::Array<double,2> B(CD, Tt);
 
-    // Apply the normalisation if needed
-    if(frame_length_normalisation) {
-      for(int t=0; t<Tt; ++t) {
-        double sum_N = test_stats[t]->T;
-        blitz::Array<double, 1> v_t = B(blitz::Range::all(),t);
-
-        if (sum_N <= std::numeric_limits<double>::epsilon() && sum_N >= -std::numeric_limits<double>::epsilon())
-          v_t = 0;
-        else
-          v_t /= sum_N;
-      }
+  // 1) Compute A
+  for(int t=0; t<Tm; ++t) {
+    blitz::Array<double, 1> tmp = A(t, blitz::Range::all());
+    tmp = (models[t] - ubm_mean) / ubm_variance;
+  }
+
+  // 2) Compute B
+  if(test_channelOffset == 0) {
+    for(int t=0; t<Tt; ++t)
+      for(int s=0; s<CD; ++s)
+        B(s, t) = test_stats[t]->sumPx(s/D, s%D) - (ubm_mean(s) * test_stats[t]->n(s/D));
+  }
+  else {
+    bob::core::array::assertSameDimensionLength((*test_channelOffset).size(), Tt);
+
+    for(int t=0; t<Tt; ++t) {
+      bob::core::array::assertSameDimensionLength((*test_channelOffset)[t].extent(0), CD);
+      for(int s=0; s<CD; ++s)
+        B(s, t) = test_stats[t]->sumPx(s/D, s%D) - (test_stats[t]->n(s/D) * (ubm_mean(s) + (*test_channelOffset)[t](s)));
     }
+  }
 
-    // 3) Compute LLR
-    bob::math::prod(A, B, scores);
+  // Apply the normalisation if needed
+  if(frame_length_normalisation) {
+    for(int t=0; t<Tt; ++t) {
+      double sum_N = test_stats[t]->T;
+      blitz::Array<double, 1> v_t = B(blitz::Range::all(),t);
+
+      if (sum_N <= std::numeric_limits<double>::epsilon() && sum_N >= -std::numeric_limits<double>::epsilon())
+        v_t = 0;
+      else
+        v_t /= sum_N;
+    }
   }
+
+  // 3) Compute LLR
+  bob::math::prod(A, B, scores);
 }
 
 
-void linearScoring(const std::vector<blitz::Array<double,1> >& models,
+void bob::learn::misc::linearScoring(const std::vector<blitz::Array<double,1> >& models,
                    const blitz::Array<double,1>& ubm_mean, const blitz::Array<double,1>& ubm_variance,
-                   const std::vector<boost::shared_ptr<const bob::machine::GMMStats> >& test_stats,
+                   const std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> >& test_stats,
                    const std::vector<blitz::Array<double,1> >& test_channelOffset,
                    const bool frame_length_normalisation,
                    blitz::Array<double, 2>& scores)
 {
-  detail::linearScoring(models, ubm_mean, ubm_variance, test_stats, &test_channelOffset, frame_length_normalisation, scores);
+  _linearScoring(models, ubm_mean, ubm_variance, test_stats, &test_channelOffset, frame_length_normalisation, scores);
 }
 
-void linearScoring(const std::vector<blitz::Array<double,1> >& models,
+void bob::learn::misc::linearScoring(const std::vector<blitz::Array<double,1> >& models,
                    const blitz::Array<double,1>& ubm_mean, const blitz::Array<double,1>& ubm_variance,
-                   const std::vector<boost::shared_ptr<const bob::machine::GMMStats> >& test_stats,
+                   const std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> >& test_stats,
                    const bool frame_length_normalisation,
                    blitz::Array<double, 2>& scores)
 {
-  detail::linearScoring(models, ubm_mean, ubm_variance, test_stats, 0, frame_length_normalisation, scores);
+  _linearScoring(models, ubm_mean, ubm_variance, test_stats, 0, frame_length_normalisation, scores);
 }
 
-void linearScoring(const std::vector<boost::shared_ptr<const bob::machine::GMMMachine> >& models,
-                   const bob::machine::GMMMachine& ubm,
-                   const std::vector<boost::shared_ptr<const bob::machine::GMMStats> >& test_stats,
+void bob::learn::misc::linearScoring(const std::vector<boost::shared_ptr<const bob::learn::misc::GMMMachine> >& models,
+                   const bob::learn::misc::GMMMachine& ubm,
+                   const std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> >& test_stats,
                    const bool frame_length_normalisation,
                    blitz::Array<double, 2>& scores)
 {
@@ -110,12 +106,12 @@ void linearScoring(const std::vector<boost::shared_ptr<const bob::machine::GMMMa
   }
   const blitz::Array<double,1>& ubm_mean = ubm.getMeanSupervector();
   const blitz::Array<double,1>& ubm_variance = ubm.getVarianceSupervector();
-  detail::linearScoring(models_b, ubm_mean, ubm_variance, test_stats, 0, frame_length_normalisation, scores);
+  _linearScoring(models_b, ubm_mean, ubm_variance, test_stats, 0, frame_length_normalisation, scores);
 }
 
-void linearScoring(const std::vector<boost::shared_ptr<const bob::machine::GMMMachine> >& models,
-                   const bob::machine::GMMMachine& ubm,
-                   const std::vector<boost::shared_ptr<const bob::machine::GMMStats> >& test_stats,
+void bob::learn::misc::linearScoring(const std::vector<boost::shared_ptr<const bob::learn::misc::GMMMachine> >& models,
+                   const bob::learn::misc::GMMMachine& ubm,
+                   const std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> >& test_stats,
                    const std::vector<blitz::Array<double,1> >& test_channelOffset,
                    const bool frame_length_normalisation,
                    blitz::Array<double, 2>& scores)
@@ -132,14 +128,14 @@ void linearScoring(const std::vector<boost::shared_ptr<const bob::machine::GMMMa
   }
   const blitz::Array<double,1>& ubm_mean = ubm.getMeanSupervector();
   const blitz::Array<double,1>& ubm_variance = ubm.getVarianceSupervector();
-  detail::linearScoring(models_b, ubm_mean, ubm_variance, test_stats, &test_channelOffset, frame_length_normalisation, scores);
+  _linearScoring(models_b, ubm_mean, ubm_variance, test_stats, &test_channelOffset, frame_length_normalisation, scores);
 }
 
 
 
-double linearScoring(const blitz::Array<double,1>& models,
+double bob::learn::misc::linearScoring(const blitz::Array<double,1>& models,
                      const blitz::Array<double,1>& ubm_mean, const blitz::Array<double,1>& ubm_variance,
-                     const bob::machine::GMMStats& test_stats,
+                     const bob::learn::misc::GMMStats& test_stats,
                      const blitz::Array<double,1>& test_channelOffset,
                      const bool frame_length_normalisation)
 {
@@ -170,4 +166,3 @@ double linearScoring(const blitz::Array<double,1>& models,
   return blitz::sum(A * B);
 }
 
-}}
diff --git a/bob/learn/misc/cpp/MAP_GMMTrainer.cpp b/bob/learn/misc/cpp/MAP_GMMTrainer.cpp
index f48dd4d..80e110a 100644
--- a/bob/learn/misc/cpp/MAP_GMMTrainer.cpp
+++ b/bob/learn/misc/cpp/MAP_GMMTrainer.cpp
@@ -8,29 +8,29 @@
 #include <bob.learn.misc/MAP_GMMTrainer.h>
 #include <bob.core/check.h>
 
-bob::trainer::MAP_GMMTrainer::MAP_GMMTrainer(const double relevance_factor,
+bob::learn::misc::MAP_GMMTrainer::MAP_GMMTrainer(const double relevance_factor,
     const bool update_means, const bool update_variances,
     const bool update_weights, const double mean_var_update_responsibilities_threshold):
   GMMTrainer(update_means, update_variances, update_weights, mean_var_update_responsibilities_threshold),
   m_relevance_factor(relevance_factor),
-  m_prior_gmm(boost::shared_ptr<bob::machine::GMMMachine>()),
+  m_prior_gmm(boost::shared_ptr<bob::learn::misc::GMMMachine>()),
   m_T3_alpha(0.), m_T3_adaptation(false)
 {
 }
 
-bob::trainer::MAP_GMMTrainer::MAP_GMMTrainer(const bob::trainer::MAP_GMMTrainer& b):
-  bob::trainer::GMMTrainer(b),
+bob::learn::misc::MAP_GMMTrainer::MAP_GMMTrainer(const bob::learn::misc::MAP_GMMTrainer& b):
+  bob::learn::misc::GMMTrainer(b),
   m_relevance_factor(b.m_relevance_factor),
   m_prior_gmm(b.m_prior_gmm),
   m_T3_alpha(b.m_T3_alpha), m_T3_adaptation(b.m_T3_adaptation)
 {
 }
 
-bob::trainer::MAP_GMMTrainer::~MAP_GMMTrainer()
+bob::learn::misc::MAP_GMMTrainer::~MAP_GMMTrainer()
 {
 }
 
-void bob::trainer::MAP_GMMTrainer::initialize(bob::machine::GMMMachine& gmm,
+void bob::learn::misc::MAP_GMMTrainer::initialize(bob::learn::misc::GMMMachine& gmm,
   const blitz::Array<double,2>& data)
 {
   // Check that the prior GMM has been specified
@@ -38,7 +38,7 @@ void bob::trainer::MAP_GMMTrainer::initialize(bob::machine::GMMMachine& gmm,
     throw std::runtime_error("MAP_GMMTrainer: Prior GMM distribution has not been set");
 
   // Allocate memory for the sufficient statistics and initialise
-  bob::trainer::GMMTrainer::initialize(gmm, data);
+  bob::learn::misc::GMMTrainer::initialize(gmm, data);
 
   const size_t n_gaussians = gmm.getNGaussians();
   // TODO: check size?
@@ -54,14 +54,14 @@ void bob::trainer::MAP_GMMTrainer::initialize(bob::machine::GMMMachine& gmm,
   m_cache_ml_weights.resize(n_gaussians);
 }
 
-bool bob::trainer::MAP_GMMTrainer::setPriorGMM(boost::shared_ptr<bob::machine::GMMMachine> prior_gmm)
+bool bob::learn::misc::MAP_GMMTrainer::setPriorGMM(boost::shared_ptr<bob::learn::misc::GMMMachine> prior_gmm)
 {
   if (!prior_gmm) return false;
   m_prior_gmm = prior_gmm;
   return true;
 }
 
-void bob::trainer::MAP_GMMTrainer::mStep(bob::machine::GMMMachine& gmm,
+void bob::learn::misc::MAP_GMMTrainer::mStep(bob::learn::misc::GMMMachine& gmm,
   const blitz::Array<double,2>& data)
 {
   // Read options and variables
@@ -140,12 +140,12 @@ void bob::trainer::MAP_GMMTrainer::mStep(bob::machine::GMMMachine& gmm,
   }
 }
 
-bob::trainer::MAP_GMMTrainer& bob::trainer::MAP_GMMTrainer::operator=
-  (const bob::trainer::MAP_GMMTrainer &other)
+bob::learn::misc::MAP_GMMTrainer& bob::learn::misc::MAP_GMMTrainer::operator=
+  (const bob::learn::misc::MAP_GMMTrainer &other)
 {
   if (this != &other)
   {
-    bob::trainer::GMMTrainer::operator=(other);
+    bob::learn::misc::GMMTrainer::operator=(other);
     m_relevance_factor = other.m_relevance_factor;
     m_prior_gmm = other.m_prior_gmm;
     m_T3_alpha = other.m_T3_alpha;
@@ -156,27 +156,27 @@ bob::trainer::MAP_GMMTrainer& bob::trainer::MAP_GMMTrainer::operator=
   return *this;
 }
 
-bool bob::trainer::MAP_GMMTrainer::operator==
-  (const bob::trainer::MAP_GMMTrainer &other) const
+bool bob::learn::misc::MAP_GMMTrainer::operator==
+  (const bob::learn::misc::MAP_GMMTrainer &other) const
 {
-  return bob::trainer::GMMTrainer::operator==(other) &&
+  return bob::learn::misc::GMMTrainer::operator==(other) &&
          m_relevance_factor == other.m_relevance_factor &&
          m_prior_gmm == other.m_prior_gmm &&
          m_T3_alpha == other.m_T3_alpha &&
          m_T3_adaptation == other.m_T3_adaptation;
 }
 
-bool bob::trainer::MAP_GMMTrainer::operator!=
-  (const bob::trainer::MAP_GMMTrainer &other) const
+bool bob::learn::misc::MAP_GMMTrainer::operator!=
+  (const bob::learn::misc::MAP_GMMTrainer &other) const
 {
   return !(this->operator==(other));
 }
 
-bool bob::trainer::MAP_GMMTrainer::is_similar_to
-  (const bob::trainer::MAP_GMMTrainer &other, const double r_epsilon,
+bool bob::learn::misc::MAP_GMMTrainer::is_similar_to
+  (const bob::learn::misc::MAP_GMMTrainer &other, const double r_epsilon,
    const double a_epsilon) const
 {
-  return bob::trainer::GMMTrainer::is_similar_to(other, r_epsilon, a_epsilon) &&
+  return bob::learn::misc::GMMTrainer::is_similar_to(other, r_epsilon, a_epsilon) &&
          bob::core::isClose(m_relevance_factor, other.m_relevance_factor, r_epsilon, a_epsilon) &&
          m_prior_gmm == other.m_prior_gmm &&
          bob::core::isClose(m_T3_alpha, other.m_T3_alpha, r_epsilon, a_epsilon) &&
diff --git a/bob/learn/misc/cpp/ML_GMMTrainer.cpp b/bob/learn/misc/cpp/ML_GMMTrainer.cpp
index d595d1a..ec949e0 100644
--- a/bob/learn/misc/cpp/ML_GMMTrainer.cpp
+++ b/bob/learn/misc/cpp/ML_GMMTrainer.cpp
@@ -8,34 +8,34 @@
 #include <bob.learn.misc/ML_GMMTrainer.h>
 #include <algorithm>
 
-bob::trainer::ML_GMMTrainer::ML_GMMTrainer(const bool update_means,
+bob::learn::misc::ML_GMMTrainer::ML_GMMTrainer(const bool update_means,
     const bool update_variances, const bool update_weights,
     const double mean_var_update_responsibilities_threshold):
-  bob::trainer::GMMTrainer(update_means, update_variances, update_weights,
+  bob::learn::misc::GMMTrainer(update_means, update_variances, update_weights,
     mean_var_update_responsibilities_threshold)
 {
 }
 
-bob::trainer::ML_GMMTrainer::ML_GMMTrainer(const bob::trainer::ML_GMMTrainer& b):
-  bob::trainer::GMMTrainer(b)
+bob::learn::misc::ML_GMMTrainer::ML_GMMTrainer(const bob::learn::misc::ML_GMMTrainer& b):
+  bob::learn::misc::GMMTrainer(b)
 {
 }
 
-bob::trainer::ML_GMMTrainer::~ML_GMMTrainer()
+bob::learn::misc::ML_GMMTrainer::~ML_GMMTrainer()
 {
 }
 
-void bob::trainer::ML_GMMTrainer::initialize(bob::machine::GMMMachine& gmm,
+void bob::learn::misc::ML_GMMTrainer::initialize(bob::learn::misc::GMMMachine& gmm,
   const blitz::Array<double,2>& data)
 {
-  bob::trainer::GMMTrainer::initialize(gmm, data);
+  bob::learn::misc::GMMTrainer::initialize(gmm, data);
   // Allocate cache
   size_t n_gaussians = gmm.getNGaussians();
   m_cache_ss_n_thresholded.resize(n_gaussians);
 }
 
 
-void bob::trainer::ML_GMMTrainer::mStep(bob::machine::GMMMachine& gmm,
+void bob::learn::misc::ML_GMMTrainer::mStep(bob::learn::misc::GMMMachine& gmm,
   const blitz::Array<double,2>& data)
 {
   // Read options and variables
@@ -79,33 +79,33 @@ void bob::trainer::ML_GMMTrainer::mStep(bob::machine::GMMMachine& gmm,
   }
 }
 
-bob::trainer::ML_GMMTrainer& bob::trainer::ML_GMMTrainer::operator=
-  (const bob::trainer::ML_GMMTrainer &other)
+bob::learn::misc::ML_GMMTrainer& bob::learn::misc::ML_GMMTrainer::operator=
+  (const bob::learn::misc::ML_GMMTrainer &other)
 {
   if (this != &other)
   {
-    bob::trainer::GMMTrainer::operator=(other);
+    bob::learn::misc::GMMTrainer::operator=(other);
     m_cache_ss_n_thresholded.resize(other.m_cache_ss_n_thresholded.extent(0));
   }
   return *this;
 }
 
-bool bob::trainer::ML_GMMTrainer::operator==
-  (const bob::trainer::ML_GMMTrainer &other) const
+bool bob::learn::misc::ML_GMMTrainer::operator==
+  (const bob::learn::misc::ML_GMMTrainer &other) const
 {
-  return bob::trainer::GMMTrainer::operator==(other);
+  return bob::learn::misc::GMMTrainer::operator==(other);
 }
 
-bool bob::trainer::ML_GMMTrainer::operator!=
-  (const bob::trainer::ML_GMMTrainer &other) const
+bool bob::learn::misc::ML_GMMTrainer::operator!=
+  (const bob::learn::misc::ML_GMMTrainer &other) const
 {
   return !(this->operator==(other));
 }
 
-bool bob::trainer::ML_GMMTrainer::is_similar_to
-  (const bob::trainer::ML_GMMTrainer &other, const double r_epsilon,
+bool bob::learn::misc::ML_GMMTrainer::is_similar_to
+  (const bob::learn::misc::ML_GMMTrainer &other, const double r_epsilon,
    const double a_epsilon) const
 {
-  return bob::trainer::GMMTrainer::is_similar_to(other, r_epsilon, a_epsilon);
+  return bob::learn::misc::GMMTrainer::is_similar_to(other, r_epsilon, a_epsilon);
 }
 
diff --git a/bob/learn/misc/cpp/PLDAMachine.cpp b/bob/learn/misc/cpp/PLDAMachine.cpp
index e3adafb..0009daf 100644
--- a/bob/learn/misc/cpp/PLDAMachine.cpp
+++ b/bob/learn/misc/cpp/PLDAMachine.cpp
@@ -19,13 +19,13 @@
 #include <boost/lexical_cast.hpp>
 #include <string>
 
-bob::machine::PLDABase::PLDABase():
+bob::learn::misc::PLDABase::PLDABase():
   m_variance_threshold(0.)
 {
   resizeNoInit(0, 0, 0);
 }
 
-bob::machine::PLDABase::PLDABase(const size_t dim_d, const size_t dim_f,
+bob::learn::misc::PLDABase::PLDABase(const size_t dim_d, const size_t dim_f,
     const size_t dim_g, const double variance_threshold):
   m_variance_threshold(variance_threshold)
 {
@@ -33,7 +33,7 @@ bob::machine::PLDABase::PLDABase(const size_t dim_d, const size_t dim_f,
 }
 
 
-bob::machine::PLDABase::PLDABase(const bob::machine::PLDABase& other):
+bob::learn::misc::PLDABase::PLDABase(const bob::learn::misc::PLDABase& other):
   m_dim_d(other.m_dim_d),
   m_dim_f(other.m_dim_f),
   m_dim_g(other.m_dim_g),
@@ -56,15 +56,15 @@ bob::machine::PLDABase::PLDABase(const bob::machine::PLDABase& other):
   resizeTmp();
 }
 
-bob::machine::PLDABase::PLDABase(bob::io::base::HDF5File& config) {
+bob::learn::misc::PLDABase::PLDABase(bob::io::base::HDF5File& config) {
   load(config);
 }
 
-bob::machine::PLDABase::~PLDABase() {
+bob::learn::misc::PLDABase::~PLDABase() {
 }
 
-bob::machine::PLDABase& bob::machine::PLDABase::operator=
-    (const bob::machine::PLDABase& other)
+bob::learn::misc::PLDABase& bob::learn::misc::PLDABase::operator=
+    (const bob::learn::misc::PLDABase& other)
 {
   if (this != &other)
   {
@@ -90,8 +90,8 @@ bob::machine::PLDABase& bob::machine::PLDABase::operator=
   return *this;
 }
 
-bool bob::machine::PLDABase::operator==
-    (const bob::machine::PLDABase& b) const
+bool bob::learn::misc::PLDABase::operator==
+    (const bob::learn::misc::PLDABase& b) const
 {
   if (!(m_dim_d == b.m_dim_d && m_dim_f == b.m_dim_f &&
         m_dim_g == b.m_dim_g &&
@@ -124,13 +124,13 @@ bool bob::machine::PLDABase::operator==
   return true;
 }
 
-bool bob::machine::PLDABase::operator!=
-    (const bob::machine::PLDABase& b) const
+bool bob::learn::misc::PLDABase::operator!=
+    (const bob::learn::misc::PLDABase& b) const
 {
   return !(this->operator==(b));
 }
 
-bool bob::machine::PLDABase::is_similar_to(const bob::machine::PLDABase& b,
+bool bob::learn::misc::PLDABase::is_similar_to(const bob::learn::misc::PLDABase& b,
   const double r_epsilon, const double a_epsilon) const
 {
   return (m_dim_d == b.m_dim_d && m_dim_f == b.m_dim_f &&
@@ -151,7 +151,7 @@ bool bob::machine::PLDABase::is_similar_to(const bob::machine::PLDABase& b,
           bob::core::isClose(m_cache_loglike_constterm, b.m_cache_loglike_constterm));
 }
 
-void bob::machine::PLDABase::load(bob::io::base::HDF5File& config)
+void bob::learn::misc::PLDABase::load(bob::io::base::HDF5File& config)
 {
   if (!config.contains("dim_d"))
   {
@@ -241,7 +241,7 @@ void bob::machine::PLDABase::load(bob::io::base::HDF5File& config)
   resizeTmp();
 }
 
-void bob::machine::PLDABase::save(bob::io::base::HDF5File& config) const
+void bob::learn::misc::PLDABase::save(bob::io::base::HDF5File& config) const
 {
   config.set("dim_d", (uint64_t)m_dim_d);
   config.set("dim_f", (uint64_t)m_dim_f);
@@ -290,7 +290,7 @@ void bob::machine::PLDABase::save(bob::io::base::HDF5File& config) const
   config.set("logdet_sigma", m_cache_logdet_sigma);
 }
 
-void bob::machine::PLDABase::resizeNoInit(const size_t dim_d, const size_t dim_f,
+void bob::learn::misc::PLDABase::resizeNoInit(const size_t dim_d, const size_t dim_f,
     const size_t dim_g)
 {
   m_dim_d = dim_d;
@@ -310,7 +310,7 @@ void bob::machine::PLDABase::resizeNoInit(const size_t dim_d, const size_t dim_f
   resizeTmp();
 }
 
-void bob::machine::PLDABase::resizeTmp()
+void bob::learn::misc::PLDABase::resizeTmp()
 {
   m_tmp_d_1.resize(m_dim_d);
   m_tmp_d_2.resize(m_dim_d);
@@ -319,14 +319,14 @@ void bob::machine::PLDABase::resizeTmp()
   m_tmp_ng_ng_1.resize(m_dim_g, m_dim_g);
 }
 
-void bob::machine::PLDABase::resize(const size_t dim_d, const size_t dim_f,
+void bob::learn::misc::PLDABase::resize(const size_t dim_d, const size_t dim_f,
     const size_t dim_g)
 {
   resizeNoInit(dim_d, dim_f, dim_g);
   initMuFGSigma();
 }
 
-void bob::machine::PLDABase::setF(const blitz::Array<double,2>& F)
+void bob::learn::misc::PLDABase::setF(const blitz::Array<double,2>& F)
 {
   bob::core::array::assertSameShape(F, m_F);
   m_F.reference(bob::core::array::ccopy(F));
@@ -334,7 +334,7 @@ void bob::machine::PLDABase::setF(const blitz::Array<double,2>& F)
   precompute();
 }
 
-void bob::machine::PLDABase::setG(const blitz::Array<double,2>& G)
+void bob::learn::misc::PLDABase::setG(const blitz::Array<double,2>& G)
 {
   bob::core::array::assertSameShape(G, m_G);
   m_G.reference(bob::core::array::ccopy(G));
@@ -343,7 +343,7 @@ void bob::machine::PLDABase::setG(const blitz::Array<double,2>& G)
   precomputeLogDetAlpha();
 }
 
-void bob::machine::PLDABase::setSigma(const blitz::Array<double,1>& sigma)
+void bob::learn::misc::PLDABase::setSigma(const blitz::Array<double,1>& sigma)
 {
   bob::core::array::assertSameShape(sigma, m_sigma);
   m_sigma.reference(bob::core::array::ccopy(sigma));
@@ -352,13 +352,13 @@ void bob::machine::PLDABase::setSigma(const blitz::Array<double,1>& sigma)
   applyVarianceThreshold();
 }
 
-void bob::machine::PLDABase::setMu(const blitz::Array<double,1>& mu)
+void bob::learn::misc::PLDABase::setMu(const blitz::Array<double,1>& mu)
 {
   bob::core::array::assertSameShape(mu, m_mu);
   m_mu.reference(bob::core::array::ccopy(mu));
 }
 
-void bob::machine::PLDABase::setVarianceThreshold(const double value)
+void bob::learn::misc::PLDABase::setVarianceThreshold(const double value)
 {
   // Variance flooring
   m_variance_threshold = value;
@@ -367,7 +367,7 @@ void bob::machine::PLDABase::setVarianceThreshold(const double value)
   applyVarianceThreshold();
 }
 
-void bob::machine::PLDABase::applyVarianceThreshold()
+void bob::learn::misc::PLDABase::applyVarianceThreshold()
 {
    // Apply variance flooring threshold
   m_sigma = blitz::where( m_sigma < m_variance_threshold, m_variance_threshold, m_sigma);
@@ -376,20 +376,20 @@ void bob::machine::PLDABase::applyVarianceThreshold()
   precomputeLogLike();
 }
 
-const blitz::Array<double,2>& bob::machine::PLDABase::getGamma(const size_t a) const
+const blitz::Array<double,2>& bob::learn::misc::PLDABase::getGamma(const size_t a) const
 {
   if(!hasGamma(a))
     throw std::runtime_error("Gamma for this number of samples is not currently in cache. You could use the getAddGamma() method instead");
   return (m_cache_gamma.find(a))->second;
 }
 
-const blitz::Array<double,2>& bob::machine::PLDABase::getAddGamma(const size_t a)
+const blitz::Array<double,2>& bob::learn::misc::PLDABase::getAddGamma(const size_t a)
 {
   if(!hasGamma(a)) precomputeGamma(a);
   return m_cache_gamma[a];
 }
 
-void bob::machine::PLDABase::initMuFGSigma()
+void bob::learn::misc::PLDABase::initMuFGSigma()
 {
   // To avoid problems related to precomputation
   m_mu = 0.;
@@ -401,7 +401,7 @@ void bob::machine::PLDABase::initMuFGSigma()
   precomputeLogLike();
 }
 
-void bob::machine::PLDABase::precompute()
+void bob::learn::misc::PLDABase::precompute()
 {
   precomputeISigma();
   precomputeGtISigma();
@@ -412,19 +412,19 @@ void bob::machine::PLDABase::precompute()
   m_cache_loglike_constterm.clear();
 }
 
-void bob::machine::PLDABase::precomputeLogLike()
+void bob::learn::misc::PLDABase::precomputeLogLike()
 {
   precomputeLogDetAlpha();
   precomputeLogDetSigma();
 }
 
-void bob::machine::PLDABase::precomputeISigma()
+void bob::learn::misc::PLDABase::precomputeISigma()
 {
   // Updates inverse of sigma
   m_cache_isigma = 1. / m_sigma;
 }
 
-void bob::machine::PLDABase::precomputeGtISigma()
+void bob::learn::misc::PLDABase::precomputeGtISigma()
 {
   // m_cache_Gt_isigma = G^T \Sigma^{-1}
   blitz::firstIndex i;
@@ -433,7 +433,7 @@ void bob::machine::PLDABase::precomputeGtISigma()
   m_cache_Gt_isigma = Gt(i,j) * m_cache_isigma(j);
 }
 
-void bob::machine::PLDABase::precomputeAlpha()
+void bob::learn::misc::PLDABase::precomputeAlpha()
 {
   // alpha = (Id + G^T.sigma^-1.G)^-1
 
@@ -445,7 +445,7 @@ void bob::machine::PLDABase::precomputeAlpha()
   bob::math::inv(m_tmp_ng_ng_1, m_cache_alpha);
 }
 
-void bob::machine::PLDABase::precomputeBeta()
+void bob::learn::misc::PLDABase::precomputeBeta()
 {
   // beta = (sigma + G.G^T)^-1
   // BUT, there is a more efficient computation (Woodbury identity):
@@ -462,7 +462,7 @@ void bob::machine::PLDABase::precomputeBeta()
   for(int i=0; i<m_cache_beta.extent(0); ++i) m_cache_beta(i,i) += m_cache_isigma(i);
 }
 
-void bob::machine::PLDABase::precomputeGamma(const size_t a)
+void bob::learn::misc::PLDABase::precomputeGamma(const size_t a)
 {
 
   blitz::Array<double,2> gamma_a(getDimF(),getDimF());
@@ -470,14 +470,14 @@ void bob::machine::PLDABase::precomputeGamma(const size_t a)
   computeGamma(a, gamma_a);
 }
 
-void bob::machine::PLDABase::precomputeFtBeta()
+void bob::learn::misc::PLDABase::precomputeFtBeta()
 {
   // m_cache_Ft_beta = F^T.beta = F^T.(sigma + G.G^T)^-1
   blitz::Array<double,2> Ft = m_F.transpose(1,0);
   bob::math::prod(Ft, m_cache_beta, m_cache_Ft_beta);
 }
 
-void bob::machine::PLDABase::computeGamma(const size_t a,
+void bob::learn::misc::PLDABase::computeGamma(const size_t a,
   blitz::Array<double,2> res) const
 {
   // gamma = (Id + a.F^T.beta.F)^-1
@@ -495,18 +495,18 @@ void bob::machine::PLDABase::computeGamma(const size_t a,
   bob::math::inv(m_tmp_nf_nf_1, res);
 }
 
-void bob::machine::PLDABase::precomputeLogDetAlpha()
+void bob::learn::misc::PLDABase::precomputeLogDetAlpha()
 {
   int sign;
   m_cache_logdet_alpha = bob::math::slogdet(m_cache_alpha, sign);
 }
 
-void bob::machine::PLDABase::precomputeLogDetSigma()
+void bob::learn::misc::PLDABase::precomputeLogDetSigma()
 {
   m_cache_logdet_sigma = blitz::sum(blitz::log(m_sigma));
 }
 
-double bob::machine::PLDABase::computeLogLikeConstTerm(const size_t a,
+double bob::learn::misc::PLDABase::computeLogLikeConstTerm(const size_t a,
   const blitz::Array<double,2>& gamma_a) const
 {
   // loglike_constterm[a] = a/2 *
@@ -519,38 +519,38 @@ double bob::machine::PLDABase::computeLogLikeConstTerm(const size_t a,
   return res;
 }
 
-double bob::machine::PLDABase::computeLogLikeConstTerm(const size_t a)
+double bob::learn::misc::PLDABase::computeLogLikeConstTerm(const size_t a)
 {
   const blitz::Array<double,2>& gamma_a = getAddGamma(a);
   return computeLogLikeConstTerm(a, gamma_a);
 }
 
-void bob::machine::PLDABase::precomputeLogLikeConstTerm(const size_t a)
+void bob::learn::misc::PLDABase::precomputeLogLikeConstTerm(const size_t a)
 {
   double val = computeLogLikeConstTerm(a);
   m_cache_loglike_constterm[a] = val;
 }
 
-double bob::machine::PLDABase::getLogLikeConstTerm(const size_t a) const
+double bob::learn::misc::PLDABase::getLogLikeConstTerm(const size_t a) const
 {
   if(!hasLogLikeConstTerm(a))
     throw std::runtime_error("The LogLikelihood constant term for this number of samples is not currently in cache. You could use the getAddLogLikeConstTerm() method instead");
   return (m_cache_loglike_constterm.find(a))->second;
 }
 
-double bob::machine::PLDABase::getAddLogLikeConstTerm(const size_t a)
+double bob::learn::misc::PLDABase::getAddLogLikeConstTerm(const size_t a)
 {
   if(!hasLogLikeConstTerm(a)) precomputeLogLikeConstTerm(a);
   return m_cache_loglike_constterm[a];
 }
 
-void bob::machine::PLDABase::clearMaps()
+void bob::learn::misc::PLDABase::clearMaps()
 {
   m_cache_gamma.clear();
   m_cache_loglike_constterm.clear();
 }
 
-double bob::machine::PLDABase::computeLogLikelihoodPointEstimate(
+double bob::learn::misc::PLDABase::computeLogLikelihoodPointEstimate(
   const blitz::Array<double,1>& xij, const blitz::Array<double,1>& hi,
   const blitz::Array<double,1>& wij) const
 {
@@ -572,25 +572,23 @@ double bob::machine::PLDABase::computeLogLikelihoodPointEstimate(
   return res;
 }
 
-namespace bob{
-  namespace machine{
-    /**
-     * @brief Prints a PLDABase in the output stream. This will print
-     * the values of the parameters \f$\mu\f$, \f$F\f$, \f$G\f$ and
-     * \f$\Sigma\f$ of the PLDA model.
-     */
-    std::ostream& operator<<(std::ostream& os, const PLDABase& m) {
-      os << "mu = " << m.m_mu << std::endl;
-      os << "sigma = " << m.m_sigma << std::endl;
-      os << "F = " << m.m_F << std::endl;
-      os << "G = " << m.m_G << std::endl;
-      return os;
-    }
+namespace bob { namespace learn { namespace misc {
+  /**
+   * @brief Prints a PLDABase in the output stream. This will print
+   * the values of the parameters \f$\mu\f$, \f$F\f$, \f$G\f$ and
+   * \f$\Sigma\f$ of the PLDA model.
+   */
+  std::ostream& operator<<(std::ostream& os, const PLDABase& m) {
+    os << "mu = " << m.m_mu << std::endl;
+    os << "sigma = " << m.m_sigma << std::endl;
+    os << "F = " << m.m_F << std::endl;
+    os << "G = " << m.m_G << std::endl;
+    return os;
   }
-}
+} } }
 
 
-bob::machine::PLDAMachine::PLDAMachine():
+bob::learn::misc::PLDAMachine::PLDAMachine():
   m_plda_base(),
   m_n_samples(0), m_nh_sum_xit_beta_xi(0), m_weighted_sum(0),
   m_loglikelihood(0), m_cache_gamma(), m_cache_loglike_constterm(),
@@ -598,7 +596,7 @@ bob::machine::PLDAMachine::PLDAMachine():
 {
 }
 
-bob::machine::PLDAMachine::PLDAMachine(const boost::shared_ptr<bob::machine::PLDABase> plda_base):
+bob::learn::misc::PLDAMachine::PLDAMachine(const boost::shared_ptr<bob::learn::misc::PLDABase> plda_base):
   m_plda_base(plda_base),
   m_n_samples(0), m_nh_sum_xit_beta_xi(0), m_weighted_sum(plda_base->getDimF()),
   m_loglikelihood(0), m_cache_gamma(), m_cache_loglike_constterm()
@@ -607,7 +605,7 @@ bob::machine::PLDAMachine::PLDAMachine(const boost::shared_ptr<bob::machine::PLD
 }
 
 
-bob::machine::PLDAMachine::PLDAMachine(const bob::machine::PLDAMachine& other):
+bob::learn::misc::PLDAMachine::PLDAMachine(const bob::learn::misc::PLDAMachine& other):
   m_plda_base(other.m_plda_base),
   m_n_samples(other.m_n_samples),
   m_nh_sum_xit_beta_xi(other.m_nh_sum_xit_beta_xi),
@@ -619,18 +617,18 @@ bob::machine::PLDAMachine::PLDAMachine(const bob::machine::PLDAMachine& other):
   resizeTmp();
 }
 
-bob::machine::PLDAMachine::PLDAMachine(bob::io::base::HDF5File& config,
-    const boost::shared_ptr<bob::machine::PLDABase> plda_base):
+bob::learn::misc::PLDAMachine::PLDAMachine(bob::io::base::HDF5File& config,
+    const boost::shared_ptr<bob::learn::misc::PLDABase> plda_base):
   m_plda_base(plda_base)
 {
   load(config);
 }
 
-bob::machine::PLDAMachine::~PLDAMachine() {
+bob::learn::misc::PLDAMachine::~PLDAMachine() {
 }
 
-bob::machine::PLDAMachine& bob::machine::PLDAMachine::operator=
-(const bob::machine::PLDAMachine& other)
+bob::learn::misc::PLDAMachine& bob::learn::misc::PLDAMachine::operator=
+(const bob::learn::misc::PLDAMachine& other)
 {
   if(this!=&other)
   {
@@ -646,8 +644,8 @@ bob::machine::PLDAMachine& bob::machine::PLDAMachine::operator=
   return *this;
 }
 
-bool bob::machine::PLDAMachine::operator==
-    (const bob::machine::PLDAMachine& b) const
+bool bob::learn::misc::PLDAMachine::operator==
+    (const bob::learn::misc::PLDAMachine& b) const
 {
   if (!(( (!m_plda_base && !b.m_plda_base) ||
           ((m_plda_base && b.m_plda_base) && *(m_plda_base) == *(b.m_plda_base))) &&
@@ -672,14 +670,14 @@ bool bob::machine::PLDAMachine::operator==
   return true;
 }
 
-bool bob::machine::PLDAMachine::operator!=
-    (const bob::machine::PLDAMachine& b) const
+bool bob::learn::misc::PLDAMachine::operator!=
+    (const bob::learn::misc::PLDAMachine& b) const
 {
   return !(this->operator==(b));
 }
 
-bool bob::machine::PLDAMachine::is_similar_to(
-  const bob::machine::PLDAMachine& b, const double r_epsilon,
+bool bob::learn::misc::PLDAMachine::is_similar_to(
+  const bob::learn::misc::PLDAMachine& b, const double r_epsilon,
   const double a_epsilon) const
 {
   return (( (!m_plda_base && !b.m_plda_base) ||
@@ -693,7 +691,7 @@ bool bob::machine::PLDAMachine::is_similar_to(
           bob::core::isClose(m_cache_loglike_constterm, b.m_cache_loglike_constterm, r_epsilon, a_epsilon));
 }
 
-void bob::machine::PLDAMachine::load(bob::io::base::HDF5File& config)
+void bob::learn::misc::PLDAMachine::load(bob::io::base::HDF5File& config)
 {
   //reads all data directly into the member variables
   m_n_samples = config.read<uint64_t>("n_samples");
@@ -717,7 +715,7 @@ void bob::machine::PLDAMachine::load(bob::io::base::HDF5File& config)
   resizeTmp();
 }
 
-void bob::machine::PLDAMachine::save(bob::io::base::HDF5File& config) const
+void bob::learn::misc::PLDAMachine::save(bob::io::base::HDF5File& config) const
 {
   config.set("n_samples", m_n_samples);
   config.set("nh_sum_xit_beta_xi", m_nh_sum_xit_beta_xi);
@@ -743,7 +741,7 @@ void bob::machine::PLDAMachine::save(bob::io::base::HDF5File& config) const
   }
 }
 
-void bob::machine::PLDAMachine::setPLDABase(const boost::shared_ptr<bob::machine::PLDABase> plda_base)
+void bob::learn::misc::PLDAMachine::setPLDABase(const boost::shared_ptr<bob::learn::misc::PLDABase> plda_base)
 {
   m_plda_base = plda_base;
   m_weighted_sum.resizeAndPreserve(getDimF());
@@ -752,7 +750,7 @@ void bob::machine::PLDAMachine::setPLDABase(const boost::shared_ptr<bob::machine
 }
 
 
-void bob::machine::PLDAMachine::setWeightedSum(const blitz::Array<double,1>& ws)
+void bob::learn::misc::PLDAMachine::setWeightedSum(const blitz::Array<double,1>& ws)
 {
   if(ws.extent(0) != m_weighted_sum.extent(0)) {
     boost::format m("size of parameter `ws' (%d) does not match the expected size (%d)");
@@ -762,7 +760,7 @@ void bob::machine::PLDAMachine::setWeightedSum(const blitz::Array<double,1>& ws)
   m_weighted_sum.reference(bob::core::array::ccopy(ws));
 }
 
-const blitz::Array<double,2>& bob::machine::PLDAMachine::getGamma(const size_t a) const
+const blitz::Array<double,2>& bob::learn::misc::PLDAMachine::getGamma(const size_t a) const
 {
   // Checks in both base machine and this machine
   if (m_plda_base->hasGamma(a)) return m_plda_base->getGamma(a);
@@ -771,7 +769,7 @@ const blitz::Array<double,2>& bob::machine::PLDAMachine::getGamma(const size_t a
   return (m_cache_gamma.find(a))->second;
 }
 
-const blitz::Array<double,2>& bob::machine::PLDAMachine::getAddGamma(const size_t a)
+const blitz::Array<double,2>& bob::learn::misc::PLDAMachine::getAddGamma(const size_t a)
 {
   if (m_plda_base->hasGamma(a)) return m_plda_base->getGamma(a);
   else if (hasGamma(a)) return m_cache_gamma[a];
@@ -782,7 +780,7 @@ const blitz::Array<double,2>& bob::machine::PLDAMachine::getAddGamma(const size_
   return m_cache_gamma[a];
 }
 
-double bob::machine::PLDAMachine::getLogLikeConstTerm(const size_t a) const
+double bob::learn::misc::PLDAMachine::getLogLikeConstTerm(const size_t a) const
 {
   // Checks in both base machine and this machine
   if (!m_plda_base) throw std::runtime_error("No PLDABase set to this machine");
@@ -792,7 +790,7 @@ double bob::machine::PLDAMachine::getLogLikeConstTerm(const size_t a) const
   return (m_cache_loglike_constterm.find(a))->second;
 }
 
-double bob::machine::PLDAMachine::getAddLogLikeConstTerm(const size_t a)
+double bob::learn::misc::PLDAMachine::getAddLogLikeConstTerm(const size_t a)
 {
   if (!m_plda_base) throw std::runtime_error("No PLDABase set to this machine");
   if (m_plda_base->hasLogLikeConstTerm(a)) return m_plda_base->getLogLikeConstTerm(a);
@@ -803,32 +801,32 @@ double bob::machine::PLDAMachine::getAddLogLikeConstTerm(const size_t a)
   return m_cache_loglike_constterm[a];
 }
 
-void bob::machine::PLDAMachine::clearMaps()
+void bob::learn::misc::PLDAMachine::clearMaps()
 {
   m_cache_gamma.clear();
   m_cache_loglike_constterm.clear();
 }
 
-void bob::machine::PLDAMachine::forward(const blitz::Array<double,1>& sample, double& score) const
+void bob::learn::misc::PLDAMachine::forward(const blitz::Array<double,1>& sample, double& score) const
 {
   forward_(sample,score);
 }
 
-void bob::machine::PLDAMachine::forward_(const blitz::Array<double,1>& sample, double& score) const
+void bob::learn::misc::PLDAMachine::forward_(const blitz::Array<double,1>& sample, double& score) const
 {
   // Computes the log likelihood ratio
   score = computeLogLikelihood(sample, true) - // match
           (computeLogLikelihood(sample, false) + m_loglikelihood); // no match
 }
 
-void bob::machine::PLDAMachine::forward(const blitz::Array<double,2>& samples, double& score) const
+void bob::learn::misc::PLDAMachine::forward(const blitz::Array<double,2>& samples, double& score) const
 {
   // Computes the log likelihood ratio
   score = computeLogLikelihood(samples, true) - // match
           (computeLogLikelihood(samples, false) + m_loglikelihood); // no match
 }
 
-double bob::machine::PLDAMachine::computeLogLikelihood(const blitz::Array<double,1>& sample,
+double bob::learn::misc::PLDAMachine::computeLogLikelihood(const blitz::Array<double,1>& sample,
   bool enrol) const
 {
   if (!m_plda_base) throw std::runtime_error("No PLDABase set to this machine");
@@ -883,7 +881,7 @@ double bob::machine::PLDAMachine::computeLogLikelihood(const blitz::Array<double
   return log_likelihood;
 }
 
-double bob::machine::PLDAMachine::computeLogLikelihood(const blitz::Array<double,2>& samples,
+double bob::learn::misc::PLDAMachine::computeLogLikelihood(const blitz::Array<double,2>& samples,
   bool enrol) const
 {
   if (!m_plda_base) throw std::runtime_error("No PLDABase set to this machine");
@@ -941,7 +939,7 @@ double bob::machine::PLDAMachine::computeLogLikelihood(const blitz::Array<double
   return log_likelihood;
 }
 
-void bob::machine::PLDAMachine::resize(const size_t dim_d, const size_t dim_f,
+void bob::learn::misc::PLDAMachine::resize(const size_t dim_d, const size_t dim_f,
   const size_t dim_g)
 {
   m_weighted_sum.resizeAndPreserve(dim_f);
@@ -949,7 +947,7 @@ void bob::machine::PLDAMachine::resize(const size_t dim_d, const size_t dim_f,
   resizeTmp();
 }
 
-void bob::machine::PLDAMachine::resizeTmp()
+void bob::learn::misc::PLDAMachine::resizeTmp()
 {
   if (m_plda_base)
   {
diff --git a/bob/learn/misc/cpp/PLDATrainer.cpp b/bob/learn/misc/cpp/PLDATrainer.cpp
index 98fcfbb..2c687f3 100644
--- a/bob/learn/misc/cpp/PLDATrainer.cpp
+++ b/bob/learn/misc/cpp/PLDATrainer.cpp
@@ -18,15 +18,15 @@
 #include <vector>
 #include <limits>
 
-bob::trainer::PLDATrainer::PLDATrainer(const size_t max_iterations,
+bob::learn::misc::PLDATrainer::PLDATrainer(const size_t max_iterations,
     const bool use_sum_second_order):
-  EMTrainer<bob::machine::PLDABase, std::vector<blitz::Array<double,2> > >
+  EMTrainer<bob::learn::misc::PLDABase, std::vector<blitz::Array<double,2> > >
     (0.001, max_iterations, false),
   m_dim_d(0), m_dim_f(0), m_dim_g(0),
   m_use_sum_second_order(use_sum_second_order),
-  m_initF_method(bob::trainer::PLDATrainer::RANDOM_F), m_initF_ratio(1.),
-  m_initG_method(bob::trainer::PLDATrainer::RANDOM_G), m_initG_ratio(1.),
-  m_initSigma_method(bob::trainer::PLDATrainer::RANDOM_SIGMA),
+  m_initF_method(bob::learn::misc::PLDATrainer::RANDOM_F), m_initF_ratio(1.),
+  m_initG_method(bob::learn::misc::PLDATrainer::RANDOM_G), m_initG_ratio(1.),
+  m_initSigma_method(bob::learn::misc::PLDATrainer::RANDOM_SIGMA),
   m_initSigma_ratio(1.),
   m_cache_S(0,0),
   m_cache_z_first_order(0), m_cache_sum_z_second_order(0,0), m_cache_z_second_order(0),
@@ -38,8 +38,8 @@ bob::trainer::PLDATrainer::PLDATrainer(const size_t max_iterations,
 {
 }
 
-bob::trainer::PLDATrainer::PLDATrainer(const bob::trainer::PLDATrainer& other):
-  EMTrainer<bob::machine::PLDABase, std::vector<blitz::Array<double,2> > >
+bob::learn::misc::PLDATrainer::PLDATrainer(const bob::learn::misc::PLDATrainer& other):
+  EMTrainer<bob::learn::misc::PLDABase, std::vector<blitz::Array<double,2> > >
     (other.m_convergence_threshold, other.m_max_iterations,
      other.m_compute_likelihood),
   m_dim_d(other.m_dim_d), m_dim_f(other.m_dim_f), m_dim_g(other.m_dim_g),
@@ -65,14 +65,14 @@ bob::trainer::PLDATrainer::PLDATrainer(const bob::trainer::PLDATrainer& other):
   resizeTmp();
 }
 
-bob::trainer::PLDATrainer::~PLDATrainer() {}
+bob::learn::misc::PLDATrainer::~PLDATrainer() {}
 
-bob::trainer::PLDATrainer& bob::trainer::PLDATrainer::operator=
-(const bob::trainer::PLDATrainer& other)
+bob::learn::misc::PLDATrainer& bob::learn::misc::PLDATrainer::operator=
+(const bob::learn::misc::PLDATrainer& other)
 {
   if(this != &other)
   {
-    bob::trainer::EMTrainer<bob::machine::PLDABase,
+    bob::learn::misc::EMTrainer<bob::learn::misc::PLDABase,
       std::vector<blitz::Array<double,2> > >::operator=(other);
     m_dim_d = other.m_dim_d;
     m_dim_f = other.m_dim_f;
@@ -100,10 +100,10 @@ bob::trainer::PLDATrainer& bob::trainer::PLDATrainer::operator=
   return *this;
 }
 
-bool bob::trainer::PLDATrainer::operator==
-  (const bob::trainer::PLDATrainer& other) const
+bool bob::learn::misc::PLDATrainer::operator==
+  (const bob::learn::misc::PLDATrainer& other) const
 {
-  return bob::trainer::EMTrainer<bob::machine::PLDABase,
+  return bob::learn::misc::EMTrainer<bob::learn::misc::PLDABase,
            std::vector<blitz::Array<double,2> > >::operator==(other) &&
          m_dim_d == other.m_dim_d &&
          m_dim_f == other.m_dim_f &&
@@ -129,17 +129,17 @@ bool bob::trainer::PLDATrainer::operator==
          bob::core::array::isEqual(m_cache_iota, other.m_cache_iota);
 }
 
-bool bob::trainer::PLDATrainer::operator!=
-  (const bob::trainer::PLDATrainer &other) const
+bool bob::learn::misc::PLDATrainer::operator!=
+  (const bob::learn::misc::PLDATrainer &other) const
 {
   return !(this->operator==(other));
 }
 
-bool bob::trainer::PLDATrainer::is_similar_to
-  (const bob::trainer::PLDATrainer &other, const double r_epsilon,
+bool bob::learn::misc::PLDATrainer::is_similar_to
+  (const bob::learn::misc::PLDATrainer &other, const double r_epsilon,
    const double a_epsilon) const
 {
-  return bob::trainer::EMTrainer<bob::machine::PLDABase,
+  return bob::learn::misc::EMTrainer<bob::learn::misc::PLDABase,
            std::vector<blitz::Array<double,2> > >::is_similar_to(other, r_epsilon, a_epsilon) &&
          m_dim_d == other.m_dim_d &&
          m_dim_f == other.m_dim_f &&
@@ -166,7 +166,7 @@ bool bob::trainer::PLDATrainer::is_similar_to
          bob::core::array::isClose(m_cache_iota, other.m_cache_iota, r_epsilon, a_epsilon);
 }
 
-void bob::trainer::PLDATrainer::initialize(bob::machine::PLDABase& machine,
+void bob::learn::misc::PLDATrainer::initialize(bob::learn::misc::PLDABase& machine,
   const std::vector<blitz::Array<double,2> >& v_ar)
 {
   // Checks training data
@@ -190,7 +190,7 @@ void bob::trainer::PLDATrainer::initialize(bob::machine::PLDABase& machine,
   initFGSigma(machine, v_ar);
 }
 
-void bob::trainer::PLDATrainer::finalize(bob::machine::PLDABase& machine,
+void bob::learn::misc::PLDATrainer::finalize(bob::learn::misc::PLDABase& machine,
   const std::vector<blitz::Array<double,2> >& v_ar)
 {
   // Precomputes constant parts of the log likelihood and (gamma_a)
@@ -200,7 +200,7 @@ void bob::trainer::PLDATrainer::finalize(bob::machine::PLDABase& machine,
   machine.getAddLogLikeConstTerm(1);
 }
 
-void bob::trainer::PLDATrainer::checkTrainingData(const std::vector<blitz::Array<double,2> >& v_ar)
+void bob::learn::misc::PLDATrainer::checkTrainingData(const std::vector<blitz::Array<double,2> >& v_ar)
 {
   // Checks that the vector of Arraysets is not empty
   if (v_ar.size() == 0) {
@@ -219,7 +219,7 @@ void bob::trainer::PLDATrainer::checkTrainingData(const std::vector<blitz::Array
   }
 }
 
-void bob::trainer::PLDATrainer::initMembers(const std::vector<blitz::Array<double,2> >& v_ar)
+void bob::learn::misc::PLDATrainer::initMembers(const std::vector<blitz::Array<double,2> >& v_ar)
 {
   // Gets dimension (first Arrayset)
   const size_t n_features = v_ar[0].extent(1); // dimensionality of the data
@@ -268,7 +268,7 @@ void bob::trainer::PLDATrainer::initMembers(const std::vector<blitz::Array<doubl
   resizeTmp();
 }
 
-void bob::trainer::PLDATrainer::resizeTmp()
+void bob::learn::misc::PLDATrainer::resizeTmp()
 {
   m_tmp_nf_1.resize(m_dim_f);
   m_tmp_nf_2.resize(m_dim_f);
@@ -280,7 +280,7 @@ void bob::trainer::PLDATrainer::resizeTmp()
   m_tmp_D_nfng_2.resize(m_dim_d, m_dim_f+m_dim_g);
 }
 
-void bob::trainer::PLDATrainer::computeMeanVariance(bob::machine::PLDABase& machine,
+void bob::learn::misc::PLDATrainer::computeMeanVariance(bob::learn::misc::PLDABase& machine,
   const std::vector<blitz::Array<double,2> >& v_ar)
 {
   blitz::Array<double,1>& mu = machine.updateMu();
@@ -312,7 +312,7 @@ void bob::trainer::PLDATrainer::computeMeanVariance(bob::machine::PLDABase& mach
   }
 }
 
-void bob::trainer::PLDATrainer::initFGSigma(bob::machine::PLDABase& machine,
+void bob::learn::misc::PLDATrainer::initFGSigma(bob::learn::misc::PLDABase& machine,
   const std::vector<blitz::Array<double,2> >& v_ar)
 {
   // Initializes F, G and sigma
@@ -324,14 +324,14 @@ void bob::trainer::PLDATrainer::initFGSigma(bob::machine::PLDABase& machine,
   machine.precompute();
 }
 
-void bob::trainer::PLDATrainer::initF(bob::machine::PLDABase& machine,
+void bob::learn::misc::PLDATrainer::initF(bob::learn::misc::PLDABase& machine,
   const std::vector<blitz::Array<double,2> >& v_ar)
 {
   blitz::Array<double,2>& F = machine.updateF();
   blitz::Range a = blitz::Range::all();
 
   // 1: between-class scatter
-  if (m_initF_method == bob::trainer::PLDATrainer::BETWEEN_SCATTER)
+  if (m_initF_method == bob::learn::misc::PLDATrainer::BETWEEN_SCATTER)
   {
     if (machine.getDimF() > v_ar.size()) {
       boost::format m("The rank of the matrix F ('%ld') can't be larger than the number of classes in the training set ('%ld')");
@@ -383,14 +383,14 @@ void bob::trainer::PLDATrainer::initF(bob::machine::PLDABase& machine,
   }
 }
 
-void bob::trainer::PLDATrainer::initG(bob::machine::PLDABase& machine,
+void bob::learn::misc::PLDATrainer::initG(bob::learn::misc::PLDABase& machine,
   const std::vector<blitz::Array<double,2> >& v_ar)
 {
   blitz::Array<double,2>& G = machine.updateG();
   blitz::Range a = blitz::Range::all();
 
   // 1: within-class scatter
-  if (m_initG_method == bob::trainer::PLDATrainer::WITHIN_SCATTER)
+  if (m_initG_method == bob::learn::misc::PLDATrainer::WITHIN_SCATTER)
   {
     // a/ Computes within-class scatter matrix
     blitz::firstIndex bi;
@@ -450,14 +450,14 @@ void bob::trainer::PLDATrainer::initG(bob::machine::PLDABase& machine,
   }
 }
 
-void bob::trainer::PLDATrainer::initSigma(bob::machine::PLDABase& machine,
+void bob::learn::misc::PLDATrainer::initSigma(bob::learn::misc::PLDABase& machine,
   const std::vector<blitz::Array<double,2> >& v_ar)
 {
   blitz::Array<double,1>& sigma = machine.updateSigma();
   blitz::Range a = blitz::Range::all();
 
   // 1: percentage of the variance of G
-  if (m_initSigma_method == bob::trainer::PLDATrainer::VARIANCE_G) {
+  if (m_initSigma_method == bob::learn::misc::PLDATrainer::VARIANCE_G) {
     const blitz::Array<double,2>& G = machine.getG();
     blitz::secondIndex bj;
     m_tmp_D_1 = blitz::mean(G, bj);
@@ -465,11 +465,11 @@ void bob::trainer::PLDATrainer::initSigma(bob::machine::PLDABase& machine,
     sigma = blitz::fabs(m_tmp_D_1) * m_initSigma_ratio;
   }
   // 2: constant value
-  else if (m_initSigma_method == bob::trainer::PLDATrainer::CONSTANT) {
+  else if (m_initSigma_method == bob::learn::misc::PLDATrainer::CONSTANT) {
     sigma = m_initSigma_ratio;
   }
   // 3: percentage of the variance of the data
-  else if (m_initSigma_method == bob::trainer::PLDATrainer::VARIANCE_DATA) {
+  else if (m_initSigma_method == bob::learn::misc::PLDATrainer::VARIANCE_DATA) {
     // a/ Computes the global mean
     //    m_tmp_D_1 = 1/N sum_i x_i
     m_tmp_D_1 = 0.;
@@ -499,7 +499,7 @@ void bob::trainer::PLDATrainer::initSigma(bob::machine::PLDABase& machine,
   machine.applyVarianceThreshold();
 }
 
-void bob::trainer::PLDATrainer::eStep(bob::machine::PLDABase& machine,
+void bob::learn::misc::PLDATrainer::eStep(bob::learn::misc::PLDABase& machine,
   const std::vector<blitz::Array<double,2> >& v_ar)
 {
   // Precomputes useful variables using current estimates of F,G, and sigma
@@ -594,7 +594,7 @@ void bob::trainer::PLDATrainer::eStep(bob::machine::PLDABase& machine,
   }
 }
 
-void bob::trainer::PLDATrainer::precomputeFromFGSigma(bob::machine::PLDABase& machine)
+void bob::learn::misc::PLDATrainer::precomputeFromFGSigma(bob::learn::misc::PLDABase& machine)
 {
   // Blitz compatibility: ugly fix (const_cast, as old blitz version does not
   // provide a non-const version of transpose())
@@ -636,7 +636,7 @@ void bob::trainer::PLDATrainer::precomputeFromFGSigma(bob::machine::PLDABase& ma
   }
 }
 
-void bob::trainer::PLDATrainer::precomputeLogLike(bob::machine::PLDABase& machine,
+void bob::learn::misc::PLDATrainer::precomputeLogLike(bob::learn::misc::PLDABase& machine,
   const std::vector<blitz::Array<double,2> >& v_ar)
 {
   // Precomputes the log determinant of alpha and sigma
@@ -654,7 +654,7 @@ void bob::trainer::PLDATrainer::precomputeLogLike(bob::machine::PLDABase& machin
 }
 
 
-void bob::trainer::PLDATrainer::mStep(bob::machine::PLDABase& machine,
+void bob::learn::misc::PLDATrainer::mStep(bob::learn::misc::PLDABase& machine,
   const std::vector<blitz::Array<double,2> >& v_ar)
 {
   // 1/ New estimate of B = {F G}
@@ -669,7 +669,7 @@ void bob::trainer::PLDATrainer::mStep(bob::machine::PLDABase& machine,
   precomputeFromFGSigma(machine);
 }
 
-void bob::trainer::PLDATrainer::updateFG(bob::machine::PLDABase& machine,
+void bob::learn::misc::PLDATrainer::updateFG(bob::learn::misc::PLDABase& machine,
   const std::vector<blitz::Array<double,2> >& v_ar)
 {
   /// Computes the B matrix (B = [F G])
@@ -708,7 +708,7 @@ void bob::trainer::PLDATrainer::updateFG(bob::machine::PLDABase& machine,
   G = m_cache_B(a, blitz::Range(m_dim_f, m_dim_f+m_dim_g-1));
 }
 
-void bob::trainer::PLDATrainer::updateSigma(bob::machine::PLDABase& machine,
+void bob::learn::misc::PLDATrainer::updateSigma(bob::learn::misc::PLDABase& machine,
   const std::vector<blitz::Array<double,2> >& v_ar)
 {
   /// Computes the Sigma matrix
@@ -746,14 +746,14 @@ void bob::trainer::PLDATrainer::updateSigma(bob::machine::PLDABase& machine,
   machine.applyVarianceThreshold();
 }
 
-double bob::trainer::PLDATrainer::computeLikelihood(bob::machine::PLDABase& machine)
+double bob::learn::misc::PLDATrainer::computeLikelihood(bob::learn::misc::PLDABase& machine)
 {
   double llh = 0.;
   // TODO: implement log likelihood computation
   return llh;
 }
 
-void bob::trainer::PLDATrainer::enrol(bob::machine::PLDAMachine& plda_machine,
+void bob::learn::misc::PLDATrainer::enrol(bob::learn::misc::PLDAMachine& plda_machine,
   const blitz::Array<double,2>& ar) const
 {
   // Gets dimension
diff --git a/bob/learn/misc/cpp/WienerMachine.cpp b/bob/learn/misc/cpp/WienerMachine.cpp
index 921db05..d7540d8 100644
--- a/bob/learn/misc/cpp/WienerMachine.cpp
+++ b/bob/learn/misc/cpp/WienerMachine.cpp
@@ -13,7 +13,7 @@
 #include <bob.sp/FFT2D.h>
 #include <complex>
 
-bob::machine::WienerMachine::WienerMachine():
+bob::learn::misc::WienerMachine::WienerMachine():
   m_Ps(0,0),
   m_variance_threshold(1e-8),
   m_Pn(0),
@@ -24,7 +24,7 @@ bob::machine::WienerMachine::WienerMachine():
 {
 }
 
-bob::machine::WienerMachine::WienerMachine(const blitz::Array<double,2>& Ps,
+bob::learn::misc::WienerMachine::WienerMachine(const blitz::Array<double,2>& Ps,
     const double Pn, const double variance_threshold):
   m_Ps(bob::core::array::ccopy(Ps)),
   m_variance_threshold(variance_threshold),
@@ -38,7 +38,7 @@ bob::machine::WienerMachine::WienerMachine(const blitz::Array<double,2>& Ps,
   computeW();
 }
 
-bob::machine::WienerMachine::WienerMachine(const size_t height,
+bob::learn::misc::WienerMachine::WienerMachine(const size_t height,
     const size_t width, const double Pn, const double variance_threshold):
   m_Ps(height,width),
   m_variance_threshold(variance_threshold),
@@ -52,7 +52,7 @@ bob::machine::WienerMachine::WienerMachine(const size_t height,
   computeW();
 }
 
-bob::machine::WienerMachine::WienerMachine(const bob::machine::WienerMachine& other):
+bob::learn::misc::WienerMachine::WienerMachine(const bob::learn::misc::WienerMachine& other):
   m_Ps(bob::core::array::ccopy(other.m_Ps)),
   m_variance_threshold(other.m_variance_threshold),
   m_Pn(other.m_Pn),
@@ -64,15 +64,15 @@ bob::machine::WienerMachine::WienerMachine(const bob::machine::WienerMachine& ot
 {
 }
 
-bob::machine::WienerMachine::WienerMachine(bob::io::base::HDF5File& config)
+bob::learn::misc::WienerMachine::WienerMachine(bob::io::base::HDF5File& config)
 {
   load(config);
 }
 
-bob::machine::WienerMachine::~WienerMachine() {}
+bob::learn::misc::WienerMachine::~WienerMachine() {}
 
-bob::machine::WienerMachine& bob::machine::WienerMachine::operator=
-(const bob::machine::WienerMachine& other)
+bob::learn::misc::WienerMachine& bob::learn::misc::WienerMachine::operator=
+(const bob::learn::misc::WienerMachine& other)
 {
   if (this != &other)
   {
@@ -88,7 +88,7 @@ bob::machine::WienerMachine& bob::machine::WienerMachine::operator=
   return *this;
 }
 
-bool bob::machine::WienerMachine::operator==(const bob::machine::WienerMachine& b) const
+bool bob::learn::misc::WienerMachine::operator==(const bob::learn::misc::WienerMachine& b) const
 {
   return bob::core::array::isEqual(m_Ps, b.m_Ps) &&
          m_variance_threshold == b.m_variance_threshold &&
@@ -96,12 +96,12 @@ bool bob::machine::WienerMachine::operator==(const bob::machine::WienerMachine&
          bob::core::array::isEqual(m_W, b.m_W);
 }
 
-bool bob::machine::WienerMachine::operator!=(const bob::machine::WienerMachine& b) const
+bool bob::learn::misc::WienerMachine::operator!=(const bob::learn::misc::WienerMachine& b) const
 {
   return !(this->operator==(b));
 }
 
-bool bob::machine::WienerMachine::is_similar_to(const bob::machine::WienerMachine& b,
+bool bob::learn::misc::WienerMachine::is_similar_to(const bob::learn::misc::WienerMachine& b,
   const double r_epsilon, const double a_epsilon) const
 {
   return bob::core::array::isClose(m_Ps, b.m_Ps, r_epsilon, a_epsilon) &&
@@ -110,7 +110,7 @@ bool bob::machine::WienerMachine::is_similar_to(const bob::machine::WienerMachin
          bob::core::array::isClose(m_W, b.m_W, r_epsilon, a_epsilon);
 }
 
-void bob::machine::WienerMachine::load(bob::io::base::HDF5File& config)
+void bob::learn::misc::WienerMachine::load(bob::io::base::HDF5File& config)
 {
   //reads all data directly into the member variables
   m_Ps.reference(config.readArray<double,2>("Ps"));
@@ -123,7 +123,7 @@ void bob::machine::WienerMachine::load(bob::io::base::HDF5File& config)
   m_buffer2.resize(m_Ps.extent(0),m_Ps.extent(1));
 }
 
-void bob::machine::WienerMachine::resize(const size_t height,
+void bob::learn::misc::WienerMachine::resize(const size_t height,
   const size_t width)
 {
   m_Ps.resizeAndPreserve(height,width);
@@ -134,7 +134,7 @@ void bob::machine::WienerMachine::resize(const size_t height,
   m_buffer2.resizeAndPreserve(height,width);
 }
 
-void bob::machine::WienerMachine::save(bob::io::base::HDF5File& config) const
+void bob::learn::misc::WienerMachine::save(bob::io::base::HDF5File& config) const
 {
   config.setArray("Ps", m_Ps);
   config.set("Pn", m_Pn);
@@ -142,14 +142,14 @@ void bob::machine::WienerMachine::save(bob::io::base::HDF5File& config) const
   config.setArray("W", m_W);
 }
 
-void bob::machine::WienerMachine::computeW()
+void bob::learn::misc::WienerMachine::computeW()
 {
   // W = 1 / (1 + Pn / Ps_thresholded)
   m_W = 1. / (1. + m_Pn / m_Ps);
 }
 
 
-void bob::machine::WienerMachine::forward_(const blitz::Array<double,2>& input,
+void bob::learn::misc::WienerMachine::forward_(const blitz::Array<double,2>& input,
   blitz::Array<double,2>& output) const
 {
   m_fft(bob::core::array::cast<std::complex<double> >(input), m_buffer1);
@@ -158,7 +158,7 @@ void bob::machine::WienerMachine::forward_(const blitz::Array<double,2>& input,
   output = blitz::abs(m_buffer2);
 }
 
-void bob::machine::WienerMachine::forward(const blitz::Array<double,2>& input,
+void bob::learn::misc::WienerMachine::forward(const blitz::Array<double,2>& input,
   blitz::Array<double,2>& output) const
 {
   if (m_W.extent(0) != input.extent(0)) { //checks input
@@ -184,7 +184,7 @@ void bob::machine::WienerMachine::forward(const blitz::Array<double,2>& input,
   forward_(input, output);
 }
 
-void bob::machine::WienerMachine::setVarianceThreshold(
+void bob::learn::misc::WienerMachine::setVarianceThreshold(
   const double variance_threshold)
 {
   m_variance_threshold = variance_threshold;
@@ -192,7 +192,7 @@ void bob::machine::WienerMachine::setVarianceThreshold(
   computeW();
 }
 
-void bob::machine::WienerMachine::setPs(const blitz::Array<double,2>& Ps)
+void bob::learn::misc::WienerMachine::setPs(const blitz::Array<double,2>& Ps)
 {
   if (m_Ps.extent(0) != Ps.extent(0)) {
     boost::format m("number of rows (%d) for input `Ps' does not match the expected (internal) size (%d)");
@@ -208,7 +208,7 @@ void bob::machine::WienerMachine::setPs(const blitz::Array<double,2>& Ps)
   computeW();
 }
 
-void bob::machine::WienerMachine::applyVarianceThreshold()
+void bob::learn::misc::WienerMachine::applyVarianceThreshold()
 {
   m_Ps = blitz::where(m_Ps < m_variance_threshold, m_variance_threshold, m_Ps);
 }
diff --git a/bob/learn/misc/cpp/WienerTrainer.cpp b/bob/learn/misc/cpp/WienerTrainer.cpp
index 697082c..709e25f 100644
--- a/bob/learn/misc/cpp/WienerTrainer.cpp
+++ b/bob/learn/misc/cpp/WienerTrainer.cpp
@@ -10,44 +10,44 @@
 #include <bob.sp/FFT2D.h>
 #include <complex>
 
-bob::trainer::WienerTrainer::WienerTrainer()
+bob::learn::misc::WienerTrainer::WienerTrainer()
 {
 }
 
-bob::trainer::WienerTrainer::WienerTrainer(const bob::trainer::WienerTrainer& other)
+bob::learn::misc::WienerTrainer::WienerTrainer(const bob::learn::misc::WienerTrainer& other)
 {
 }
 
-bob::trainer::WienerTrainer::~WienerTrainer()
+bob::learn::misc::WienerTrainer::~WienerTrainer()
 {
 }
 
-bob::trainer::WienerTrainer& bob::trainer::WienerTrainer::operator=
-(const bob::trainer::WienerTrainer& other)
+bob::learn::misc::WienerTrainer& bob::learn::misc::WienerTrainer::operator=
+(const bob::learn::misc::WienerTrainer& other)
 {
   return *this;
 }
 
-bool bob::trainer::WienerTrainer::operator==
-  (const bob::trainer::WienerTrainer& other) const
+bool bob::learn::misc::WienerTrainer::operator==
+  (const bob::learn::misc::WienerTrainer& other) const
 {
   return true;
 }
 
-bool bob::trainer::WienerTrainer::operator!=
-  (const bob::trainer::WienerTrainer& other) const
+bool bob::learn::misc::WienerTrainer::operator!=
+  (const bob::learn::misc::WienerTrainer& other) const
 {
   return !(this->operator==(other));
 }
 
-bool bob::trainer::WienerTrainer::is_similar_to
-  (const bob::trainer::WienerTrainer& other, const double r_epsilon,
+bool bob::learn::misc::WienerTrainer::is_similar_to
+  (const bob::learn::misc::WienerTrainer& other, const double r_epsilon,
    const double a_epsilon) const
 {
   return true;
 }
 
-void bob::trainer::WienerTrainer::train(bob::machine::WienerMachine& machine,
+void bob::learn::misc::WienerTrainer::train(bob::learn::misc::WienerMachine& machine,
     const blitz::Array<double,3>& ar)
 {
   // Data is checked now and conforms, just proceed w/o any further checks.
diff --git a/bob/learn/misc/cpp/ZTNorm.cpp b/bob/learn/misc/cpp/ZTNorm.cpp
index 2a99eca..504f31c 100644
--- a/bob/learn/misc/cpp/ZTNorm.cpp
+++ b/bob/learn/misc/cpp/ZTNorm.cpp
@@ -10,178 +10,173 @@
 #include <bob.core/assert.h>
 #include <limits>
 
-namespace bob {
-namespace machine {
-
-namespace detail {
-  void ztNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
-              const blitz::Array<double,2>* rawscores_zprobes_vs_models,
-              const blitz::Array<double,2>* rawscores_probes_vs_tmodels,
-              const blitz::Array<double,2>* rawscores_zprobes_vs_tmodels,
-              const blitz::Array<bool,2>* mask_zprobes_vs_tmodels_istruetrial,
-              blitz::Array<double,2>& scores)
-  {
-    // Rename variables
-    const blitz::Array<double,2>& A = rawscores_probes_vs_models;
-    const blitz::Array<double,2>* B = rawscores_zprobes_vs_models;
-    const blitz::Array<double,2>* C = rawscores_probes_vs_tmodels;
-    const blitz::Array<double,2>* D = rawscores_zprobes_vs_tmodels;
-
-    // Compute the sizes
-    int size_eval  = A.extent(0);
-    int size_enrol = A.extent(1);
-    int size_tnorm = (C ? C->extent(0) : 0);
-    int size_znorm = (B ? B->extent(1) : 0);
-
-    // Check the inputs
-    bob::core::array::assertSameDimensionLength(A.extent(0), size_eval);
-    bob::core::array::assertSameDimensionLength(A.extent(1), size_enrol);
-
-    if (B) {
-      bob::core::array::assertSameDimensionLength(B->extent(1), size_znorm);
-      if (size_znorm > 0)
-        bob::core::array::assertSameDimensionLength(B->extent(0), size_eval);
-    }
 
-    if (C) {
-      bob::core::array::assertSameDimensionLength(C->extent(0), size_tnorm);
-      if (size_tnorm > 0)
-        bob::core::array::assertSameDimensionLength(C->extent(1), size_enrol);
-    }
+static void _ztNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
+            const blitz::Array<double,2>* rawscores_zprobes_vs_models,
+            const blitz::Array<double,2>* rawscores_probes_vs_tmodels,
+            const blitz::Array<double,2>* rawscores_zprobes_vs_tmodels,
+            const blitz::Array<bool,2>* mask_zprobes_vs_tmodels_istruetrial,
+            blitz::Array<double,2>& scores)
+{
+  // Rename variables
+  const blitz::Array<double,2>& A = rawscores_probes_vs_models;
+  const blitz::Array<double,2>* B = rawscores_zprobes_vs_models;
+  const blitz::Array<double,2>* C = rawscores_probes_vs_tmodels;
+  const blitz::Array<double,2>* D = rawscores_zprobes_vs_tmodels;
+
+  // Compute the sizes
+  int size_eval  = A.extent(0);
+  int size_enrol = A.extent(1);
+  int size_tnorm = (C ? C->extent(0) : 0);
+  int size_znorm = (B ? B->extent(1) : 0);
+
+  // Check the inputs
+  bob::core::array::assertSameDimensionLength(A.extent(0), size_eval);
+  bob::core::array::assertSameDimensionLength(A.extent(1), size_enrol);
+
+  if (B) {
+    bob::core::array::assertSameDimensionLength(B->extent(1), size_znorm);
+    if (size_znorm > 0)
+      bob::core::array::assertSameDimensionLength(B->extent(0), size_eval);
+  }
 
-    if (D && size_znorm > 0 && size_tnorm > 0) {
-      bob::core::array::assertSameDimensionLength(D->extent(0), size_tnorm);
-      bob::core::array::assertSameDimensionLength(D->extent(1), size_znorm);
-    }
+  if (C) {
+    bob::core::array::assertSameDimensionLength(C->extent(0), size_tnorm);
+    if (size_tnorm > 0)
+      bob::core::array::assertSameDimensionLength(C->extent(1), size_enrol);
+  }
 
-    if (mask_zprobes_vs_tmodels_istruetrial) {
-      bob::core::array::assertSameDimensionLength(mask_zprobes_vs_tmodels_istruetrial->extent(0), size_tnorm);
-      bob::core::array::assertSameDimensionLength(mask_zprobes_vs_tmodels_istruetrial->extent(1), size_znorm);
-    }
+  if (D && size_znorm > 0 && size_tnorm > 0) {
+    bob::core::array::assertSameDimensionLength(D->extent(0), size_tnorm);
+    bob::core::array::assertSameDimensionLength(D->extent(1), size_znorm);
+  }
 
-    bob::core::array::assertSameDimensionLength(scores.extent(0), size_eval);
-    bob::core::array::assertSameDimensionLength(scores.extent(1), size_enrol);
-
-    // Declare needed IndexPlaceholder
-    blitz::firstIndex ii;
-    blitz::secondIndex jj;
-
-    // Constant to check if the std is close to 0.
-    const double eps = std::numeric_limits<double>::min();
-
-    // zA
-    blitz::Array<double,2> zA(A.shape());
-    if (B && size_znorm > 0) {
-      // Znorm  -->      zA  = (A - mean(B) ) / std(B)    [znorm on oringinal scores]
-      // mean(B)
-      blitz::Array<double,1> mean_B(blitz::mean(*B, jj));
-      // std(B)
-      blitz::Array<double,2> B2n(B->shape());
-      B2n = blitz::pow2((*B)(ii, jj) - mean_B(ii));
-      blitz::Array<double,1> std_B(B->extent(0));
-      if(size_znorm>1)
-        std_B = blitz::sqrt(blitz::sum(B2n, jj) / (size_znorm - 1));
-      else // 1 single value -> std = 0
-        std_B = 0;
-      std_B = blitz::where( std_B <= eps, 1., std_B);
+  if (mask_zprobes_vs_tmodels_istruetrial) {
+    bob::core::array::assertSameDimensionLength(mask_zprobes_vs_tmodels_istruetrial->extent(0), size_tnorm);
+    bob::core::array::assertSameDimensionLength(mask_zprobes_vs_tmodels_istruetrial->extent(1), size_znorm);
+  }
 
-      zA = (A(ii, jj) - mean_B(ii)) / std_B(ii);
-    }
-    else
-      zA = A;
-
-    blitz::Array<double,2> zC(size_tnorm, size_enrol);
-    if (D && size_tnorm > 0 && size_znorm > 0) {
-      blitz::Array<double,1> mean_Dimp(size_tnorm);
-      blitz::Array<double,1> std_Dimp(size_tnorm);
-
-      // Compute mean_Dimp and std_Dimp = D only with impostors
-      for (int i = 0; i < size_tnorm; ++i) {
-        double sum = 0;
-        double sumsq = 0;
-        double count = 0;
-        for (int j = 0; j < size_znorm; ++j) {
-          bool keep;
-          // The second part is never executed if mask_zprobes_vs_tmodels_istruetrial==NULL
-          keep = (mask_zprobes_vs_tmodels_istruetrial == NULL) || !(*mask_zprobes_vs_tmodels_istruetrial)(i, j); //tnorm_models_spk_ids(i) != znorm_tests_spk_ids(j);
-
-          double value = keep * (*D)(i, j);
-          sum += value;
-          sumsq += value*value;
-          count += keep;
-        }
-
-        double mean = sum / count;
-        mean_Dimp(i) = mean;
-        if (count > 1)
-          std_Dimp(i) = sqrt((sumsq - count * mean * mean) / (count -1));
-        else // 1 single value -> std = 0
-          std_Dimp(i) = 0;
+  bob::core::array::assertSameDimensionLength(scores.extent(0), size_eval);
+  bob::core::array::assertSameDimensionLength(scores.extent(1), size_enrol);
+
+  // Declare needed IndexPlaceholder
+  blitz::firstIndex ii;
+  blitz::secondIndex jj;
+
+  // Constant to check if the std is close to 0.
+  const double eps = std::numeric_limits<double>::min();
+
+  // zA
+  blitz::Array<double,2> zA(A.shape());
+  if (B && size_znorm > 0) {
+    // Znorm  -->      zA  = (A - mean(B) ) / std(B)    [znorm on original scores]
+    // mean(B)
+    blitz::Array<double,1> mean_B(blitz::mean(*B, jj));
+    // std(B)
+    blitz::Array<double,2> B2n(B->shape());
+    B2n = blitz::pow2((*B)(ii, jj) - mean_B(ii));
+    blitz::Array<double,1> std_B(B->extent(0));
+    if(size_znorm>1)
+      std_B = blitz::sqrt(blitz::sum(B2n, jj) / (size_znorm - 1));
+    else // 1 single value -> std = 0
+      std_B = 0;
+    std_B = blitz::where( std_B <= eps, 1., std_B);
+
+    zA = (A(ii, jj) - mean_B(ii)) / std_B(ii);
+  }
+  else
+    zA = A;
+
+  blitz::Array<double,2> zC(size_tnorm, size_enrol);
+  if (D && size_tnorm > 0 && size_znorm > 0) {
+    blitz::Array<double,1> mean_Dimp(size_tnorm);
+    blitz::Array<double,1> std_Dimp(size_tnorm);
+
+    // Compute mean_Dimp and std_Dimp = D only with impostors
+    for (int i = 0; i < size_tnorm; ++i) {
+      double sum = 0;
+      double sumsq = 0;
+      double count = 0;
+      for (int j = 0; j < size_znorm; ++j) {
+        bool keep;
+        // The second part is never executed if mask_zprobes_vs_tmodels_istruetrial==NULL
+        keep = (mask_zprobes_vs_tmodels_istruetrial == NULL) || !(*mask_zprobes_vs_tmodels_istruetrial)(i, j); //tnorm_models_spk_ids(i) != znorm_tests_spk_ids(j);
+
+        double value = keep * (*D)(i, j);
+        sum += value;
+        sumsq += value*value;
+        count += keep;
       }
 
-      // zC  = (C - mean(D)) / std(D)     [znorm the tnorm scores]
-      std_Dimp = blitz::where( std_Dimp <= eps, 1., std_Dimp);
-      zC = ((*C)(ii, jj) - mean_Dimp(ii)) / std_Dimp(ii);
-    }
-    else if (C && size_tnorm > 0)
-      zC = *C;
-
-    if (C && size_tnorm > 0)
-    {
-      blitz::Array<double,1> mean_zC(size_enrol);
-      blitz::Array<double,1> std_zC(size_enrol);
-
-      // ztA = (zA - mean(zC)) / std(zC)  [ztnorm on eval scores]
-      mean_zC = blitz::mean(zC(jj, ii), jj);
-      if (size_tnorm > 1)
-        std_zC = sqrt(blitz::sum(pow(zC(jj, ii) - mean_zC(ii), 2) , jj) / (size_tnorm - 1));
+      double mean = sum / count;
+      mean_Dimp(i) = mean;
+      if (count > 1)
+        std_Dimp(i) = sqrt((sumsq - count * mean * mean) / (count -1));
       else // 1 single value -> std = 0
-        std_zC = 0;
-      std_zC = blitz::where( std_zC <= eps, 1., std_zC);
-
-      // Normalised scores
-      scores = (zA(ii, jj) - mean_zC(jj)) /  std_zC(jj);
+        std_Dimp(i) = 0;
     }
-    else
-      scores = zA;
+
+    // zC  = (C - mean(D)) / std(D)     [znorm the tnorm scores]
+    std_Dimp = blitz::where( std_Dimp <= eps, 1., std_Dimp);
+    zC = ((*C)(ii, jj) - mean_Dimp(ii)) / std_Dimp(ii);
+  }
+  else if (C && size_tnorm > 0)
+    zC = *C;
+
+  if (C && size_tnorm > 0)
+  {
+    blitz::Array<double,1> mean_zC(size_enrol);
+    blitz::Array<double,1> std_zC(size_enrol);
+
+    // ztA = (zA - mean(zC)) / std(zC)  [ztnorm on eval scores]
+    mean_zC = blitz::mean(zC(jj, ii), jj);
+    if (size_tnorm > 1)
+      std_zC = sqrt(blitz::sum(pow(zC(jj, ii) - mean_zC(ii), 2) , jj) / (size_tnorm - 1));
+    else // 1 single value -> std = 0
+      std_zC = 0;
+    std_zC = blitz::where( std_zC <= eps, 1., std_zC);
+
+    // Normalised scores
+    scores = (zA(ii, jj) - mean_zC(jj)) /  std_zC(jj);
   }
+  else
+    scores = zA;
 }
 
-void ztNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
+void bob::learn::misc::ztNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
             const blitz::Array<double,2>& rawscores_zprobes_vs_models,
             const blitz::Array<double,2>& rawscores_probes_vs_tmodels,
             const blitz::Array<double,2>& rawscores_zprobes_vs_tmodels,
             const blitz::Array<bool,2>& mask_zprobes_vs_tmodels_istruetrial,
             blitz::Array<double,2>& scores)
 {
-  detail::ztNorm(rawscores_probes_vs_models, &rawscores_zprobes_vs_models, &rawscores_probes_vs_tmodels,
+  _ztNorm(rawscores_probes_vs_models, &rawscores_zprobes_vs_models, &rawscores_probes_vs_tmodels,
                  &rawscores_zprobes_vs_tmodels, &mask_zprobes_vs_tmodels_istruetrial, scores);
 }
 
-void ztNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
+void bob::learn::misc::ztNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
             const blitz::Array<double,2>& rawscores_zprobes_vs_models,
             const blitz::Array<double,2>& rawscores_probes_vs_tmodels,
             const blitz::Array<double,2>& rawscores_zprobes_vs_tmodels,
             blitz::Array<double,2>& scores)
 {
-  detail::ztNorm(rawscores_probes_vs_models, &rawscores_zprobes_vs_models, &rawscores_probes_vs_tmodels,
+  _ztNorm(rawscores_probes_vs_models, &rawscores_zprobes_vs_models, &rawscores_probes_vs_tmodels,
                  &rawscores_zprobes_vs_tmodels, NULL, scores);
 }
 
-void tNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
+void bob::learn::misc::tNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
            const blitz::Array<double,2>& rawscores_probes_vs_tmodels,
            blitz::Array<double,2>& scores)
 {
-  detail::ztNorm(rawscores_probes_vs_models, NULL, &rawscores_probes_vs_tmodels,
+  _ztNorm(rawscores_probes_vs_models, NULL, &rawscores_probes_vs_tmodels,
                  NULL, NULL, scores);
 }
 
-void zNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
+void bob::learn::misc::zNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
            const blitz::Array<double,2>& rawscores_zprobes_vs_models,
            blitz::Array<double,2>& scores)
 {
-  detail::ztNorm(rawscores_probes_vs_models, &rawscores_zprobes_vs_models, NULL,
+  _ztNorm(rawscores_probes_vs_models, &rawscores_zprobes_vs_models, NULL,
                  NULL, NULL, scores);
 }
 
-}}
diff --git a/bob/learn/misc/include/bob.learn.misc/BICMachine.h b/bob/learn/misc/include/bob.learn.misc/BICMachine.h
index 21c6e88..827537b 100644
--- a/bob/learn/misc/include/bob.learn.misc/BICMachine.h
+++ b/bob/learn/misc/include/bob.learn.misc/BICMachine.h
@@ -6,19 +6,14 @@
  */
 
 
-#ifndef BOB_MACHINE_BICMACHINE_H
-#define BOB_MACHINE_BICMACHINE_H
+#ifndef BOB_LEARN_MISC_BICMACHINE_H
+#define BOB_LEARN_MISC_BICMACHINE_H
 
 #include <blitz/array.h>
 #include <bob.io.base/HDF5File.h>
 #include <bob.learn.misc/Machine.h>
 
-namespace bob { namespace machine {
-  /**
-   * @ingroup MACHINE
-   * @{
-   */
-
+namespace bob { namespace learn { namespace misc {
   /**
    * This class computes the Bayesian Intrapersonal/Extrapersonal Classifier (BIC),
    * (see "Beyond Eigenfaces: Probabilistic Matching for Face Recognition" from Moghaddam, Wahid and Pentland)
@@ -109,9 +104,6 @@ namespace bob { namespace machine {
 
   };
 
-  /**
-   * @}
-   */
-}}
+} } } // namespaces
 
-#endif // BOB_MACHINE_BICMACHINE_H
+#endif // BOB_LEARN_MISC_BICMACHINE_H
diff --git a/bob/learn/misc/include/bob.learn.misc/BICTrainer.h b/bob/learn/misc/include/bob.learn.misc/BICTrainer.h
index 81e1353..3f8dabf 100644
--- a/bob/learn/misc/include/bob.learn.misc/BICTrainer.h
+++ b/bob/learn/misc/include/bob.learn.misc/BICTrainer.h
@@ -5,16 +5,12 @@
  * Copyright (C) Idiap Research Institute, Martigny, Switzerland
  */
 
-#ifndef BOB_TRAINER_BICTRAINER_H
-#define BOB_TRAINER_BICTRAINER_H
+#ifndef BOB_LEARN_MISC_BICTRAINER_H
+#define BOB_LEARN_MISC_BICTRAINER_H
 
 #include <bob.learn.misc/BICMachine.h>
 
-namespace bob { namespace trainer {
-  /**
-   * @ingroup TRAINER
-   * @{
-   */
+namespace bob { namespace learn { namespace misc {
 
   class BICTrainer {
     public:
@@ -24,13 +20,13 @@ namespace bob { namespace trainer {
       BICTrainer(int intra_dim, int extra_dim) : m_M_I(intra_dim), m_M_E(extra_dim) {}
 
       //! trains the intrapersonal and extrapersonal classes of the given BICMachine
-      void train(bob::machine::BICMachine& machine, const blitz::Array<double,2>& intra_differences, const blitz::Array<double,2>& extra_differences) const {
+      void train(bob::learn::misc::BICMachine& machine, const blitz::Array<double,2>& intra_differences, const blitz::Array<double,2>& extra_differences) const {
         train_single(false, machine, intra_differences);
         train_single(true, machine, extra_differences);
       }
 
       //! trains the intrapersonal or the extrapersonal class of the given BICMachine
-      void train_single(bool clazz, bob::machine::BICMachine& machine, const blitz::Array<double,2>& differences) const;
+      void train_single(bool clazz, bob::learn::misc::BICMachine& machine, const blitz::Array<double,2>& differences) const;
 
     private:
 
@@ -39,10 +35,7 @@ namespace bob { namespace trainer {
       int m_M_I, m_M_E;
   };
 
-  /**
-   * @}
-   */
-}}
+} } } // namespaces
 
 
-#endif // BOB_TRAINER_BICTRAINER_H
+#endif // BOB_LEARN_MISC_BICTRAINER_H
diff --git a/bob/learn/misc/include/bob.learn.misc/EMPCATrainer.h b/bob/learn/misc/include/bob.learn.misc/EMPCATrainer.h
index 81c5ae1..119968e 100644
--- a/bob/learn/misc/include/bob.learn.misc/EMPCATrainer.h
+++ b/bob/learn/misc/include/bob.learn.misc/EMPCATrainer.h
@@ -8,18 +8,14 @@
  * Copyright (C) Idiap Research Institute, Martigny, Switzerland
  */
 
-#ifndef BOB_TRAINER_EMPCA_TRAINER_H
-#define BOB_TRAINER_EMPCA_TRAINER_H
+#ifndef BOB_LEARN_MISC_EMPCA_TRAINER_H
+#define BOB_LEARN_MISC_EMPCA_TRAINER_H
 
 #include <bob.learn.misc/EMTrainer.h>
 #include <bob.learn.linear/machine.h>
 #include <blitz/array.h>
 
-namespace bob { namespace trainer {
-/**
- * @ingroup TRAINER
- * @{
- */
+namespace bob { namespace learn { namespace misc {
 
 /**
  * @brief Trains a linear machine using an Expectation-Maximization algorithm
@@ -189,9 +185,6 @@ class EMPCATrainer: public EMTrainer<bob::learn::linear::Machine, blitz::Array<d
        const blitz::Array<double,2>& ar);
 };
 
-/**
- * @}
- */
-}}
+} } } // namespaces
 
-#endif /* BOB_TRAINER_EMPCA_TRAINER_H */
+#endif /* BOB_LEARN_MISC_EMPCA_TRAINER_H */
diff --git a/bob/learn/misc/include/bob.learn.misc/EMTrainer.h b/bob/learn/misc/include/bob.learn.misc/EMTrainer.h
index 9cf5424..c989e5d 100644
--- a/bob/learn/misc/include/bob.learn.misc/EMTrainer.h
+++ b/bob/learn/misc/include/bob.learn.misc/EMTrainer.h
@@ -9,8 +9,8 @@
  */
 
 
-#ifndef BOB_TRAINER_EMTRAINER_H
-#define BOB_TRAINER_EMTRAINER_H
+#ifndef BOB_LEARN_MISC_EMTRAINER_H
+#define BOB_LEARN_MISC_EMTRAINER_H
 
 #include <bob.learn.misc/Trainer.h>
 
@@ -21,12 +21,7 @@
 #include <boost/random.hpp>
 
 
-namespace bob { namespace trainer {
-  /**
-   * @ingroup TRAINER
-   * @{
-   */
-
+namespace bob { namespace learn { namespace misc {
   /**
    * @brief This class implements the general Expectation-maximization algorithm.
    * @details See Section 9.3 of Bishop, "Pattern recognition and machine learning", 2006
@@ -255,9 +250,6 @@ namespace bob { namespace trainer {
     }
   };
 
-  /**
-   * @}
-   */
-}}
+} } } // namespaces
 
-#endif // BOB_TRAINER_EMTRAINER_H
+#endif // BOB_LEARN_MISC_EMTRAINER_H
diff --git a/bob/learn/misc/include/bob.learn.misc/GMMMachine.h b/bob/learn/misc/include/bob.learn.misc/GMMMachine.h
index 1b9acaf..93ce235 100644
--- a/bob/learn/misc/include/bob.learn.misc/GMMMachine.h
+++ b/bob/learn/misc/include/bob.learn.misc/GMMMachine.h
@@ -9,8 +9,8 @@
  * Copyright (C) Idiap Research Institute, Martigny, Switzerland
  */
 
-#ifndef BOB_MACHINE_GMMMACHINE_H
-#define BOB_MACHINE_GMMMACHINE_H
+#ifndef BOB_LEARN_MISC_GMMMACHINE_H
+#define BOB_LEARN_MISC_GMMMACHINE_H
 
 #include <bob.learn.misc/Machine.h>
 #include <bob.learn.misc/Gaussian.h>
@@ -20,11 +20,7 @@
 #include <boost/shared_ptr.hpp>
 #include <vector>
 
-namespace bob { namespace machine {
-/**
- * @ingroup MACHINE
- * @{
- */
+namespace bob { namespace learn { namespace misc {
 
 /**
  * @brief This class implements a multivariate diagonal Gaussian distribution.
@@ -276,7 +272,7 @@ class GMMMachine: public Machine<blitz::Array<double,1>, double>
      * @return A smart pointer to the i'th Gaussian component
      *         if it exists, otherwise throws an exception
      */
-    boost::shared_ptr<const bob::machine::Gaussian> getGaussian(const size_t i) const;
+    boost::shared_ptr<const bob::learn::misc::Gaussian> getGaussian(const size_t i) const;
 
     /**
      * Get a pointer to a particular Gaussian component
@@ -284,7 +280,7 @@ class GMMMachine: public Machine<blitz::Array<double,1>, double>
      * @return A smart pointer to the i'th Gaussian component
      *         if it exists, otherwise throws an exception
      */
-    boost::shared_ptr<bob::machine::Gaussian> updateGaussian(const size_t i);
+    boost::shared_ptr<bob::learn::misc::Gaussian> updateGaussian(const size_t i);
 
 
     /**
@@ -373,9 +369,6 @@ class GMMMachine: public Machine<blitz::Array<double,1>, double>
 
 };
 
-/**
- * @}
- */
-}}
+} } } // namespaces
 
-#endif
+#endif // BOB_LEARN_MISC_GMMMACHINE_H
diff --git a/bob/learn/misc/include/bob.learn.misc/GMMStats.h b/bob/learn/misc/include/bob.learn.misc/GMMStats.h
index 7b3eae0..4dbef6c 100644
--- a/bob/learn/misc/include/bob.learn.misc/GMMStats.h
+++ b/bob/learn/misc/include/bob.learn.misc/GMMStats.h
@@ -6,17 +6,13 @@
  * Copyright (C) Idiap Research Institute, Martigny, Switzerland
  */
 
-#ifndef BOB_MACHINE_GMMSTATS_H
-#define BOB_MACHINE_GMMSTATS_H
+#ifndef BOB_LEARN_MISC_GMMSTATS_H
+#define BOB_LEARN_MISC_GMMSTATS_H
 
 #include <blitz/array.h>
 #include <bob.io.base/HDF5File.h>
 
-namespace bob { namespace machine {
-/**
- * @ingroup MACHINE
- * @{
- */
+namespace bob { namespace learn { namespace misc {
 
 /**
  * @brief A container for GMM statistics.
@@ -140,9 +136,6 @@ class GMMStats {
     void copy(const GMMStats&);
 };
 
-/**
- * @}
- */
-}}
+} } } // namespaces
 
-#endif
+#endif // BOB_LEARN_MISC_GMMSTATS_H
diff --git a/bob/learn/misc/include/bob.learn.misc/GMMTrainer.h b/bob/learn/misc/include/bob.learn.misc/GMMTrainer.h
index 1d164b5..fbbef1b 100644
--- a/bob/learn/misc/include/bob.learn.misc/GMMTrainer.h
+++ b/bob/learn/misc/include/bob.learn.misc/GMMTrainer.h
@@ -8,19 +8,15 @@
  * Copyright (C) Idiap Research Institute, Martigny, Switzerland
  */
 
-#ifndef BOB_TRAINER_GMMTRAINER_H
-#define BOB_TRAINER_GMMTRAINER_H
+#ifndef BOB_LEARN_MISC_GMMTRAINER_H
+#define BOB_LEARN_MISC_GMMTRAINER_H
 
 #include <bob.learn.misc/EMTrainer.h>
 #include <bob.learn.misc/GMMMachine.h>
 #include <bob.learn.misc/GMMStats.h>
 #include <limits>
 
-namespace bob { namespace trainer {
-/**
- * @ingroup TRAINER
- * @{
- */
+namespace bob { namespace learn { namespace misc {
 
 /**
  * @brief This class implements the E-step of the expectation-maximisation
@@ -28,7 +24,7 @@ namespace bob { namespace trainer {
  * @details See Section 9.2.2 of Bishop,
  *   "Pattern recognition and machine learning", 2006
  */
-class GMMTrainer: public EMTrainer<bob::machine::GMMMachine, blitz::Array<double,2> >
+class GMMTrainer: public EMTrainer<bob::learn::misc::GMMMachine, blitz::Array<double,2> >
 {
   public:
     /**
@@ -52,7 +48,7 @@ class GMMTrainer: public EMTrainer<bob::machine::GMMMachine, blitz::Array<double
     /**
      * @brief Initialization before the EM steps
      */
-    virtual void initialize(bob::machine::GMMMachine& gmm,
+    virtual void initialize(bob::learn::misc::GMMMachine& gmm,
       const blitz::Array<double,2>& data);
 
     /**
@@ -64,19 +60,19 @@ class GMMTrainer: public EMTrainer<bob::machine::GMMMachine, blitz::Array<double
      * The statistics, m_ss, will be used in the mStep() that follows.
      * Implements EMTrainer::eStep(double &)
      */
-    virtual void eStep(bob::machine::GMMMachine& gmm,
+    virtual void eStep(bob::learn::misc::GMMMachine& gmm,
       const blitz::Array<double,2>& data);
 
     /**
      * @brief Computes the likelihood using current estimates of the latent
      * variables
      */
-    virtual double computeLikelihood(bob::machine::GMMMachine& gmm);
+    virtual double computeLikelihood(bob::learn::misc::GMMMachine& gmm);
 
     /**
      * @brief Finalization after the EM steps
      */
-    virtual void finalize(bob::machine::GMMMachine& gmm,
+    virtual void finalize(bob::learn::misc::GMMMachine& gmm,
       const blitz::Array<double,2>& data);
 
     /**
@@ -104,21 +100,21 @@ class GMMTrainer: public EMTrainer<bob::machine::GMMMachine, blitz::Array<double
      * @brief Returns the internal GMM statistics. Useful to parallelize the
      * E-step
      */
-    const bob::machine::GMMStats& getGMMStats() const
+    const bob::learn::misc::GMMStats& getGMMStats() const
     { return m_ss; }
 
     /**
      * @brief Sets the internal GMM statistics. Useful to parallelize the
      * E-step
      */
-    void setGMMStats(const bob::machine::GMMStats& stats);
+    void setGMMStats(const bob::learn::misc::GMMStats& stats);
 
   protected:
     /**
      * These are the sufficient statistics, calculated during the
      * E-step and used during the M-step
      */
-    bob::machine::GMMStats m_ss;
+    bob::learn::misc::GMMStats m_ss;
 
     /**
      * update means on each iteration
@@ -144,9 +140,6 @@ class GMMTrainer: public EMTrainer<bob::machine::GMMMachine, blitz::Array<double
     double m_mean_var_update_responsibilities_threshold;
 };
 
-/**
- * @}
- */
-}}
+} } } // namespaces
 
-#endif
+#endif // BOB_LEARN_MISC_GMMTRAINER_H
diff --git a/bob/learn/misc/include/bob.learn.misc/Gaussian.h b/bob/learn/misc/include/bob.learn.misc/Gaussian.h
index fcfd57d..70412e9 100644
--- a/bob/learn/misc/include/bob.learn.misc/Gaussian.h
+++ b/bob/learn/misc/include/bob.learn.misc/Gaussian.h
@@ -6,19 +6,15 @@
  * Copyright (C) Idiap Research Institute, Martigny, Switzerland
  */
 
-#ifndef BOB_MACHINE_GAUSSIAN_H
-#define BOB_MACHINE_GAUSSIAN_H
+#ifndef BOB_LEARN_MISC_GAUSSIAN_H
+#define BOB_LEARN_MISC_GAUSSIAN_H
 
 #include <bob.learn.misc/Machine.h>
 #include <bob.io.base/HDF5File.h>
 #include <blitz/array.h>
 #include <limits>
 
-namespace bob { namespace machine {
-/**
- * @ingroup MACHINE
- * @{
- */
+namespace bob { namespace learn { namespace misc {
 
 /**
  * @brief This class implements a multivariate diagonal Gaussian distribution.
@@ -204,7 +200,7 @@ class Gaussian: public Machine<blitz::Array<double,1>, double>
     /**
      * Prints a Gaussian in the output stream
      */
-    friend std::ostream& operator<<(std::ostream& os, const Gaussian& g);
+    friend std::ostream& operator<<(std::ostream& os, const bob::learn::misc::Gaussian& g);
 
 
   private:
@@ -264,8 +260,6 @@ class Gaussian: public Machine<blitz::Array<double,1>, double>
     size_t m_n_inputs;
 };
 
-/**
- * @}
- */
-}}
-#endif
+} } } // namespaces
+
+#endif // BOB_LEARN_MISC_GAUSSIAN_H
diff --git a/bob/learn/misc/include/bob.learn.misc/IVectorMachine.h b/bob/learn/misc/include/bob.learn.misc/IVectorMachine.h
index 93bf664..efff4b7 100644
--- a/bob/learn/misc/include/bob.learn.misc/IVectorMachine.h
+++ b/bob/learn/misc/include/bob.learn.misc/IVectorMachine.h
@@ -5,8 +5,8 @@
  * Copyright (C) Idiap Research Institute, Martigny, Switzerland
  */
 
-#ifndef BOB_MACHINE_IVECTOR_H
-#define BOB_MACHINE_IVECTOR_H
+#ifndef BOB_LEARN_MISC_IVECTOR_MACHINE_H
+#define BOB_LEARN_MISC_IVECTOR_MACHINE_H
 
 #include <blitz/array.h>
 #include <bob.learn.misc/Machine.h>
@@ -14,11 +14,7 @@
 #include <bob.learn.misc/GMMStats.h>
 #include <bob.io.base/HDF5File.h>
 
-namespace bob { namespace machine {
-/**
- * @ingroup MACHINE
- * @{
- */
+namespace bob { namespace learn { namespace misc {
 
 /**
  * @brief An IVectorMachine consists of a Total Variability subspace \f$T\f$
@@ -28,7 +24,7 @@ namespace bob { namespace machine {
  *    N. Dehak, P. Kenny, R. Dehak, P. Dumouchel, P. Ouellet,
  *   IEEE Trans. on Audio, Speech and Language Processing
  */
-class IVectorMachine: public bob::machine::Machine<bob::machine::GMMStats, blitz::Array<double,1> >
+class IVectorMachine: public bob::learn::misc::Machine<bob::learn::misc::GMMStats, blitz::Array<double,1> >
 {
   public:
     /**
@@ -49,7 +45,7 @@ class IVectorMachine: public bob::machine::Machine<bob::machine::GMMStats, blitz
      *   \f$\Sigma\f$ (diagonal) matrix
      * @warning rt SHOULD BE >= 1.
      */
-    IVectorMachine(const boost::shared_ptr<bob::machine::GMMMachine> ubm,
+    IVectorMachine(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm,
       const size_t rt=1, const double variance_threshold=1e-10);
 
     /**
@@ -102,7 +98,7 @@ class IVectorMachine: public bob::machine::Machine<bob::machine::GMMStats, blitz
     /**
      * @brief Returns the UBM
      */
-    const boost::shared_ptr<bob::machine::GMMMachine> getUbm() const
+    const boost::shared_ptr<bob::learn::misc::GMMMachine> getUbm() const
     { return m_ubm; }
 
     /**
@@ -180,7 +176,7 @@ class IVectorMachine: public bob::machine::Machine<bob::machine::GMMStats, blitz
      * @brief Sets (the mean supervector of) the Universal Background Model.
      * \f$T\f$ and \f$\Sigma\f$ are uninitialized in case of dimensions update (C or D)
      */
-    void setUbm(const boost::shared_ptr<bob::machine::GMMMachine> ubm);
+    void setUbm(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm);
 
     /**
      * @brief Sets the \f$T\f$ matrix
@@ -208,13 +204,13 @@ class IVectorMachine: public bob::machine::Machine<bob::machine::GMMStats, blitz
      * @brief Computes \f$(Id + \sum_{c=1}^{C} N_{i,j,c} T^{T} \Sigma_{c}^{-1} T)\f$
      * @warning No check is perform
      */
-    void computeIdTtSigmaInvT(const bob::machine::GMMStats& input, blitz::Array<double,2>& output) const;
+    void computeIdTtSigmaInvT(const bob::learn::misc::GMMStats& input, blitz::Array<double,2>& output) const;
 
     /**
      * @brief Computes \f$T^{T} \Sigma^{-1} \sum_{c=1}^{C} (F_c - N_c ubmmean_{c})\f$
      * @warning No check is perform
      */
-    void computeTtSigmaInvFnorm(const bob::machine::GMMStats& input, blitz::Array<double,1>& output) const;
+    void computeTtSigmaInvFnorm(const bob::learn::misc::GMMStats& input, blitz::Array<double,1>& output) const;
 
     /**
      * @brief Extracts an ivector from the input GMM statistics
@@ -222,7 +218,7 @@ class IVectorMachine: public bob::machine::Machine<bob::machine::GMMStats, blitz
      * @param input GMM statistics to be used by the machine
      * @param output I-vector computed by the machine
      */
-    void forward(const bob::machine::GMMStats& input, blitz::Array<double,1>& output) const;
+    void forward(const bob::learn::misc::GMMStats& input, blitz::Array<double,1>& output) const;
 
     /**
      * @brief Extracts an ivector from the input GMM statistics
@@ -231,7 +227,7 @@ class IVectorMachine: public bob::machine::Machine<bob::machine::GMMStats, blitz
      * @param output I-vector computed by the machine
      * @warning Inputs are NOT checked
      */
-    void forward_(const bob::machine::GMMStats& input, blitz::Array<double,1>& output) const;
+    void forward_(const bob::learn::misc::GMMStats& input, blitz::Array<double,1>& output) const;
 
   protected:
     /**
@@ -254,7 +250,7 @@ class IVectorMachine: public bob::machine::Machine<bob::machine::GMMStats, blitz
     void resizePrecompute();
 
     // UBM
-    boost::shared_ptr<bob::machine::GMMMachine> m_ubm;
+    boost::shared_ptr<bob::learn::misc::GMMMachine> m_ubm;
 
     // dimensionality
     size_t m_rt; ///< size of \f$T\f$ (CD x rt)
@@ -274,9 +270,6 @@ class IVectorMachine: public bob::machine::Machine<bob::machine::GMMStats, blitz
     mutable blitz::Array<double,2> m_tmp_tt;
 };
 
-/**
- * @}
- */
-}}
+} } } // namespaces
 
-#endif // BOB_MACHINE_IVECTORMACHINE_H
+#endif // BOB_LEARN_MISC_IVECTOR_MACHINE_H
diff --git a/bob/learn/misc/include/bob.learn.misc/IVectorTrainer.h b/bob/learn/misc/include/bob.learn.misc/IVectorTrainer.h
index 55bc687..91f28ac 100644
--- a/bob/learn/misc/include/bob.learn.misc/IVectorTrainer.h
+++ b/bob/learn/misc/include/bob.learn.misc/IVectorTrainer.h
@@ -5,8 +5,8 @@
  * Copyright (C) Idiap Research Institute, Martigny, Switzerland
  */
 
-#ifndef BOB_TRAINER_IVECTOR_H
-#define BOB_TRAINER_IVECTOR_H
+#ifndef BOB_LEARN_MISC_IVECTOR_TRAINER_H
+#define BOB_LEARN_MISC_IVECTOR_TRAINER_H
 
 #include <blitz/array.h>
 #include <bob.learn.misc/EMTrainer.h>
@@ -16,11 +16,7 @@
 #include <boost/random.hpp>
 #include <vector>
 
-namespace bob { namespace trainer {
-/**
- * @ingroup TRAINER
- * @{
- */
+namespace bob { namespace learn { namespace misc {
 
 /**
  * @brief An IVectorTrainer to learn a Total Variability subspace \f$T\f$
@@ -30,7 +26,7 @@ namespace bob { namespace trainer {
  *    N. Dehak, P. Kenny, R. Dehak, P. Dumouchel, P. Ouellet,
  *   IEEE Trans. on Audio, Speech and Language Processing
  */
-class IVectorTrainer: public bob::trainer::EMTrainer<bob::machine::IVectorMachine, std::vector<bob::machine::GMMStats> >
+class IVectorTrainer: public bob::learn::misc::EMTrainer<bob::learn::misc::IVectorMachine, std::vector<bob::learn::misc::GMMStats> >
 {
   public:
     /**
@@ -53,8 +49,8 @@ class IVectorTrainer: public bob::trainer::EMTrainer<bob::machine::IVectorMachin
     /**
      * @brief Initialization before the EM loop
      */
-    virtual void initialize(bob::machine::IVectorMachine& ivector,
-      const std::vector<bob::machine::GMMStats>& data);
+    virtual void initialize(bob::learn::misc::IVectorMachine& ivector,
+      const std::vector<bob::learn::misc::GMMStats>& data);
 
     /**
      * @brief Calculates statistics across the dataset,
@@ -66,27 +62,27 @@ class IVectorTrainer: public bob::trainer::EMTrainer<bob::machine::IVectorMachin
      *
      * These statistics will be used in the mStep() that follows.
      */
-    virtual void eStep(bob::machine::IVectorMachine& ivector,
-      const std::vector<bob::machine::GMMStats>& data);
+    virtual void eStep(bob::learn::misc::IVectorMachine& ivector,
+      const std::vector<bob::learn::misc::GMMStats>& data);
 
     /**
      * @brief Maximisation step: Update the Total Variability matrix \f$T\f$
      * and \f$\Sigma\f$ if update_sigma is enabled.
      */
-    virtual void mStep(bob::machine::IVectorMachine& ivector,
-      const std::vector<bob::machine::GMMStats>& data);
+    virtual void mStep(bob::learn::misc::IVectorMachine& ivector,
+      const std::vector<bob::learn::misc::GMMStats>& data);
 
     /**
      * @brief Computes the likelihood using current estimates
      * @warning (currently unsupported)
      */
-    virtual double computeLikelihood(bob::machine::IVectorMachine& ivector);
+    virtual double computeLikelihood(bob::learn::misc::IVectorMachine& ivector);
 
     /**
      * @brief Finalization after the EM loop
      */
-    virtual void finalize(bob::machine::IVectorMachine& ivector,
-      const std::vector<bob::machine::GMMStats>& data);
+    virtual void finalize(bob::learn::misc::IVectorMachine& ivector,
+      const std::vector<bob::learn::misc::GMMStats>& data);
 
     /**
      * @brief Assigns from a different IVectorTrainer
@@ -159,9 +155,6 @@ class IVectorTrainer: public bob::trainer::EMTrainer<bob::machine::IVectorMachin
     mutable blitz::Array<double,2> m_tmp_tt2;
 };
 
-/**
- * @}
- */
-}}
+} } } // namespaces
 
-#endif // BOB_TRAINER_IVECTORTRAINER_H
+#endif // BOB_LEARN_MISC_IVECTOR_TRAINER_H
diff --git a/bob/learn/misc/include/bob.learn.misc/JFAMachine.h b/bob/learn/misc/include/bob.learn.misc/JFAMachine.h
index 4644364..a1fc17c 100644
--- a/bob/learn/misc/include/bob.learn.misc/JFAMachine.h
+++ b/bob/learn/misc/include/bob.learn.misc/JFAMachine.h
@@ -7,8 +7,8 @@
  * Copyright (C) Idiap Research Institute, Martigny, Switzerland
  */
 
-#ifndef BOB_MACHINE_FABASE_H
-#define BOB_MACHINE_FABASE_H
+#ifndef BOB_LEARN_MISC_FABASE_H
+#define BOB_LEARN_MISC_FABASE_H
 
 #include <stdexcept>
 
@@ -19,11 +19,7 @@
 #include <bob.io.base/HDF5File.h>
 #include <boost/shared_ptr.hpp>
 
-namespace bob { namespace machine {
-/**
- * @ingroup MACHINE
- * @{
- */
+namespace bob { namespace learn { namespace misc {
 
 /**
  * @brief A FA Base class which contains U, V and D matrices
@@ -50,7 +46,7 @@ class FABase
      * @warning ru and rv SHOULD BE  >= 1. Just set U/V/D to zero if you want
      *   to ignore one subspace. This is the case for ISV.
      */
-    FABase(const boost::shared_ptr<bob::machine::GMMMachine> ubm, const size_t ru=1, const size_t rv=1);
+    FABase(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm, const size_t ru=1, const size_t rv=1);
 
     /**
      * @brief Copy constructor
@@ -86,7 +82,7 @@ class FABase
     /**
      * @brief Returns the UBM
      */
-    const boost::shared_ptr<bob::machine::GMMMachine> getUbm() const
+    const boost::shared_ptr<bob::learn::misc::GMMMachine> getUbm() const
     { return m_ubm; }
 
     /**
@@ -202,7 +198,7 @@ class FABase
      * @brief Sets (the mean supervector of) the Universal Background Model
      * U, V and d are uninitialized in case of dimensions update (C or D)
      */
-    void setUbm(const boost::shared_ptr<bob::machine::GMMMachine> ubm);
+    void setUbm(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm);
 
     /**
      * @brief Sets the U matrix
@@ -226,7 +222,7 @@ class FABase
      * assumption, that is the latent session variable x is approximated
      * using the UBM
      */
-    void estimateX(const bob::machine::GMMStats& gmm_stats, blitz::Array<double,1>& x) const;
+    void estimateX(const bob::learn::misc::GMMStats& gmm_stats, blitz::Array<double,1>& x) const;
 
     /**
      * @brief Compute and put U^{T}.Sigma^{-1} matrix in cache
@@ -253,13 +249,13 @@ class FABase
      * @brief Computes (Id + U^T.Sigma^-1.U.N_{i,h}.U)^-1 =
      *   (Id + sum_{c=1..C} N_{i,h}.U_{c}^T.Sigma_{c}^-1.U_{c})^-1
      */
-    void computeIdPlusUSProdInv(const bob::machine::GMMStats& gmm_stats,
+    void computeIdPlusUSProdInv(const bob::learn::misc::GMMStats& gmm_stats,
       blitz::Array<double,2>& out) const;
     /**
      * @brief Computes Fn_x = sum_{sessions h}(N*(o - m))
      * (Normalised first order statistics)
      */
-    void computeFn_x(const bob::machine::GMMStats& gmm_stats,
+    void computeFn_x(const bob::learn::misc::GMMStats& gmm_stats,
       blitz::Array<double,1>& out) const;
     /**
      * @brief Estimates the value of x from the passed arguments
@@ -270,7 +266,7 @@ class FABase
 
 
     // UBM
-    boost::shared_ptr<bob::machine::GMMMachine> m_ubm;
+    boost::shared_ptr<bob::learn::misc::GMMMachine> m_ubm;
 
     // dimensionality
     size_t m_ru; // size of U (CD x ru)
@@ -319,7 +315,7 @@ class JFABase
      * @param rv size of U (CD x rv)
      * @warning ru and rv SHOULD BE  >= 1.
      */
-    JFABase(const boost::shared_ptr<bob::machine::GMMMachine> ubm, const size_t ru=1, const size_t rv=1);
+    JFABase(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm, const size_t ru=1, const size_t rv=1);
 
     /**
      * @brief Copy constructor
@@ -374,7 +370,7 @@ class JFABase
     /**
      * @brief Returns the UBM
      */
-    const boost::shared_ptr<bob::machine::GMMMachine> getUbm() const
+    const boost::shared_ptr<bob::learn::misc::GMMMachine> getUbm() const
     { return m_base.getUbm(); }
 
     /**
@@ -469,7 +465,7 @@ class JFABase
      * @brief Sets (the mean supervector of) the Universal Background Model
      * U, V and d are uninitialized in case of dimensions update (C or D)
      */
-    void setUbm(const boost::shared_ptr<bob::machine::GMMMachine> ubm)
+    void setUbm(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm)
     { m_base.setUbm(ubm); }
 
     /**
@@ -496,7 +492,7 @@ class JFABase
      * assumption, that is the latent session variable x is approximated
      * using the UBM
      */
-    void estimateX(const bob::machine::GMMStats& gmm_stats, blitz::Array<double,1>& x) const
+    void estimateX(const bob::learn::misc::GMMStats& gmm_stats, blitz::Array<double,1>& x) const
     { m_base.estimateX(gmm_stats, x); }
 
     /**
@@ -510,13 +506,13 @@ class JFABase
     /**
      * @brief Returns the FABase member
      */
-    const bob::machine::FABase& getBase() const
+    const bob::learn::misc::FABase& getBase() const
     { return m_base; }
 
 
   private:
     // FABase
-    bob::machine::FABase m_base;
+    bob::learn::misc::FABase m_base;
 };
 
 
@@ -543,7 +539,7 @@ class ISVBase
      * @param ru size of U (CD x ru)
      * @warning ru SHOULD BE >= 1.
      */
-    ISVBase(const boost::shared_ptr<bob::machine::GMMMachine> ubm, const size_t ru=1);
+    ISVBase(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm, const size_t ru=1);
 
     /**
      * @brief Copy constructor
@@ -598,7 +594,7 @@ class ISVBase
     /**
      * @brief Returns the UBM
      */
-    const boost::shared_ptr<bob::machine::GMMMachine> getUbm() const
+    const boost::shared_ptr<bob::learn::misc::GMMMachine> getUbm() const
     { return m_base.getUbm(); }
 
     /**
@@ -676,7 +672,7 @@ class ISVBase
      * @brief Sets (the mean supervector of) the Universal Background Model
      * U, V and d are uninitialized in case of dimensions update (C or D)
      */
-    void setUbm(const boost::shared_ptr<bob::machine::GMMMachine> ubm)
+    void setUbm(const boost::shared_ptr<bob::learn::misc::GMMMachine> ubm)
     { m_base.setUbm(ubm); }
 
     /**
@@ -697,7 +693,7 @@ class ISVBase
      * assumption, that is the latent session variable x is approximated
      * using the UBM
      */
-    void estimateX(const bob::machine::GMMStats& gmm_stats, blitz::Array<double,1>& x) const
+    void estimateX(const bob::learn::misc::GMMStats& gmm_stats, blitz::Array<double,1>& x) const
     { m_base.estimateX(gmm_stats, x); }
 
     /**
@@ -711,13 +707,13 @@ class ISVBase
     /**
      * @brief Returns the FABase member
      */
-    const bob::machine::FABase& getBase() const
+    const bob::learn::misc::FABase& getBase() const
     { return m_base; }
 
 
   private:
     // FABase
-    bob::machine::FABase m_base;
+    bob::learn::misc::FABase m_base;
 };
 
 
@@ -727,7 +723,7 @@ class ISVBase
  *   (latent variables y and z)
  * TODO: add a reference to the journal articles
  */
-class JFAMachine: public Machine<bob::machine::GMMStats, double>
+class JFAMachine: public Machine<bob::learn::misc::GMMStats, double>
 {
   public:
     /**
@@ -742,7 +738,7 @@ class JFAMachine: public Machine<bob::machine::GMMStats, double>
      *
      * @param jfa_base The JFABase associated with this machine
      */
-    JFAMachine(const boost::shared_ptr<bob::machine::JFABase> jfa_base);
+    JFAMachine(const boost::shared_ptr<bob::learn::misc::JFABase> jfa_base);
 
     /**
      * @brief Copy constructor
@@ -871,13 +867,13 @@ class JFAMachine: public Machine<bob::machine::GMMStats, double>
     /**
      * @brief Returns the JFABase
      */
-    const boost::shared_ptr<bob::machine::JFABase> getJFABase() const
+    const boost::shared_ptr<bob::learn::misc::JFABase> getJFABase() const
     { return m_jfa_base; }
 
     /**
      * @brief Sets the JFABase
      */
-    void setJFABase(const boost::shared_ptr<bob::machine::JFABase> jfa_base);
+    void setJFABase(const boost::shared_ptr<bob::learn::misc::JFABase> jfa_base);
 
 
     /**
@@ -885,14 +881,14 @@ class JFAMachine: public Machine<bob::machine::GMMStats, double>
      * assumption, that is the latent session variable x is approximated
      * using the UBM
      */
-    void estimateX(const bob::machine::GMMStats& gmm_stats, blitz::Array<double,1>& x) const
+    void estimateX(const bob::learn::misc::GMMStats& gmm_stats, blitz::Array<double,1>& x) const
     { m_jfa_base->estimateX(gmm_stats, x); }
     /**
      * @brief Estimates Ux from the GMM statistics considering the LPT
      * assumption, that is the latent session variable x is approximated
      * using the UBM
      */
-    void estimateUx(const bob::machine::GMMStats& gmm_stats, blitz::Array<double,1>& Ux);
+    void estimateUx(const bob::learn::misc::GMMStats& gmm_stats, blitz::Array<double,1>& Ux);
 
    /**
     * @brief Execute the machine
@@ -901,12 +897,12 @@ class JFAMachine: public Machine<bob::machine::GMMStats, double>
     * @param score value computed by the machine
     * @warning Inputs are checked
     */
-    void forward(const bob::machine::GMMStats& input, double& score) const;
+    void forward(const bob::learn::misc::GMMStats& input, double& score) const;
     /**
      * @brief Computes a score for the given UBM statistics and given the
      * Ux vector
      */
-    void forward(const bob::machine::GMMStats& gmm_stats,
+    void forward(const bob::learn::misc::GMMStats& gmm_stats,
       const blitz::Array<double,1>& Ux, double& score) const;
 
     /**
@@ -916,7 +912,7 @@ class JFAMachine: public Machine<bob::machine::GMMStats, double>
      * @param score value computed by the machine
      * @warning Inputs are NOT checked
      */
-    void forward_(const bob::machine::GMMStats& input, double& score) const;
+    void forward_(const bob::learn::misc::GMMStats& input, double& score) const;
 
   protected:
     /**
@@ -933,7 +929,7 @@ class JFAMachine: public Machine<bob::machine::GMMStats, double>
     void updateCache();
 
     // UBM
-    boost::shared_ptr<bob::machine::JFABase> m_jfa_base;
+    boost::shared_ptr<bob::learn::misc::JFABase> m_jfa_base;
 
     // y and z vectors/factors learned during the enrolment procedure
     blitz::Array<double,1> m_y;
@@ -952,7 +948,7 @@ class JFAMachine: public Machine<bob::machine::GMMStats, double>
  *   U D matrices.
  * TODO: add a reference to the journal articles
  */
-class ISVMachine: public Machine<bob::machine::GMMStats, double>
+class ISVMachine: public Machine<bob::learn::misc::GMMStats, double>
 {
   public:
     /**
@@ -967,7 +963,7 @@ class ISVMachine: public Machine<bob::machine::GMMStats, double>
      *
      * @param isv_base The ISVBase associated with this machine
      */
-    ISVMachine(const boost::shared_ptr<bob::machine::ISVBase> isv_base);
+    ISVMachine(const boost::shared_ptr<bob::learn::misc::ISVBase> isv_base);
 
     /**
      * @brief Copy constructor
@@ -1074,13 +1070,13 @@ class ISVMachine: public Machine<bob::machine::GMMStats, double>
     /**
      * @brief Returns the ISVBase
      */
-    const boost::shared_ptr<bob::machine::ISVBase> getISVBase() const
+    const boost::shared_ptr<bob::learn::misc::ISVBase> getISVBase() const
     { return m_isv_base; }
 
     /**
      * @brief Sets the ISVBase
      */
-    void setISVBase(const boost::shared_ptr<bob::machine::ISVBase> isv_base);
+    void setISVBase(const boost::shared_ptr<bob::learn::misc::ISVBase> isv_base);
 
 
     /**
@@ -1088,14 +1084,14 @@ class ISVMachine: public Machine<bob::machine::GMMStats, double>
      * assumption, that is the latent session variable x is approximated
      * using the UBM
      */
-    void estimateX(const bob::machine::GMMStats& gmm_stats, blitz::Array<double,1>& x) const
+    void estimateX(const bob::learn::misc::GMMStats& gmm_stats, blitz::Array<double,1>& x) const
     { m_isv_base->estimateX(gmm_stats, x); }
     /**
      * @brief Estimates Ux from the GMM statistics considering the LPT
      * assumption, that is the latent session variable x is approximated
      * using the UBM
      */
-    void estimateUx(const bob::machine::GMMStats& gmm_stats, blitz::Array<double,1>& Ux);
+    void estimateUx(const bob::learn::misc::GMMStats& gmm_stats, blitz::Array<double,1>& Ux);
 
    /**
     * @brief Execute the machine
@@ -1104,12 +1100,12 @@ class ISVMachine: public Machine<bob::machine::GMMStats, double>
     * @param score value computed by the machine
     * @warning Inputs are checked
     */
-    void forward(const bob::machine::GMMStats& input, double& score) const;
+    void forward(const bob::learn::misc::GMMStats& input, double& score) const;
     /**
      * @brief Computes a score for the given UBM statistics and given the
      * Ux vector
      */
-    void forward(const bob::machine::GMMStats& gmm_stats,
+    void forward(const bob::learn::misc::GMMStats& gmm_stats,
       const blitz::Array<double,1>& Ux, double& score) const;
 
     /**
@@ -1119,7 +1115,7 @@ class ISVMachine: public Machine<bob::machine::GMMStats, double>
      * @param score value computed by the machine
      * @warning Inputs are NOT checked
      */
-    void forward_(const bob::machine::GMMStats& input, double& score) const;
+    void forward_(const bob::learn::misc::GMMStats& input, double& score) const;
 
   protected:
     /**
@@ -1136,7 +1132,7 @@ class ISVMachine: public Machine<bob::machine::GMMStats, double>
     void resizeTmp();
 
     // UBM
-    boost::shared_ptr<bob::machine::ISVBase> m_isv_base;
+    boost::shared_ptr<bob::learn::misc::ISVBase> m_isv_base;
 
     // y and z vectors/factors learned during the enrolment procedure
     blitz::Array<double,1> m_z;
@@ -1149,10 +1145,6 @@ class ISVMachine: public Machine<bob::machine::GMMStats, double>
     mutable blitz::Array<double,1> m_tmp_Ux;
 };
 
+} } } // namespaces
 
-/**
- * @}
- */
-}}
-
-#endif // BOB_MACHINE_FABASE_H
+#endif // BOB_LEARN_MISC_FABASE_H
diff --git a/bob/learn/misc/include/bob.learn.misc/JFATrainer.h b/bob/learn/misc/include/bob.learn.misc/JFATrainer.h
index 7519b05..3cfa08d 100644
--- a/bob/learn/misc/include/bob.learn.misc/JFATrainer.h
+++ b/bob/learn/misc/include/bob.learn.misc/JFATrainer.h
@@ -7,8 +7,8 @@
  * Copyright (C) Idiap Research Institute, Martigny, Switzerland
  */
 
-#ifndef BOB_TRAINER_JFATRAINER_H
-#define BOB_TRAINER_JFATRAINER_H
+#ifndef BOB_LEARN_MISC_JFATRAINER_H
+#define BOB_LEARN_MISC_JFATRAINER_H
 
 #include <blitz/array.h>
 #include <bob.learn.misc/EMTrainer.h>
@@ -23,7 +23,7 @@
 #include <boost/random.hpp>
 #include <bob.core/logging.h>
 
-namespace bob { namespace trainer {
+namespace bob { namespace learn { namespace misc {
 
 class FABaseTrainer
 {
@@ -46,32 +46,32 @@ class FABaseTrainer
     /**
      * @brief Check that the dimensionality of the statistics match.
      */
-    void checkStatistics(const bob::machine::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& stats);
+    void checkStatistics(const bob::learn::misc::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
 
     /**
      * @brief Initialize the dimensionality, the UBM, the sums of the
      * statistics and the number of identities.
      */
-    void initUbmNidSumStatistics(const bob::machine::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& stats);
+    void initUbmNidSumStatistics(const bob::learn::misc::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
 
     /**
      * @brief Precomputes the sums of the zeroth order statistics over the
      * sessions for each client
      */
-    void precomputeSumStatisticsN(const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& stats);
+    void precomputeSumStatisticsN(const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
     /**
      * @brief Precomputes the sums of the first order statistics over the
      * sessions for each client
      */
-    void precomputeSumStatisticsF(const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& stats);
+    void precomputeSumStatisticsF(const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
 
     /**
      * @brief Initializes (allocates and sets to zero) the x, y, z speaker
      * factors
      */
-    void initializeXYZ(const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& stats);
+    void initializeXYZ(const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
 
     /**
      * @brief Resets the x, y, z speaker factors to zero values
@@ -83,11 +83,11 @@ class FABaseTrainer
     /**
      * @brief Computes Vt * diag(sigma)^-1
      */
-    void computeVtSigmaInv(const bob::machine::FABase& m);
+    void computeVtSigmaInv(const bob::learn::misc::FABase& m);
     /**
      * @brief Computes Vt_{c} * diag(sigma)^-1 * V_{c} for each Gaussian c
      */
-    void computeVProd(const bob::machine::FABase& m);
+    void computeVProd(const bob::learn::misc::FABase& m);
     /**
      * @brief Computes (I+Vt*diag(sigma)^-1*Ni*V)^-1 which occurs in the y
      * estimation for the given person
@@ -97,8 +97,8 @@ class FABaseTrainer
      * @brief Computes sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i} - U*x_{i,h})
      * which occurs in the y estimation of the given person
      */
-    void computeFn_y_i(const bob::machine::FABase& m,
-      const std::vector<boost::shared_ptr<bob::machine::GMMStats> >& stats,
+    void computeFn_y_i(const bob::learn::misc::FABase& m,
+      const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& stats,
       const size_t id);
     /**
      * @brief Updates y_i (of the current person) and the accumulators to
@@ -109,14 +109,14 @@ class FABaseTrainer
     /**
      * @brief Updates y and the accumulators to compute V
      */
-    void updateY(const bob::machine::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& stats);
+    void updateY(const bob::learn::misc::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
     /**
      * @brief Computes the accumulators m_acc_V_A1 and m_acc_V_A2 for V
      * V = A2 * A1^-1
      */
-    void computeAccumulatorsV(const bob::machine::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& stats);
+    void computeAccumulatorsV(const bob::learn::misc::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
     /**
      * @brief Updates V from the accumulators m_acc_V_A1 and m_acc_V_A2
      */
@@ -127,22 +127,22 @@ class FABaseTrainer
     /**
      * @brief Computes Ut * diag(sigma)^-1
      */
-    void computeUtSigmaInv(const bob::machine::FABase& m);
+    void computeUtSigmaInv(const bob::learn::misc::FABase& m);
     /**
      * @brief Computes Ut_{c} * diag(sigma)^-1 * U_{c} for each Gaussian c
      */
-    void computeUProd(const bob::machine::FABase& m);
+    void computeUProd(const bob::learn::misc::FABase& m);
     /**
      * @brief Computes (I+Ut*diag(sigma)^-1*Ni*U)^-1 which occurs in the x
      * estimation
      */
-    void computeIdPlusUProd_ih(const boost::shared_ptr<bob::machine::GMMStats>& stats);
+    void computeIdPlusUProd_ih(const boost::shared_ptr<bob::learn::misc::GMMStats>& stats);
     /**
      * @brief Computes sum_{sessions h}(N_{i,h}*(o_{i,h} - m - D*z_{i} - U*x_{i,h})
      * which occurs in the y estimation of the given person
      */
-    void computeFn_x_ih(const bob::machine::FABase& m,
-      const boost::shared_ptr<bob::machine::GMMStats>& stats, const size_t id);
+    void computeFn_x_ih(const bob::learn::misc::FABase& m,
+      const boost::shared_ptr<bob::learn::misc::GMMStats>& stats, const size_t id);
     /**
      * @brief Updates x_ih (of the current person/session) and the
      * accumulators to compute U with the cache values m_cache_IdPlusVprod_i,
@@ -152,14 +152,14 @@ class FABaseTrainer
     /**
      * @brief Updates x
      */
-    void updateX(const bob::machine::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& stats);
+    void updateX(const bob::learn::misc::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
     /**
      * @brief Computes the accumulators m_acc_U_A1 and m_acc_U_A2 for U
      * U = A2 * A1^-1
      */
-    void computeAccumulatorsU(const bob::machine::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& stats);
+    void computeAccumulatorsU(const bob::learn::misc::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
     /**
      * @brief Updates U from the accumulators m_acc_U_A1 and m_acc_U_A2
      */
@@ -170,11 +170,11 @@ class FABaseTrainer
     /**
      * @brief Computes diag(D) * diag(sigma)^-1
      */
-    void computeDtSigmaInv(const bob::machine::FABase& m);
+    void computeDtSigmaInv(const bob::learn::misc::FABase& m);
     /**
      * @brief Computes Dt_{c} * diag(sigma)^-1 * D_{c} for each Gaussian c
      */
-    void computeDProd(const bob::machine::FABase& m);
+    void computeDProd(const bob::learn::misc::FABase& m);
     /**
      * @brief Computes (I+diag(d)t*diag(sigma)^-1*Ni*diag(d))^-1 which occurs
      * in the z estimation for the given person
@@ -184,8 +184,8 @@ class FABaseTrainer
      * @brief Computes sum_{sessions h}(N_{i,h}*(o_{i,h} - m - V*y_{i} - U*x_{i,h})
      * which occurs in the y estimation of the given person
      */
-    void computeFn_z_i(const bob::machine::FABase& m,
-      const std::vector<boost::shared_ptr<bob::machine::GMMStats> >& stats, const size_t id);
+    void computeFn_z_i(const bob::learn::misc::FABase& m,
+      const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& stats, const size_t id);
     /**
      * @brief Updates z_i (of the current person) and the accumulators to
      * compute D with the cache values m_cache_IdPlusDProd_i, m_VtSigmaInv
@@ -195,14 +195,14 @@ class FABaseTrainer
     /**
      * @brief Updates z and the accumulators to compute D
      */
-    void updateZ(const bob::machine::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& stats);
+    void updateZ(const bob::learn::misc::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
     /**
      * @brief Computes the accumulators m_acc_D_A1 and m_acc_D_A2 for d
      * d = A2 * A1^-1
      */
-    void computeAccumulatorsD(const bob::machine::FABase& m,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& stats);
+    void computeAccumulatorsD(const bob::learn::misc::FABase& m,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& stats);
     /**
      * @brief Updates d from the accumulators m_acc_D_A1 and m_acc_D_A2
      */
@@ -399,74 +399,74 @@ class JFATrainer
     /**
      * @brief This methods performs some initialization before the EM loop.
      */
-    virtual void initialize(bob::machine::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar);
+    virtual void initialize(bob::learn::misc::JFABase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
 
     /**
      * @brief This methods performs the e-Step to train the first subspace V
      */
-    virtual void eStep1(bob::machine::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar);
+    virtual void eStep1(bob::learn::misc::JFABase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
     /**
      * @brief This methods performs the m-Step to train the first subspace V
      */
-    virtual void mStep1(bob::machine::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar);
+    virtual void mStep1(bob::learn::misc::JFABase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
     /**
      * @brief This methods performs the finalization after training the first
      * subspace V
      */
-    virtual void finalize1(bob::machine::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar);
+    virtual void finalize1(bob::learn::misc::JFABase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
     /**
      * @brief This methods performs the e-Step to train the second subspace U
      */
-    virtual void eStep2(bob::machine::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar);
+    virtual void eStep2(bob::learn::misc::JFABase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
     /**
      * @brief This methods performs the m-Step to train the second subspace U
      */
-    virtual void mStep2(bob::machine::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar);
+    virtual void mStep2(bob::learn::misc::JFABase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
     /**
      * @brief This methods performs the finalization after training the second
      * subspace U
      */
-    virtual void finalize2(bob::machine::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar);
+    virtual void finalize2(bob::learn::misc::JFABase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
     /**
      * @brief This methods performs the e-Step to train the third subspace d
      */
-    virtual void eStep3(bob::machine::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar);
+    virtual void eStep3(bob::learn::misc::JFABase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
     /**
      * @brief This methods performs the m-Step to train the third subspace d
      */
-    virtual void mStep3(bob::machine::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar);
+    virtual void mStep3(bob::learn::misc::JFABase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
     /**
      * @brief This methods performs the finalization after training the third
      * subspace d
      */
-    virtual void finalize3(bob::machine::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar);
+    virtual void finalize3(bob::learn::misc::JFABase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
 
     /**
      * @brief This methods performs the main loops to train the subspaces U, V and d
      */
-    virtual void train_loop(bob::machine::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar);
+    virtual void train_loop(bob::learn::misc::JFABase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
     /**
      * @brief This methods trains the subspaces U, V and d
      */
-    virtual void train(bob::machine::JFABase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar);
+    virtual void train(bob::learn::misc::JFABase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
 
     /**
      * @brief Enrol a client
      */
-    void enrol(bob::machine::JFAMachine& machine,
-      const std::vector<boost::shared_ptr<bob::machine::GMMStats> >& features,
+    void enrol(bob::learn::misc::JFAMachine& machine,
+      const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& features,
       const size_t n_iter);
 
     /**
@@ -550,11 +550,11 @@ class JFATrainer
     // Attributes
     size_t m_max_iterations;
     boost::shared_ptr<boost::mt19937> m_rng; ///< The random number generator for the inialization
-    bob::trainer::FABaseTrainer m_base_trainer;
+    bob::learn::misc::FABaseTrainer m_base_trainer;
 };
 
 
-class ISVTrainer: public EMTrainer<bob::machine::ISVBase, std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > >
+class ISVTrainer: public EMTrainer<bob::learn::misc::ISVBase, std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > >
 {
   public:
     /**
@@ -596,39 +596,39 @@ class ISVTrainer: public EMTrainer<bob::machine::ISVBase, std::vector<std::vecto
     /**
      * @brief This methods performs some initialization before the EM loop.
      */
-    virtual void initialize(bob::machine::ISVBase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar);
+    virtual void initialize(bob::learn::misc::ISVBase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
     /**
      * @brief This methods performs some actions after the EM loop.
      */
-    virtual void finalize(bob::machine::ISVBase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar);
+    virtual void finalize(bob::learn::misc::ISVBase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
 
     /**
      * @brief Calculates and saves statistics across the dataset
      * The statistics will be used in the mStep() that follows.
      */
-    virtual void eStep(bob::machine::ISVBase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar);
+    virtual void eStep(bob::learn::misc::ISVBase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
 
     /**
      * @brief Performs a maximization step to update the parameters of the
      * factor analysis model.
      */
-    virtual void mStep(bob::machine::ISVBase& machine,
-      const std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& ar);
+    virtual void mStep(bob::learn::misc::ISVBase& machine,
+      const std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& ar);
 
     /**
      * @brief Computes the average log likelihood using the current estimates
      * of the latent variables.
      */
-    virtual double computeLikelihood(bob::machine::ISVBase& machine);
+    virtual double computeLikelihood(bob::learn::misc::ISVBase& machine);
 
     /**
      * @brief Enrol a client
      */
-    void enrol(bob::machine::ISVMachine& machine,
-      const std::vector<boost::shared_ptr<bob::machine::GMMStats> >& features,
+    void enrol(bob::learn::misc::ISVMachine& machine,
+      const std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >& features,
       const size_t n_iter);
 
     /**
@@ -674,14 +674,13 @@ class ISVTrainer: public EMTrainer<bob::machine::ISVBase, std::vector<std::vecto
     /**
      * @brief Initialize D to sqrt(ubm_var/relevance_factor)
      */
-    void initializeD(bob::machine::ISVBase& machine) const;
+    void initializeD(bob::learn::misc::ISVBase& machine) const;
 
     // Attributes
-    bob::trainer::FABaseTrainer m_base_trainer;
+    bob::learn::misc::FABaseTrainer m_base_trainer;
     double m_relevance_factor;
 };
 
+} } } // namespaces
 
-}}
-
-#endif /* BOB_TRAINER_FATRAINER_H */
+#endif /* BOB_LEARN_MISC_FATRAINER_H */
diff --git a/bob/learn/misc/include/bob.learn.misc/KMeansMachine.h b/bob/learn/misc/include/bob.learn.misc/KMeansMachine.h
index 38413c1..3f12213 100644
--- a/bob/learn/misc/include/bob.learn.misc/KMeansMachine.h
+++ b/bob/learn/misc/include/bob.learn.misc/KMeansMachine.h
@@ -5,8 +5,8 @@
  *
  * Copyright (C) Idiap Research Institute, Martigny, Switzerland
  */
-#ifndef BOB_MACHINE_KMEANSMACHINE_H
-#define BOB_MACHINE_KMEANSMACHINE_H
+#ifndef BOB_LEARN_MISC_KMEANSMACHINE_H
+#define BOB_LEARN_MISC_KMEANSMACHINE_H
 
 #include <blitz/array.h>
 #include <cfloat>
@@ -14,11 +14,7 @@
 #include <bob.io.base/HDF5File.h>
 #include <bob.learn.misc/Machine.h>
 
-namespace bob { namespace machine {
-/**
- * @ingroup MACHINE
- * @{
- */
+namespace bob { namespace learn { namespace misc {
 
 /**
  * @brief This class implements a k-means classifier.
@@ -244,8 +240,6 @@ class KMeansMachine: public Machine<blitz::Array<double,1>, double> {
     mutable blitz::Array<double,2> m_cache_means;
 };
 
-/**
- * @}
- */
-}}
-#endif
+} } } // namespaces
+
+#endif // BOB_LEARN_MISC_KMEANSMACHINE_H
diff --git a/bob/learn/misc/include/bob.learn.misc/KMeansTrainer.h b/bob/learn/misc/include/bob.learn.misc/KMeansTrainer.h
index 52d5a14..110b82a 100644
--- a/bob/learn/misc/include/bob.learn.misc/KMeansTrainer.h
+++ b/bob/learn/misc/include/bob.learn.misc/KMeansTrainer.h
@@ -5,18 +5,14 @@
  *
  * Copyright (C) Idiap Research Institute, Martigny, Switzerland
  */
-#ifndef BOB_TRAINER_KMEANSTRAINER_H
-#define BOB_TRAINER_KMEANSTRAINER_H
+#ifndef BOB_LEARN_MISC_KMEANSTRAINER_H
+#define BOB_LEARN_MISC_KMEANSTRAINER_H
 
 #include <bob.learn.misc/KMeansMachine.h>
 #include <bob.learn.misc/EMTrainer.h>
 #include <boost/version.hpp>
 
-namespace bob { namespace trainer {
-/**
- * @ingroup TRAINER
- * @{
- */
+namespace bob { namespace learn { namespace misc {
 
 /**
  * Trains a KMeans machine.
@@ -24,7 +20,7 @@ namespace bob { namespace trainer {
  * @details See Section 9.1 of Bishop, "Pattern recognition and machine learning", 2006
  *          It uses a random initialisation of the means followed by the expectation-maximization algorithm
  */
-class KMeansTrainer: public EMTrainer<bob::machine::KMeansMachine, blitz::Array<double,2> >
+class KMeansTrainer: public EMTrainer<bob::learn::misc::KMeansMachine, blitz::Array<double,2> >
 {
   public:
     /**
@@ -89,7 +85,7 @@ class KMeansTrainer: public EMTrainer<bob::machine::KMeansMachine, blitz::Array<
      * Data is split into as many chunks as there are means,
      * then each mean is set to a random example within each chunk.
      */
-    virtual void initialize(bob::machine::KMeansMachine& kMeansMachine,
+    virtual void initialize(bob::learn::misc::KMeansMachine& kMeansMachine,
       const blitz::Array<double,2>& sampler);
 
     /**
@@ -98,31 +94,31 @@ class KMeansTrainer: public EMTrainer<bob::machine::KMeansMachine, blitz::Array<
      * - average (Square Euclidean) distance from the closest mean
      * Implements EMTrainer::eStep(double &)
      */
-    virtual void eStep(bob::machine::KMeansMachine& kmeans,
+    virtual void eStep(bob::learn::misc::KMeansMachine& kmeans,
       const blitz::Array<double,2>& data);
 
     /**
      * @brief Updates the mean based on the statistics from the E-step.
      */
-    virtual void mStep(bob::machine::KMeansMachine& kmeans,
+    virtual void mStep(bob::learn::misc::KMeansMachine& kmeans,
       const blitz::Array<double,2>&);
 
     /**
      * @brief This functions returns the average min (Square Euclidean)
      * distance (average distance to the closest mean)
      */
-    virtual double computeLikelihood(bob::machine::KMeansMachine& kmeans);
+    virtual double computeLikelihood(bob::learn::misc::KMeansMachine& kmeans);
 
     /**
      * @brief Function called at the end of the training
      */
-    virtual void finalize(bob::machine::KMeansMachine& kMeansMachine, const blitz::Array<double,2>& sampler);
+    virtual void finalize(bob::learn::misc::KMeansMachine& kMeansMachine, const blitz::Array<double,2>& sampler);
 
     /**
      * @brief Reset the statistics accumulators
      * to the correct size and a value of zero.
      */
-    bool resetAccumulators(bob::machine::KMeansMachine& kMeansMachine);
+    bool resetAccumulators(bob::learn::misc::KMeansMachine& kMeansMachine);
 
     /**
      * @brief Sets the Random Number Generator
@@ -192,9 +188,6 @@ class KMeansTrainer: public EMTrainer<bob::machine::KMeansMachine, blitz::Array<
     blitz::Array<double,2> m_firstOrderStats;
 };
 
-/**
- * @}
- */
-}}
+} } } // namespaces
 
-#endif // BOB_TRAINER_KMEANSTRAINER_H
+#endif // BOB_LEARN_MISC_KMEANSTRAINER_H
diff --git a/bob/learn/misc/include/bob.learn.misc/LinearScoring.h b/bob/learn/misc/include/bob.learn.misc/LinearScoring.h
index 353dbf5..aeac718 100644
--- a/bob/learn/misc/include/bob.learn.misc/LinearScoring.h
+++ b/bob/learn/misc/include/bob.learn.misc/LinearScoring.h
@@ -4,19 +4,15 @@
  *
  * Copyright (C) Idiap Research Institute, Martigny, Switzerland
  */
-#ifndef BOB_MACHINE_LINEARSCORING_H
-#define BOB_MACHINE_LINEARSCORING_H
+#ifndef BOB_LEARN_MISC_LINEARSCORING_H
+#define BOB_LEARN_MISC_LINEARSCORING_H
 
 #include <blitz/array.h>
 #include <boost/shared_ptr.hpp>
 #include <vector>
 #include <bob.learn.misc/GMMMachine.h>
 
-namespace bob { namespace machine {
-/**
- * @ingroup MACHINE
- * @{
- */
+namespace bob { namespace learn { namespace misc {
 
 /**
  * Compute a matrix of scores using linear scoring.
@@ -34,13 +30,13 @@ namespace bob { namespace machine {
  */
 void linearScoring(const std::vector<blitz::Array<double,1> >& models,
                    const blitz::Array<double,1>& ubm_mean, const blitz::Array<double,1>& ubm_variance,
-                   const std::vector<boost::shared_ptr<const bob::machine::GMMStats> >& test_stats,
+                   const std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> >& test_stats,
                    const std::vector<blitz::Array<double, 1> >& test_channelOffset,
                    const bool frame_length_normalisation,
                    blitz::Array<double,2>& scores);
 void linearScoring(const std::vector<blitz::Array<double,1> >& models,
                    const blitz::Array<double,1>& ubm_mean, const blitz::Array<double,1>& ubm_variance,
-                   const std::vector<boost::shared_ptr<const bob::machine::GMMStats> >& test_stats,
+                   const std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> >& test_stats,
                    const bool frame_length_normalisation,
                    blitz::Array<double,2>& scores);
 
@@ -56,9 +52,9 @@ void linearScoring(const std::vector<blitz::Array<double,1> >& models,
  * @param[out] scores 2D matrix of scores, <tt>scores[m, s]</tt> is the score for model @c m against statistics @c s
  * @warning the output scores matrix should have the correct size (number of models x number of test_stats)
  */
-void linearScoring(const std::vector<boost::shared_ptr<const bob::machine::GMMMachine> >& models,
-                   const bob::machine::GMMMachine& ubm,
-                   const std::vector<boost::shared_ptr<const bob::machine::GMMStats> >& test_stats,
+void linearScoring(const std::vector<boost::shared_ptr<const bob::learn::misc::GMMMachine> >& models,
+                   const bob::learn::misc::GMMMachine& ubm,
+                   const std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> >& test_stats,
                    const bool frame_length_normalisation,
                    blitz::Array<double,2>& scores);
 /**
@@ -74,9 +70,9 @@ void linearScoring(const std::vector<boost::shared_ptr<const bob::machine::GMMMa
  * @param[out] scores 2D matrix of scores, <tt>scores[m, s]</tt> is the score for model @c m against statistics @c s
  * @warning the output scores matrix should have the correct size (number of models x number of test_stats)
  */
-void linearScoring(const std::vector<boost::shared_ptr<const bob::machine::GMMMachine> >& models,
-                   const bob::machine::GMMMachine& ubm,
-                   const std::vector<boost::shared_ptr<const bob::machine::GMMStats> >& test_stats,
+void linearScoring(const std::vector<boost::shared_ptr<const bob::learn::misc::GMMMachine> >& models,
+                   const bob::learn::misc::GMMMachine& ubm,
+                   const std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> >& test_stats,
                    const std::vector<blitz::Array<double, 1> >& test_channelOffset,
                    const bool frame_length_normalisation,
                    blitz::Array<double,2>& scores);
@@ -93,13 +89,10 @@ void linearScoring(const std::vector<boost::shared_ptr<const bob::machine::GMMMa
  */
 double linearScoring(const blitz::Array<double,1>& model,
                    const blitz::Array<double,1>& ubm_mean, const blitz::Array<double,1>& ubm_variance,
-                   const bob::machine::GMMStats& test_stats,
+                   const bob::learn::misc::GMMStats& test_stats,
                    const blitz::Array<double,1>& test_channelOffset,
                    const bool frame_length_normalisation);
 
-/**
- * @}
- */
-}}
+} } } // namespaces
 
-#endif // BOB_MACHINE_LINEARSCORING_H
+#endif // BOB_LEARN_MISC_LINEARSCORING_H
diff --git a/bob/learn/misc/include/bob.learn.misc/MAP_GMMTrainer.h b/bob/learn/misc/include/bob.learn.misc/MAP_GMMTrainer.h
index 03790bc..6eddb44 100644
--- a/bob/learn/misc/include/bob.learn.misc/MAP_GMMTrainer.h
+++ b/bob/learn/misc/include/bob.learn.misc/MAP_GMMTrainer.h
@@ -8,17 +8,13 @@
  * Copyright (C) Idiap Research Institute, Martigny, Switzerland
  */
 
-#ifndef BOB_TRAINER_MAP_GMMTRAINER_H
-#define BOB_TRAINER_MAP_GMMTRAINER_H
+#ifndef BOB_LEARN_MISC_MAP_GMMTRAINER_H
+#define BOB_LEARN_MISC_MAP_GMMTRAINER_H
 
 #include <bob.learn.misc/GMMTrainer.h>
 #include <limits>
 
-namespace bob { namespace trainer {
-/**
- * @ingroup TRAINER
- * @{
- */
+namespace bob { namespace learn { namespace misc {
 
 /**
  * @brief This class implements the maximum a posteriori M-step of the expectation-maximisation algorithm for a GMM Machine. The prior parameters are encoded in the form of a GMM (e.g. a universal background model). The EM algorithm thus performs GMM adaptation.
@@ -48,7 +44,7 @@ class MAP_GMMTrainer: public GMMTrainer
     /**
      * @brief Initialization
      */
-    virtual void initialize(bob::machine::GMMMachine& gmm,
+    virtual void initialize(bob::learn::misc::GMMMachine& gmm,
       const blitz::Array<double,2>& data);
 
     /**
@@ -77,7 +73,7 @@ class MAP_GMMTrainer: public GMMTrainer
      * Generally, this is a "universal background model" (UBM),
      * also referred to as a "world model".
      */
-    bool setPriorGMM(boost::shared_ptr<bob::machine::GMMMachine> prior_gmm);
+    bool setPriorGMM(boost::shared_ptr<bob::learn::misc::GMMMachine> prior_gmm);
 
     /**
      * @brief Performs a maximum a posteriori (MAP) update of the GMM
@@ -85,7 +81,7 @@ class MAP_GMMTrainer: public GMMTrainer
      * parameters of the prior model
      * Implements EMTrainer::mStep()
      */
-    void mStep(bob::machine::GMMMachine& gmm,
+    void mStep(bob::learn::misc::GMMMachine& gmm,
       const blitz::Array<double,2>& data);
 
     /**
@@ -108,7 +104,7 @@ class MAP_GMMTrainer: public GMMTrainer
      * Generally, this is a "universal background model" (UBM),
      * also referred to as a "world model"
      */
-    boost::shared_ptr<bob::machine::GMMMachine> m_prior_gmm;
+    boost::shared_ptr<bob::learn::misc::GMMMachine> m_prior_gmm;
 
     /**
      * The alpha for the Torch3-like adaptation
@@ -125,9 +121,6 @@ class MAP_GMMTrainer: public GMMTrainer
     mutable blitz::Array<double,1> m_cache_ml_weights;
 };
 
-/**
- * @}
- */
-}}
+} } } // namespaces
 
-#endif
+#endif // BOB_LEARN_MISC_MAP_GMMTRAINER_H
diff --git a/bob/learn/misc/include/bob.learn.misc/ML_GMMTrainer.h b/bob/learn/misc/include/bob.learn.misc/ML_GMMTrainer.h
index d4220f4..0522f27 100644
--- a/bob/learn/misc/include/bob.learn.misc/ML_GMMTrainer.h
+++ b/bob/learn/misc/include/bob.learn.misc/ML_GMMTrainer.h
@@ -8,17 +8,13 @@
  * Copyright (C) Idiap Research Institute, Martigny, Switzerland
  */
 
-#ifndef BOB_TRAINER_ML_GMMTRAINER_H
-#define BOB_TRAINER_ML_GMMTRAINER_H
+#ifndef BOB_LEARN_MISC_ML_GMMTRAINER_H
+#define BOB_LEARN_MISC_ML_GMMTRAINER_H
 
 #include <bob.learn.misc/GMMTrainer.h>
 #include <limits>
 
-namespace bob { namespace trainer {
-/**
- * @ingroup TRAINER
- * @{
- */
+namespace bob { namespace learn { namespace misc {
 
 /**
  * @brief This class implements the maximum likelihood M-step of the
@@ -49,7 +45,7 @@ class ML_GMMTrainer: public GMMTrainer {
     /**
      * @brief Initialisation before the EM steps
      */
-    virtual void initialize(bob::machine::GMMMachine& gmm,
+    virtual void initialize(bob::learn::misc::GMMMachine& gmm,
       const blitz::Array<double,2>& data);
 
     /**
@@ -57,7 +53,7 @@ class ML_GMMTrainer: public GMMTrainer {
      * using the accumulated statistics in m_ss
      * Implements EMTrainer::mStep()
      */
-    virtual void mStep(bob::machine::GMMMachine& gmm,
+    virtual void mStep(bob::learn::misc::GMMMachine& gmm,
       const blitz::Array<double,2>& data);
 
     /**
@@ -89,9 +85,6 @@ class ML_GMMTrainer: public GMMTrainer {
     mutable blitz::Array<double,1> m_cache_ss_n_thresholded;
 };
 
-/**
- * @}
- */
-}}
+} } } // namespaces
 
-#endif
+#endif // BOB_LEARN_MISC_ML_GMMTRAINER_H
diff --git a/bob/learn/misc/include/bob.learn.misc/Machine.h b/bob/learn/misc/include/bob.learn.misc/Machine.h
index 5559994..edbdf22 100644
--- a/bob/learn/misc/include/bob.learn.misc/Machine.h
+++ b/bob/learn/misc/include/bob.learn.misc/Machine.h
@@ -5,8 +5,8 @@
  * Copyright (C) Idiap Research Institute, Martigny, Switzerland
  */
 
-#ifndef BOB_MACHINE_MACHINE_H
-#define BOB_MACHINE_MACHINE_H
+#ifndef BOB_LEARN_MISC_MACHINE_H
+#define BOB_LEARN_MISC_MACHINE_H
 
 #include <cstring>
 
@@ -14,15 +14,7 @@
  * @addtogroup MACHINE machine
  * @brief Machine module API
  */
-namespace bob {
-/**
- * @ingroup MACHINE
- */
-namespace machine {
-/**
- * @ingroup MACHINE
- * @{
- */
+namespace bob { namespace learn { namespace misc {
 
 /**
  * Root class for all machines
@@ -52,8 +44,6 @@ class Machine
     virtual void forward_(const T_input& input, T_output& output) const = 0;
 };
 
-/**
- * @}
- */
-}}
-#endif 
+} } } // namespaces
+
+#endif // BOB_LEARN_MISC_MACHINE_H
diff --git a/bob/learn/misc/include/bob.learn.misc/PLDAMachine.h b/bob/learn/misc/include/bob.learn.misc/PLDAMachine.h
index d66643c..5c2b668 100644
--- a/bob/learn/misc/include/bob.learn.misc/PLDAMachine.h
+++ b/bob/learn/misc/include/bob.learn.misc/PLDAMachine.h
@@ -8,8 +8,8 @@
  * Copyright (C) Idiap Research Institute, Martigny, Switzerland
  */
 
-#ifndef BOB_MACHINE_PLDAMACHINE_H
-#define BOB_MACHINE_PLDAMACHINE_H
+#ifndef BOB_LEARN_MISC_PLDAMACHINE_H
+#define BOB_LEARN_MISC_PLDAMACHINE_H
 
 #include <bob.learn.misc/Machine.h>
 #include <blitz/array.h>
@@ -18,11 +18,7 @@
 #include <iostream>
 #include <stdexcept>
 
-namespace bob { namespace machine {
-/**
- * @ingroup MACHINE
- * @{
- */
+namespace bob { namespace learn { namespace misc {
 
 /**
  * @brief This class is a container for the \f$F\f$, \f$G\f$ and \f$\Sigma\f$
@@ -448,7 +444,7 @@ class PLDAMachine: public Machine<blitz::Array<double,1>, double>
      * @brief Constructor, builds a new PLDAMachine, setting a
      * PLDABase.
      */
-    PLDAMachine(const boost::shared_ptr<bob::machine::PLDABase> pldabase);
+    PLDAMachine(const boost::shared_ptr<bob::learn::misc::PLDABase> pldabase);
     /**
      * @brief Copies another PLDAMachine.\n Both PLDAMachine's will point
      * to the same PLDABase.
@@ -459,7 +455,7 @@ class PLDAMachine: public Machine<blitz::Array<double,1>, double>
      * and a PLDABase.
      */
     PLDAMachine(bob::io::base::HDF5File& config,
-      const boost::shared_ptr<bob::machine::PLDABase> pldabase);
+      const boost::shared_ptr<bob::learn::misc::PLDABase> pldabase);
 
     /**
      * @brief Just to virtualise the destructor
@@ -507,7 +503,7 @@ class PLDAMachine: public Machine<blitz::Array<double,1>, double>
     /**
      * @brief Sets the attached PLDABase
      */
-    void setPLDABase(const boost::shared_ptr<bob::machine::PLDABase> plda_base);
+    void setPLDABase(const boost::shared_ptr<bob::learn::misc::PLDABase> plda_base);
 
     /**
      * @brief Gets the feature dimensionality
@@ -702,9 +698,6 @@ class PLDAMachine: public Machine<blitz::Array<double,1>, double>
     void resizeTmp();
 };
 
-/**
- * @}
- */
-}}
+} } } // namespaces
 
-#endif
+#endif // BOB_LEARN_MISC_PLDAMACHINE_H
diff --git a/bob/learn/misc/include/bob.learn.misc/PLDATrainer.h b/bob/learn/misc/include/bob.learn.misc/PLDATrainer.h
index 5a7bc64..d80372e 100644
--- a/bob/learn/misc/include/bob.learn.misc/PLDATrainer.h
+++ b/bob/learn/misc/include/bob.learn.misc/PLDATrainer.h
@@ -8,8 +8,8 @@
  * Copyright (C) Idiap Research Institute, Martigny, Switzerland
  */
 
-#ifndef BOB_TRAINER_PLDA_TRAINER_H
-#define BOB_TRAINER_PLDA_TRAINER_H
+#ifndef BOB_LEARN_MISC_PLDA_TRAINER_H
+#define BOB_LEARN_MISC_PLDA_TRAINER_H
 
 #include <bob.learn.misc/EMTrainer.h>
 #include <bob.learn.misc/PLDAMachine.h>
@@ -17,11 +17,7 @@
 #include <map>
 #include <vector>
 
-namespace bob { namespace trainer {
-/**
- * @ingroup TRAINER
- * @{
- */
+namespace bob { namespace learn { namespace misc {
 
 /**
  * @brief This class can be used to train the \f$F\f$, \f$G\f$ and
@@ -35,7 +31,7 @@ namespace bob { namespace trainer {
  * 3. 'Probabilistic Models for Inference about Identity', Li, Fu, Mohammed,
  *     Elder and Prince, TPAMI'2012
  */
-class PLDATrainer: public EMTrainer<bob::machine::PLDABase,
+class PLDATrainer: public EMTrainer<bob::learn::misc::PLDABase,
                                         std::vector<blitz::Array<double,2> > >
 {
   public: //api
@@ -80,12 +76,12 @@ class PLDATrainer: public EMTrainer<bob::machine::PLDABase,
     /**
      * @brief Performs some initialization before the E- and M-steps.
      */
-    virtual void initialize(bob::machine::PLDABase& machine,
+    virtual void initialize(bob::learn::misc::PLDABase& machine,
       const std::vector<blitz::Array<double,2> >& v_ar);
     /**
      * @brief Performs some actions after the end of the E- and M-steps.
       */
-    virtual void finalize(bob::machine::PLDABase& machine,
+    virtual void finalize(bob::learn::misc::PLDABase& machine,
       const std::vector<blitz::Array<double,2> >& v_ar);
 
     /**
@@ -93,21 +89,21 @@ class PLDATrainer: public EMTrainer<bob::machine::PLDABase,
      * these as m_z_{first,second}_order.
      * The statistics will be used in the mStep() that follows.
      */
-    virtual void eStep(bob::machine::PLDABase& machine,
+    virtual void eStep(bob::learn::misc::PLDABase& machine,
       const std::vector<blitz::Array<double,2> >& v_ar);
 
     /**
      * @brief Performs a maximization step to update the parameters of the
      * PLDABase
      */
-    virtual void mStep(bob::machine::PLDABase& machine,
+    virtual void mStep(bob::learn::misc::PLDABase& machine,
        const std::vector<blitz::Array<double,2> >& v_ar);
 
     /**
      * @brief Computes the average log likelihood using the current estimates
      * of the latent variables.
      */
-    virtual double computeLikelihood(bob::machine::PLDABase& machine);
+    virtual double computeLikelihood(bob::learn::misc::PLDABase& machine);
 
     /**
      * @brief Sets whether the second order statistics are stored during the
@@ -223,7 +219,7 @@ class PLDATrainer: public EMTrainer<bob::machine::PLDABase,
     /**
      * @brief Main procedure for enrolling a PLDAMachine
      */
-    void enrol(bob::machine::PLDAMachine& plda_machine,
+    void enrol(bob::learn::misc::PLDAMachine& plda_machine,
       const blitz::Array<double,2>& ar) const;
 
   private:
@@ -272,34 +268,31 @@ class PLDATrainer: public EMTrainer<bob::machine::PLDABase,
     mutable blitz::Array<double,2> m_tmp_D_nfng_2; ///< matrix of dimension (dim_d)x(dim_f+dim_g)
 
     // internal methods
-    void computeMeanVariance(bob::machine::PLDABase& machine,
+    void computeMeanVariance(bob::learn::misc::PLDABase& machine,
       const std::vector<blitz::Array<double,2> >& v_ar);
     void initMembers(const std::vector<blitz::Array<double,2> >& v_ar);
-    void initFGSigma(bob::machine::PLDABase& machine,
+    void initFGSigma(bob::learn::misc::PLDABase& machine,
       const std::vector<blitz::Array<double,2> >& v_ar);
-    void initF(bob::machine::PLDABase& machine,
+    void initF(bob::learn::misc::PLDABase& machine,
       const std::vector<blitz::Array<double,2> >& v_ar);
-    void initG(bob::machine::PLDABase& machine,
+    void initG(bob::learn::misc::PLDABase& machine,
       const std::vector<blitz::Array<double,2> >& v_ar);
-    void initSigma(bob::machine::PLDABase& machine,
+    void initSigma(bob::learn::misc::PLDABase& machine,
       const std::vector<blitz::Array<double,2> >& v_ar);
 
     void checkTrainingData(const std::vector<blitz::Array<double,2> >& v_ar);
-    void precomputeFromFGSigma(bob::machine::PLDABase& machine);
-    void precomputeLogLike(bob::machine::PLDABase& machine,
+    void precomputeFromFGSigma(bob::learn::misc::PLDABase& machine);
+    void precomputeLogLike(bob::learn::misc::PLDABase& machine,
       const std::vector<blitz::Array<double,2> >& v_ar);
 
-    void updateFG(bob::machine::PLDABase& machine,
+    void updateFG(bob::learn::misc::PLDABase& machine,
       const std::vector<blitz::Array<double,2> >& v_ar);
-    void updateSigma(bob::machine::PLDABase& machine,
+    void updateSigma(bob::learn::misc::PLDABase& machine,
       const std::vector<blitz::Array<double,2> >& v_ar);
 
     void resizeTmp();
 };
 
-/**
- * @}
- */
-}}
+} } } // namespaces
 
-#endif /* BOB_TRAINER_PLDA_TRAINER_H */
+#endif /* BOB_LEARN_MISC_PLDA_TRAINER_H */
diff --git a/bob/learn/misc/include/bob.learn.misc/Trainer.h b/bob/learn/misc/include/bob.learn.misc/Trainer.h
index 8c70f24..4da9f70 100644
--- a/bob/learn/misc/include/bob.learn.misc/Trainer.h
+++ b/bob/learn/misc/include/bob.learn.misc/Trainer.h
@@ -4,22 +4,14 @@
  *
  * Copyright (C) Idiap Research Institute, Martigny, Switzerland
  */
-#ifndef BOB_TRAINER_TRAINER_H
-#define BOB_TRAINER_TRAINER_H
+#ifndef BOB_LEARN_MISC_TRAINER_H
+#define BOB_LEARN_MISC_TRAINER_H
 
 /**
  * @addtogroup TRAINER trainer
  * @brief Trainer module API
  */
-namespace bob {
-/**
- * @ingroup TRAINER
- */
-namespace trainer {
-/**
- * @ingroup TRAINER
- * @{
- */
+namespace bob { namespace learn { namespace misc {
 
 /**
  * @brief Root class for all trainers
@@ -39,9 +31,6 @@ public:
   virtual void train(T_machine& machine, const T_sampler& sampler) = 0;
 };
 
-/**
- * @}
- */
-}}
+} } } // namespaces
 
-#endif // BOB_TRAINER_TRAINER_H
+#endif // BOB_LEARN_MISC_TRAINER_H
diff --git a/bob/learn/misc/include/bob.learn.misc/WienerMachine.h b/bob/learn/misc/include/bob.learn.misc/WienerMachine.h
index 58b37f2..6e6ca3e 100644
--- a/bob/learn/misc/include/bob.learn.misc/WienerMachine.h
+++ b/bob/learn/misc/include/bob.learn.misc/WienerMachine.h
@@ -5,8 +5,8 @@
  * Copyright (C) Idiap Research Institute, Martigny, Switzerland
  */
 
-#ifndef BOB_MACHINE_WIENERMACHINE_H
-#define BOB_MACHINE_WIENERMACHINE_H
+#ifndef BOB_LEARN_MISC_WIENERMACHINE_H
+#define BOB_LEARN_MISC_WIENERMACHINE_H
 
 #include <bob.learn.misc/Machine.h>
 #include <blitz/array.h>
@@ -14,11 +14,7 @@
 #include <bob.io.base/HDF5File.h>
 #include <bob.sp/FFT2D.h>
 
-namespace bob { namespace machine {
-/**
- * @ingroup MACHINE
- * @{
- */
+namespace bob { namespace learn { namespace misc {
 
 /**
  * @brief A Wiener machine, which can be used to denoise a signal,
@@ -211,9 +207,6 @@ class WienerMachine: Machine<blitz::Array<double,2>, blitz::Array<double,2> >
     mutable blitz::Array<std::complex<double>, 2> m_buffer2; ///< a buffer for speed
 };
 
-/**
- * @}
- */
-}}
+} } } // namespaces
 
-#endif /* BOB_MACHINE_WIENERMACHINE_H */
+#endif /* BOB_LEARN_MISC_WIENERMACHINE_H */
diff --git a/bob/learn/misc/include/bob.learn.misc/WienerTrainer.h b/bob/learn/misc/include/bob.learn.misc/WienerTrainer.h
index 2b9ebab..3e8f379 100644
--- a/bob/learn/misc/include/bob.learn.misc/WienerTrainer.h
+++ b/bob/learn/misc/include/bob.learn.misc/WienerTrainer.h
@@ -7,18 +7,14 @@
  * Copyright (C) Idiap Research Institute, Martigny, Switzerland
  */
 
-#ifndef BOB_TRAINER_WIENER_TRAINER_H
-#define BOB_TRAINER_WIENER_TRAINER_H
+#ifndef BOB_LEARN_MISC_WIENER_TRAINER_H
+#define BOB_LEARN_MISC_WIENER_TRAINER_H
 
 #include <bob.learn.misc/Trainer.h>
 #include <bob.learn.misc/WienerMachine.h>
 #include <blitz/array.h>
 
-namespace bob { namespace trainer {
-/**
- * @ingroup TRAINER
- * @{
- */
+namespace bob { namespace learn { namespace misc {
 
 /**
  * @brief Sets a Wiener machine to perform a Wiener filtering, using the
@@ -28,7 +24,7 @@ namespace bob { namespace trainer {
  * "Computer Vision: Algorithms and Applications", Richard Szeliski
  * (Part 3.4.3)
  */
-class WienerTrainer: Trainer<bob::machine::WienerMachine, blitz::Array<double,3> >
+class WienerTrainer: Trainer<bob::learn::misc::WienerMachine, blitz::Array<double,3> >
 {
   public: //api
     /**
@@ -69,15 +65,12 @@ class WienerTrainer: Trainer<bob::machine::WienerMachine, blitz::Array<double,3>
     /**
      * @brief Trains the WienerMachine to perform the filtering.
      */
-    virtual void train(bob::machine::WienerMachine& machine,
+    virtual void train(bob::learn::misc::WienerMachine& machine,
         const blitz::Array<double,3>& data);
 
   private: //representation
 };
 
-/**
- * @}
- */
-}}
+} } } // namespaces
 
-#endif /* BOB_TRAINER_WIENER_TRAINER_H */
+#endif /* BOB_LEARN_MISC_WIENER_TRAINER_H */
diff --git a/bob/learn/misc/include/bob.learn.misc/ZTNorm.h b/bob/learn/misc/include/bob.learn.misc/ZTNorm.h
index 9ce747e..47f0af9 100644
--- a/bob/learn/misc/include/bob.learn.misc/ZTNorm.h
+++ b/bob/learn/misc/include/bob.learn.misc/ZTNorm.h
@@ -6,22 +6,18 @@
  * Copyright (C) Idiap Research Institute, Martigny, Switzerland
  */
 
-#ifndef BOB_MACHINE_ZTNORM_H
-#define BOB_MACHINE_ZTNORM_H
+#ifndef BOB_LEARN_MISC_ZTNORM_H
+#define BOB_LEARN_MISC_ZTNORM_H
 
 #include <blitz/array.h>
 
-namespace bob { namespace machine {
-/**
- * @ingroup MACHINE
- * @{
- */
+namespace bob { namespace learn { namespace misc {
 
 /**
  * Normalise raw scores with ZT-Norm
  *
  * @exception std::runtime_error matrix sizes are not consistent
- * 
+ *
  * @param rawscores_probes_vs_models
  * @param rawscores_zprobes_vs_models
  * @param rawscores_probes_vs_tmodels
@@ -88,9 +84,6 @@ void zNorm(const blitz::Array<double,2>& rawscores_probes_vs_models,
            const blitz::Array<double,2>& rawscores_zprobes_vs_models,
            blitz::Array<double,2>& normalizedscores);
 
-/**
- * @}
- */
-}}
+} } } // namespaces
 
-#endif /* BOB_MACHINE_ZTNORM_H */
+#endif /* BOB_LEARN_MISC_ZTNORM_H */
diff --git a/bob/learn/misc/old/bic.cc b/bob/learn/misc/old/bic.cc
index 7174d24..52a1e99 100644
--- a/bob/learn/misc/old/bic.cc
+++ b/bob/learn/misc/old/bic.cc
@@ -12,25 +12,25 @@
 #include "ndarray.h"
 #include <bob.learn.misc/BICMachine.h>
 
-static double bic_call_(const bob::machine::BICMachine& machine, bob::python::const_ndarray input){
+static double bic_call_(const bob::learn::misc::BICMachine& machine, bob::python::const_ndarray input){
   double o;
   machine.forward_(input.bz<double,1>(), o);
   return o;
 }
 
-static double bic_call(const bob::machine::BICMachine& machine, bob::python::const_ndarray input){
+static double bic_call(const bob::learn::misc::BICMachine& machine, bob::python::const_ndarray input){
   double o;
   machine.forward(input.bz<double,1>(), o);
   return o;
 }
 
-static void bic_load(bob::machine::BICMachine& machine, boost::python::object file){
+static void bic_load(bob::learn::misc::BICMachine& machine, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   machine.load(*hdf5->f);
 }
 
-static void bic_save(const bob::machine::BICMachine& machine, boost::python::object file){
+static void bic_save(const bob::learn::misc::BICMachine& machine, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   machine.save(*hdf5->f);
@@ -40,7 +40,7 @@ static void bic_save(const bob::machine::BICMachine& machine, boost::python::obj
 void bind_machine_bic(){
 
   // bind BICMachine
-  boost::python::class_<bob::machine::BICMachine, boost::shared_ptr<bob::machine::BICMachine> > (
+  boost::python::class_<bob::learn::misc::BICMachine, boost::shared_ptr<bob::learn::misc::BICMachine> > (
       "BICMachine",
       "This machine is designed to classify image differences to be either intrapersonal or extrapersonal. "
       "There are two possible implementations of the BIC:\n"
@@ -61,7 +61,7 @@ void bind_machine_bic(){
     )
 
     .def(
-      boost::python::init<const bob::machine::BICMachine&>(
+      boost::python::init<const bob::learn::misc::BICMachine&>(
           (boost::python::arg("self"), boost::python::arg("other")),
           "Constructs one BICMachine from another one by doing a deep copy."
       )
@@ -73,7 +73,7 @@ void bind_machine_bic(){
 
     .def(
       "is_similar_to",
-      &bob::machine::BICMachine::is_similar_to,
+      &bob::learn::misc::BICMachine::is_similar_to,
       (boost::python::arg("self"), boost::python::arg("other"), boost::python::arg("r_epsilon") = 1e-5, boost::python::arg("a_epsilon") = 1e-8),
       "Compares this BICMachine with the 'other' one to be approximately the same."
     )
@@ -132,8 +132,8 @@ void bind_machine_bic(){
     .add_property(
       "use_dffs",
       // cast overloaded function with the same name to its type...
-      static_cast<bool (bob::machine::BICMachine::*)() const>(&bob::machine::BICMachine::use_DFFS),
-      static_cast<void (bob::machine::BICMachine::*)(bool)>(&bob::machine::BICMachine::use_DFFS),
+      static_cast<bool (bob::learn::misc::BICMachine::*)() const>(&bob::learn::misc::BICMachine::use_DFFS),
+      static_cast<void (bob::learn::misc::BICMachine::*)(bool)>(&bob::learn::misc::BICMachine::use_DFFS),
       "Should the Distance From Feature Space (DFFS) measure be added during scoring? \n\n.. warning :: Only set this flag to True if the number of intrapersonal and extrapersonal training pairs is approximately equal. Otherwise, weird thing may happen!"
   );
 }
diff --git a/bob/learn/misc/old/bic_trainer.cc b/bob/learn/misc/old/bic_trainer.cc
index bdf3ab7..7de6156 100644
--- a/bob/learn/misc/old/bic_trainer.cc
+++ b/bob/learn/misc/old/bic_trainer.cc
@@ -9,8 +9,8 @@
 #include "ndarray.h"
 #include <bob.learn.misc/BICTrainer.h>
 
-void py_train(const bob::trainer::BICTrainer& t,
-  bob::machine::BICMachine& m, bob::python::const_ndarray intra_differences,
+void py_train(const bob::learn::misc::BICTrainer& t,
+  bob::learn::misc::BICMachine& m, bob::python::const_ndarray intra_differences,
   bob::python::const_ndarray extra_differences)
 {
   t.train(m, intra_differences.bz<double,2>(),
@@ -21,7 +21,7 @@ void py_train(const bob::trainer::BICTrainer& t,
 
 void bind_trainer_bic(){
 
-  boost::python::class_<bob::trainer::BICTrainer, boost::shared_ptr<bob::trainer::BICTrainer> > (
+  boost::python::class_<bob::learn::misc::BICTrainer, boost::shared_ptr<bob::learn::misc::BICTrainer> > (
       "BICTrainer",
       "A Trainer for a BICMachine. It trains either a BIC model (including projection matrix and eigenvalues), "
           "or an IEC model (containing mean and variance only). See :py:class:`bob.machine.BICMachine` for more details.",
diff --git a/bob/learn/misc/old/empca_trainer.cc b/bob/learn/misc/old/empca_trainer.cc
index 61f2103..3df1d75 100644
--- a/bob/learn/misc/old/empca_trainer.cc
+++ b/bob/learn/misc/old/empca_trainer.cc
@@ -11,7 +11,7 @@
 
 using namespace boost::python;
 
-typedef bob::trainer::EMTrainer<bob::learn::linear::Machine, blitz::Array<double,2> > EMTrainerLinearBase;
+typedef bob::learn::misc::EMTrainer<bob::learn::linear::Machine, blitz::Array<double,2> > EMTrainerLinearBase;
 
 static void py_train(EMTrainerLinearBase& trainer,
   bob::learn::linear::Machine& machine, bob::python::const_ndarray data)
@@ -60,13 +60,13 @@ void bind_trainer_empca()
     .def("compute_likelihood", &EMTrainerLinearBase::computeLikelihood, (arg("self"), arg("machine")), "Computes the current log likelihood given the hidden variable distribution (or the sufficient statistics)")
   ;
 
-  class_<bob::trainer::EMPCATrainer, boost::noncopyable, bases<EMTrainerLinearBase> >("EMPCATrainer",
+  class_<bob::learn::misc::EMPCATrainer, boost::noncopyable, bases<EMTrainerLinearBase> >("EMPCATrainer",
       "This class implements the EM algorithm for a Linear Machine (Probabilistic PCA).\n"
       "See Section 12.2 of Bishop, \"Pattern recognition and machine learning\", 2006", init<optional<double,size_t,bool> >((arg("self"), arg("convergence_threshold"), arg("max_iterations"), arg("compute_likelihood"))))
-    .def(init<const bob::trainer::EMPCATrainer&>((arg("self"), arg("trainer")), "Copy constructs an EMPCATrainer"))
+    .def(init<const bob::learn::misc::EMPCATrainer&>((arg("self"), arg("trainer")), "Copy constructs an EMPCATrainer"))
     .def(self == self)
     .def(self != self)
-    .def("is_similar_to", &bob::trainer::EMPCATrainer::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this EMPCATrainer with the 'other' one to be approximately the same.")
-    .add_property("sigma2", &bob::trainer::EMPCATrainer::getSigma2, &bob::trainer::EMPCATrainer::setSigma2, "The noise sigma2 of the probabilistic model")
+    .def("is_similar_to", &bob::learn::misc::EMPCATrainer::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this EMPCATrainer with the 'other' one to be approximately the same.")
+    .add_property("sigma2", &bob::learn::misc::EMPCATrainer::getSigma2, &bob::learn::misc::EMPCATrainer::setSigma2, "The noise sigma2 of the probabilistic model")
   ;
 }
diff --git a/bob/learn/misc/old/gaussian.cc b/bob/learn/misc/old/gaussian.cc
index 3543f72..70a8856 100644
--- a/bob/learn/misc/old/gaussian.cc
+++ b/bob/learn/misc/old/gaussian.cc
@@ -15,36 +15,36 @@
 
 using namespace boost::python;
 
-static void py_setMean(bob::machine::Gaussian& machine,
+static void py_setMean(bob::learn::misc::Gaussian& machine,
   bob::python::const_ndarray mean)
 {
   machine.setMean(mean.bz<double,1>());
 }
 
-static void py_setVariance(bob::machine::Gaussian& machine,
+static void py_setVariance(bob::learn::misc::Gaussian& machine,
   bob::python::const_ndarray variance)
 {
   machine.setVariance(variance.bz<double,1>());
 }
 
-static void py_setVarianceThresholds(bob::machine::Gaussian& machine,
+static void py_setVarianceThresholds(bob::learn::misc::Gaussian& machine,
   bob::python::const_ndarray varianceThresholds)
 {
   machine.setVarianceThresholds(varianceThresholds.bz<double,1>());
 }
 
-static tuple get_shape(const bob::machine::Gaussian& m)
+static tuple get_shape(const bob::learn::misc::Gaussian& m)
 {
   return make_tuple(m.getNInputs());
 }
 
-static void set_shape(bob::machine::Gaussian& m,
+static void set_shape(bob::learn::misc::Gaussian& m,
   const blitz::TinyVector<int,1>& s)
 {
   m.resize(s(0));
 }
 
-static double py_logLikelihood(const bob::machine::Gaussian& machine,
+static double py_logLikelihood(const bob::learn::misc::Gaussian& machine,
   bob::python::const_ndarray input)
 {
   double output;
@@ -52,7 +52,7 @@ static double py_logLikelihood(const bob::machine::Gaussian& machine,
   return output;
 }
 
-static double py_logLikelihood_(const bob::machine::Gaussian& machine,
+static double py_logLikelihood_(const bob::learn::misc::Gaussian& machine,
   bob::python::const_ndarray input)
 {
   double output;
@@ -61,19 +61,19 @@ static double py_logLikelihood_(const bob::machine::Gaussian& machine,
 }
 
 
-static boost::shared_ptr<bob::machine::Gaussian> _init(boost::python::object file){
+static boost::shared_ptr<bob::learn::misc::Gaussian> _init(boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  return boost::shared_ptr<bob::machine::Gaussian>(new bob::machine::Gaussian(*hdf5->f));
+  return boost::shared_ptr<bob::learn::misc::Gaussian>(new bob::learn::misc::Gaussian(*hdf5->f));
 }
 
-static void _load(bob::machine::Gaussian& self, boost::python::object file){
+static void _load(bob::learn::misc::Gaussian& self, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.load(*hdf5->f);
 }
 
-static void _save(const bob::machine::Gaussian& self, boost::python::object file){
+static void _save(const bob::learn::misc::Gaussian& self, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.save(*hdf5->f);
@@ -82,26 +82,26 @@ static void _save(const bob::machine::Gaussian& self, boost::python::object file
 
 void bind_machine_gaussian()
 {
-  class_<bob::machine::Gaussian, boost::shared_ptr<bob::machine::Gaussian>, bases<bob::machine::Machine<blitz::Array<double,1>, double> > >("Gaussian",
+  class_<bob::learn::misc::Gaussian, boost::shared_ptr<bob::learn::misc::Gaussian>, bases<bob::learn::misc::Machine<blitz::Array<double,1>, double> > >("Gaussian",
     "This class implements a multivariate diagonal Gaussian distribution.", no_init)
     .def("__init__", boost::python::make_constructor(&_init))
     .def(init<>(arg("self")))
     .def(init<const size_t>((arg("self"), arg("n_inputs"))))
-    .def(init<bob::machine::Gaussian&>((arg("self"), arg("other"))))
+    .def(init<bob::learn::misc::Gaussian&>((arg("self"), arg("other"))))
     .def(self == self)
     .def(self != self)
-    .def("is_similar_to", &bob::machine::Gaussian::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this Gaussian with the 'other' one to be approximately the same.")
-    .add_property("dim_d", &bob::machine::Gaussian::getNInputs, &bob::machine::Gaussian::setNInputs,
+    .def("is_similar_to", &bob::learn::misc::Gaussian::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this Gaussian with the 'other' one to be approximately the same.")
+    .add_property("dim_d", &bob::learn::misc::Gaussian::getNInputs, &bob::learn::misc::Gaussian::setNInputs,
       "Dimensionality of the input feature space")
-    .add_property("mean", make_function(&bob::machine::Gaussian::getMean, return_value_policy<copy_const_reference>()), &py_setMean, "Mean of the Gaussian")
-    .add_property("variance", make_function(&bob::machine::Gaussian::getVariance, return_value_policy<copy_const_reference>()), &py_setVariance, "The diagonal of the (diagonal) covariance matrix")
-    .add_property("variance_thresholds", make_function(&bob::machine::Gaussian::getVarianceThresholds, return_value_policy<copy_const_reference>()), &py_setVarianceThresholds,
+    .add_property("mean", make_function(&bob::learn::misc::Gaussian::getMean, return_value_policy<copy_const_reference>()), &py_setMean, "Mean of the Gaussian")
+    .add_property("variance", make_function(&bob::learn::misc::Gaussian::getVariance, return_value_policy<copy_const_reference>()), &py_setVariance, "The diagonal of the (diagonal) covariance matrix")
+    .add_property("variance_thresholds", make_function(&bob::learn::misc::Gaussian::getVarianceThresholds, return_value_policy<copy_const_reference>()), &py_setVarianceThresholds,
       "The variance flooring thresholds, i.e. the minimum allowed value of variance in each dimension. "
       "The variance will be set to this value if an attempt is made to set it to a smaller value.")
     .add_property("shape", &get_shape, &set_shape, "A tuple that represents the dimensionality of the Gaussian ``(dim_d,)``.")
-    .def("set_variance_thresholds",  (void (bob::machine::Gaussian::*)(const double))&bob::machine::Gaussian::setVarianceThresholds, (arg("self"), arg("var_thd")),
+    .def("set_variance_thresholds",  (void (bob::learn::misc::Gaussian::*)(const double))&bob::learn::misc::Gaussian::setVarianceThresholds, (arg("self"), arg("var_thd")),
          "Set the variance flooring thresholds equal to the given threshold for all the dimensions.")
-    .def("resize", &bob::machine::Gaussian::resize, (arg("self"), arg("dim_d")), "Set the input dimensionality, reset the mean to zero and the variance to one.")
+    .def("resize", &bob::learn::misc::Gaussian::resize, (arg("self"), arg("dim_d")), "Set the input dimensionality, reset the mean to zero and the variance to one.")
     .def("log_likelihood", &py_logLikelihood, (arg("self"), arg("sample")), "Output the log likelihood of the sample, x. The input size is checked.")
     .def("log_likelihood_", &py_logLikelihood_, (arg("self"), arg("sample")), "Output the log likelihood of the sample, x. The input size is NOT checked.")
     .def("save", &_save, (arg("self"), arg("config")), "Save to a Configuration")
diff --git a/bob/learn/misc/old/gmm.cc b/bob/learn/misc/old/gmm.cc
index 73919b6..00b290d 100644
--- a/bob/learn/misc/old/gmm.cc
+++ b/bob/learn/misc/old/gmm.cc
@@ -16,7 +16,7 @@
 
 using namespace boost::python;
 
-static object py_gmmstats_getN(bob::machine::GMMStats& s)
+static object py_gmmstats_getN(bob::learn::misc::GMMStats& s)
 {
   bob::python::ndarray n(bob::io::base::array::t_float64, s.n.extent(0));
   blitz::Array<double,1> n_ = n.bz<double,1>();
@@ -24,13 +24,13 @@ static object py_gmmstats_getN(bob::machine::GMMStats& s)
   return n.self();
 }
 
-static void py_gmmstats_setN(bob::machine::GMMStats& s,
+static void py_gmmstats_setN(bob::learn::misc::GMMStats& s,
   bob::python::const_ndarray n)
 {
   s.n = n.bz<double,1>();
 }
 
-static object py_gmmstats_getSumpx(bob::machine::GMMStats& s)
+static object py_gmmstats_getSumpx(bob::learn::misc::GMMStats& s)
 {
   bob::python::ndarray sumpx(bob::io::base::array::t_float64, s.sumPx.extent(0),
     s.sumPx.extent(1));
@@ -39,13 +39,13 @@ static object py_gmmstats_getSumpx(bob::machine::GMMStats& s)
   return sumpx.self();
 }
 
-static void py_gmmstats_setSumpx(bob::machine::GMMStats& s,
+static void py_gmmstats_setSumpx(bob::learn::misc::GMMStats& s,
   bob::python::const_ndarray sumpx)
 {
   s.sumPx = sumpx.bz<double,2>();
 }
 
-static object py_gmmstats_getSumpxx(bob::machine::GMMStats& s)
+static object py_gmmstats_getSumpxx(bob::learn::misc::GMMStats& s)
 {
   bob::python::ndarray sumpxx(bob::io::base::array::t_float64, s.sumPxx.extent(0),
     s.sumPxx.extent(1));
@@ -54,20 +54,20 @@ static object py_gmmstats_getSumpxx(bob::machine::GMMStats& s)
   return sumpxx.self();
 }
 
-static void py_gmmstats_setSumpxx(bob::machine::GMMStats& s,
+static void py_gmmstats_setSumpxx(bob::learn::misc::GMMStats& s,
   bob::python::const_ndarray sumpxx)
 {
   s.sumPxx = sumpxx.bz<double,2>();
 }
 
 
-static void py_gmmmachine_setWeights(bob::machine::GMMMachine& machine,
+static void py_gmmmachine_setWeights(bob::learn::misc::GMMMachine& machine,
   bob::python::const_ndarray weights)
 {
   machine.setWeights(weights.bz<double,1>());
 }
 
-static object py_gmmmachine_getMeans(const bob::machine::GMMMachine& machine)
+static object py_gmmmachine_getMeans(const bob::learn::misc::GMMMachine& machine)
 {
   bob::python::ndarray means(bob::io::base::array::t_float64,
     machine.getNGaussians(), machine.getNInputs());
@@ -76,19 +76,19 @@ static object py_gmmmachine_getMeans(const bob::machine::GMMMachine& machine)
   return means.self();
 }
 
-static void py_gmmmachine_setMeans(bob::machine::GMMMachine& machine,
+static void py_gmmmachine_setMeans(bob::learn::misc::GMMMachine& machine,
   bob::python::const_ndarray means)
 {
   machine.setMeans(means.bz<double,2>());
 }
 
-static void py_gmmmachine_setMeanSupervector(bob::machine::GMMMachine& machine,
+static void py_gmmmachine_setMeanSupervector(bob::learn::misc::GMMMachine& machine,
   bob::python::const_ndarray vec)
 {
   machine.setMeanSupervector(vec.bz<double,1>());
 }
 
-static object py_gmmmachine_getVariances(const bob::machine::GMMMachine& machine)
+static object py_gmmmachine_getVariances(const bob::learn::misc::GMMMachine& machine)
 {
   bob::python::ndarray variances(bob::io::base::array::t_float64,
     machine.getNGaussians(), machine.getNInputs());
@@ -97,19 +97,19 @@ static object py_gmmmachine_getVariances(const bob::machine::GMMMachine& machine
   return variances.self();
 }
 
-static void py_gmmmachine_setVariances(bob::machine::GMMMachine& machine,
+static void py_gmmmachine_setVariances(bob::learn::misc::GMMMachine& machine,
   bob::python::const_ndarray variances)
 {
   machine.setVariances(variances.bz<double,2>());
 }
 
-static void py_gmmmachine_setVarianceSupervector(bob::machine::GMMMachine& machine,
+static void py_gmmmachine_setVarianceSupervector(bob::learn::misc::GMMMachine& machine,
   bob::python::const_ndarray vec)
 {
   machine.setVarianceSupervector(vec.bz<double,1>());
 }
 
-static object py_gmmmachine_getVarianceThresholds(const bob::machine::GMMMachine& machine)
+static object py_gmmmachine_getVarianceThresholds(const bob::learn::misc::GMMMachine& machine)
 {
   bob::python::ndarray varianceThresholds(bob::io::base::array::t_float64,
     machine.getNGaussians(), machine.getNInputs());
@@ -118,13 +118,13 @@ static object py_gmmmachine_getVarianceThresholds(const bob::machine::GMMMachine
   return varianceThresholds.self();
 }
 
-static void py_gmmmachine_setVarianceThresholds(bob::machine::GMMMachine& machine,
+static void py_gmmmachine_setVarianceThresholds(bob::learn::misc::GMMMachine& machine,
   bob::python::const_ndarray varianceThresholds)
 {
   machine.setVarianceThresholds(varianceThresholds.bz<double,2>());
 }
 
-static void py_gmmmachine_setVarianceThresholdsOther(bob::machine::GMMMachine& machine,
+static void py_gmmmachine_setVarianceThresholdsOther(bob::learn::misc::GMMMachine& machine,
   object o)
 {
   extract<int> int_check(o);
@@ -145,45 +145,45 @@ static void py_gmmmachine_setVarianceThresholdsOther(bob::machine::GMMMachine& m
   }
 }
 
-static tuple py_gmmmachine_get_shape(const bob::machine::GMMMachine& m)
+static tuple py_gmmmachine_get_shape(const bob::learn::misc::GMMMachine& m)
 {
   return make_tuple(m.getNGaussians(), m.getNInputs());
 }
 
-static void py_gmmmachine_set_shape(bob::machine::GMMMachine& m,
+static void py_gmmmachine_set_shape(bob::learn::misc::GMMMachine& m,
   const blitz::TinyVector<int,2>& s)
 {
   m.resize(s(0), s(1));
 }
 
-static double py_gmmmachine_loglikelihoodA(const bob::machine::GMMMachine& machine,
+static double py_gmmmachine_loglikelihoodA(const bob::learn::misc::GMMMachine& machine,
   bob::python::const_ndarray x, bob::python::ndarray ll)
 {
   blitz::Array<double,1> ll_ = ll.bz<double,1>();
   return machine.logLikelihood(x.bz<double,1>(), ll_);
 }
 
-static double py_gmmmachine_loglikelihoodA_(const bob::machine::GMMMachine& machine,
+static double py_gmmmachine_loglikelihoodA_(const bob::learn::misc::GMMMachine& machine,
   bob::python::const_ndarray x, bob::python::ndarray ll)
 {
   blitz::Array<double,1> ll_ = ll.bz<double,1>();
   return machine.logLikelihood_(x.bz<double,1>(), ll_);
 }
 
-static double py_gmmmachine_loglikelihoodB(const bob::machine::GMMMachine& machine,
+static double py_gmmmachine_loglikelihoodB(const bob::learn::misc::GMMMachine& machine,
   bob::python::const_ndarray x)
 {
   return machine.logLikelihood(x.bz<double,1>());
 }
 
-static double py_gmmmachine_loglikelihoodB_(const bob::machine::GMMMachine& machine,
+static double py_gmmmachine_loglikelihoodB_(const bob::learn::misc::GMMMachine& machine,
   bob::python::const_ndarray x)
 {
   return machine.logLikelihood_(x.bz<double,1>());
 }
 
-static void py_gmmmachine_accStatistics(const bob::machine::GMMMachine& machine,
-  bob::python::const_ndarray x, bob::machine::GMMStats& gs)
+static void py_gmmmachine_accStatistics(const bob::learn::misc::GMMMachine& machine,
+  bob::python::const_ndarray x, bob::learn::misc::GMMStats& gs)
 {
   const bob::io::base::array::typeinfo& info = x.type();
   switch(info.nd) {
@@ -198,8 +198,8 @@ static void py_gmmmachine_accStatistics(const bob::machine::GMMMachine& machine,
   }
 }
 
-static void py_gmmmachine_accStatistics_(const bob::machine::GMMMachine& machine,
-  bob::python::const_ndarray x, bob::machine::GMMStats& gs)
+static void py_gmmmachine_accStatistics_(const bob::learn::misc::GMMMachine& machine,
+  bob::python::const_ndarray x, bob::learn::misc::GMMStats& gs)
 {
   const bob::io::base::array::typeinfo& info = x.type();
   switch(info.nd) {
@@ -214,43 +214,43 @@ static void py_gmmmachine_accStatistics_(const bob::machine::GMMMachine& machine
   }
 }
 
-static boost::shared_ptr<bob::machine::GMMStats> s_init(boost::python::object file){
+static boost::shared_ptr<bob::learn::misc::GMMStats> s_init(boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  return boost::shared_ptr<bob::machine::GMMStats>(new bob::machine::GMMStats(*hdf5->f));
+  return boost::shared_ptr<bob::learn::misc::GMMStats>(new bob::learn::misc::GMMStats(*hdf5->f));
 }
 
-static void s_load(bob::machine::GMMStats& self, boost::python::object file){
+static void s_load(bob::learn::misc::GMMStats& self, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.load(*hdf5->f);
 }
 
-static void s_save(const bob::machine::GMMStats& self, boost::python::object file){
+static void s_save(const bob::learn::misc::GMMStats& self, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.save(*hdf5->f);
 }
 
 
-static boost::shared_ptr<bob::machine::GMMMachine> m_init(boost::python::object file){
+static boost::shared_ptr<bob::learn::misc::GMMMachine> m_init(boost::python::object file){
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  return boost::shared_ptr<bob::machine::GMMMachine>(new bob::machine::GMMMachine(*hdf5->f));
+  return boost::shared_ptr<bob::learn::misc::GMMMachine>(new bob::learn::misc::GMMMachine(*hdf5->f));
 }
 
-static void m_load(bob::machine::GMMMachine& self, boost::python::object file){
+static void m_load(bob::learn::misc::GMMMachine& self, boost::python::object file){
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.load(*hdf5->f);
 }
 
-static void m_save(const bob::machine::GMMMachine& self, boost::python::object file){
+static void m_save(const bob::learn::misc::GMMMachine& self, boost::python::object file){
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.save(*hdf5->f);
 }
 
 void bind_machine_gmm()
 {
-  class_<bob::machine::GMMStats, boost::shared_ptr<bob::machine::GMMStats> >("GMMStats",
+  class_<bob::learn::misc::GMMStats, boost::shared_ptr<bob::learn::misc::GMMStats> >("GMMStats",
       "A container for GMM statistics.\n"
       "With respect to Reynolds, \"Speaker Verification Using Adapted "
       "Gaussian Mixture Models\", DSP, 2000:\n"
@@ -260,60 +260,60 @@ void bind_machine_gmm()
       init<>(arg("self")))
     .def("__init__", boost::python::make_constructor(&s_init))
     .def(init<const size_t, const size_t>((arg("self"), arg("n_gaussians"), arg("n_inputs"))))
-    .def(init<bob::machine::GMMStats&>((arg("self"), arg("other")), "Creates a GMMStats from another GMMStats, using the copy constructor."))
+    .def(init<bob::learn::misc::GMMStats&>((arg("self"), arg("other")), "Creates a GMMStats from another GMMStats, using the copy constructor."))
     .def(self == self)
     .def(self != self)
-    .def("is_similar_to", &bob::machine::GMMStats::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this GMMStats with the 'other' one to be approximately the same.")
-    .def_readwrite("log_likelihood", &bob::machine::GMMStats::log_likelihood, "The accumulated log likelihood of all samples")
-    .def_readwrite("t", &bob::machine::GMMStats::T, "The accumulated number of samples")
+    .def("is_similar_to", &bob::learn::misc::GMMStats::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this GMMStats with the 'other' one to be approximately the same.")
+    .def_readwrite("log_likelihood", &bob::learn::misc::GMMStats::log_likelihood, "The accumulated log likelihood of all samples")
+    .def_readwrite("t", &bob::learn::misc::GMMStats::T, "The accumulated number of samples")
     .add_property("n", &py_gmmstats_getN, &py_gmmstats_setN, "For each Gaussian, the accumulated sum of responsibilities, i.e. the sum of P(gaussian_i|x)")
     .add_property("sum_px", &py_gmmstats_getSumpx, &py_gmmstats_setSumpx, "For each Gaussian, the accumulated sum of responsibility times the sample ")
     .add_property("sum_pxx", &py_gmmstats_getSumpxx, &py_gmmstats_setSumpxx, "For each Gaussian, the accumulated sum of responsibility times the sample squared")
-    .def("resize", &bob::machine::GMMStats::resize, (arg("self"), arg("n_gaussians"), arg("n_inputs")),
+    .def("resize", &bob::learn::misc::GMMStats::resize, (arg("self"), arg("n_gaussians"), arg("n_inputs")),
          " Allocates space for the statistics and resets to zero.")
-    .def("init", &bob::machine::GMMStats::init, (arg("self")), "Resets statistics to zero.")
+    .def("init", &bob::learn::misc::GMMStats::init, (arg("self")), "Resets statistics to zero.")
     .def("save", &s_save, (arg("self"), arg("config")), "Save to a Configuration")
     .def("load", &s_load, (arg("self"), arg("config")), "Load from a Configuration")
     .def(self_ns::str(self_ns::self))
     .def(self_ns::self += self_ns::self)
   ;
 
-  class_<bob::machine::GMMMachine, boost::shared_ptr<bob::machine::GMMMachine>, bases<bob::machine::Machine<blitz::Array<double,1>, double> > >("GMMMachine",
+  class_<bob::learn::misc::GMMMachine, boost::shared_ptr<bob::learn::misc::GMMMachine>, bases<bob::learn::misc::Machine<blitz::Array<double,1>, double> > >("GMMMachine",
       "This class implements a multivariate diagonal Gaussian distribution.\n"
       "See Section 2.3.9 of Bishop, \"Pattern recognition and machine learning\", 2006",
       init<>(arg("self")))
     .def("__init__", boost::python::make_constructor(&m_init))
-    .def(init<bob::machine::GMMMachine&>((arg("self"), arg("other")), "Creates a GMMMachine from another GMMMachine, using the copy constructor."))
+    .def(init<bob::learn::misc::GMMMachine&>((arg("self"), arg("other")), "Creates a GMMMachine from another GMMMachine, using the copy constructor."))
     .def(init<const size_t, const size_t>((arg("self"), arg("n_gaussians"), arg("n_inputs"))))
     .def(self == self)
     .def(self != self)
-    .def("is_similar_to", &bob::machine::GMMMachine::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this GMMMachine with the 'other' one to be approximately the same.")
-    .add_property("dim_d", &bob::machine::GMMMachine::getNInputs, &bob::machine::GMMMachine::setNInputs, "The feature dimensionality D")
-    .add_property("dim_c", &bob::machine::GMMMachine::getNGaussians, "The number of Gaussian components C")
-    .add_property("weights", make_function(&bob::machine::GMMMachine::getWeights, return_value_policy<copy_const_reference>()), &py_gmmmachine_setWeights, "The weights (also known as \"mixing coefficients\")")
+    .def("is_similar_to", &bob::learn::misc::GMMMachine::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this GMMMachine with the 'other' one to be approximately the same.")
+    .add_property("dim_d", &bob::learn::misc::GMMMachine::getNInputs, &bob::learn::misc::GMMMachine::setNInputs, "The feature dimensionality D")
+    .add_property("dim_c", &bob::learn::misc::GMMMachine::getNGaussians, "The number of Gaussian components C")
+    .add_property("weights", make_function(&bob::learn::misc::GMMMachine::getWeights, return_value_policy<copy_const_reference>()), &py_gmmmachine_setWeights, "The weights (also known as \"mixing coefficients\")")
     .add_property("means", &py_gmmmachine_getMeans, &py_gmmmachine_setMeans, "The means of the gaussians")
-    .add_property("mean_supervector", make_function((const blitz::Array<double,1>& (bob::machine::GMMMachine::*)(void) const)&bob::machine::GMMMachine::getMeanSupervector, return_value_policy<copy_const_reference>()), &py_gmmmachine_setMeanSupervector,
+    .add_property("mean_supervector", make_function((const blitz::Array<double,1>& (bob::learn::misc::GMMMachine::*)(void) const)&bob::learn::misc::GMMMachine::getMeanSupervector, return_value_policy<copy_const_reference>()), &py_gmmmachine_setMeanSupervector,
                   "The mean supervector of the GMMMachine "
                   "(concatenation of the mean vectors of each Gaussian of the GMMMachine")
     .add_property("variances", &py_gmmmachine_getVariances, &py_gmmmachine_setVariances, "The (diagonal) variances of the Gaussians")
-    .add_property("variance_supervector", make_function((const blitz::Array<double,1>& (bob::machine::GMMMachine::*)(void) const)&bob::machine::GMMMachine::getVarianceSupervector, return_value_policy<copy_const_reference>()), &py_gmmmachine_setVarianceSupervector,
+    .add_property("variance_supervector", make_function((const blitz::Array<double,1>& (bob::learn::misc::GMMMachine::*)(void) const)&bob::learn::misc::GMMMachine::getVarianceSupervector, return_value_policy<copy_const_reference>()), &py_gmmmachine_setVarianceSupervector,
                   "The variance supervector of the GMMMachine "
                   "(concatenation of the variance vectors of each Gaussian of the GMMMachine")
     .add_property("variance_thresholds", &py_gmmmachine_getVarianceThresholds, &py_gmmmachine_setVarianceThresholds,
                   "The variance flooring thresholds for each Gaussian in each dimension")
     .add_property("shape", &py_gmmmachine_get_shape, &py_gmmmachine_set_shape, "A tuple that represents the dimensionality of the GMMMachine ``(n_gaussians, n_inputs)``.")
-    .def("resize", &bob::machine::GMMMachine::resize, (arg("self"), arg("n_gaussians"), arg("n_inputs")),
+    .def("resize", &bob::learn::misc::GMMMachine::resize, (arg("self"), arg("n_gaussians"), arg("n_inputs")),
          "Reset the input dimensionality, and the number of Gaussian components.\n"
          "Initialises the weights to uniform distribution.")
     .def("set_variance_thresholds", &py_gmmmachine_setVarianceThresholdsOther, (arg("self"), arg("variance_threshold")),
          "Set the variance flooring thresholds in each dimension to the same vector for all Gaussian components if the argument is a 1D numpy arrray, and equal for all Gaussian components and dimensions if the parameter is a scalar.")
-    .def("update_gaussian", &bob::machine::GMMMachine::updateGaussian, (arg("self"), arg("i")),
+    .def("update_gaussian", &bob::learn::misc::GMMMachine::updateGaussian, (arg("self"), arg("i")),
          "Get the specified Gaussian component. An exception is thrown if i is out of range.")
 
     .def("log_likelihood", &py_gmmmachine_loglikelihoodA, args("self", "x", "log_weighted_gaussian_likelihoods"),
-         "Output the log likelihood of the sample, x, i.e. log(p(x|bob::machine::GMMMachine)). Inputs are checked.")
+         "Output the log likelihood of the sample, x, i.e. log(p(x|bob::learn::misc::GMMMachine)). Inputs are checked.")
     .def("log_likelihood_", &py_gmmmachine_loglikelihoodA_, args("self", "x", "log_weighted_gaussian_likelihoods"),
-         "Output the log likelihood of the sample, x, i.e. log(p(x|bob::machine::GMMMachine)). Inputs are NOT checked.")
+         "Output the log likelihood of the sample, x, i.e. log(p(x|bob::learn::misc::GMMMachine)). Inputs are NOT checked.")
     .def("log_likelihood", &py_gmmmachine_loglikelihoodB, args("self", "x"),
          " Output the log likelihood of the sample, x, i.e. log(p(x|GMM)). Inputs are checked.")
     .def("log_likelihood_", &py_gmmmachine_loglikelihoodB_, args("self", "x"),
diff --git a/bob/learn/misc/old/gmm_trainer.cc b/bob/learn/misc/old/gmm_trainer.cc
index 49063b7..9ec49c7 100644
--- a/bob/learn/misc/old/gmm_trainer.cc
+++ b/bob/learn/misc/old/gmm_trainer.cc
@@ -14,29 +14,29 @@
 
 using namespace boost::python;
 
-typedef bob::trainer::EMTrainer<bob::machine::GMMMachine, blitz::Array<double,2> > EMTrainerGMMBase;
+typedef bob::learn::misc::EMTrainer<bob::learn::misc::GMMMachine, blitz::Array<double,2> > EMTrainerGMMBase;
 
-static void py_train(EMTrainerGMMBase& trainer, bob::machine::GMMMachine& machine, bob::python::const_ndarray sample)
+static void py_train(EMTrainerGMMBase& trainer, bob::learn::misc::GMMMachine& machine, bob::python::const_ndarray sample)
 {
   trainer.train(machine, sample.bz<double,2>());
 }
 
-static void py_initialize(EMTrainerGMMBase& trainer, bob::machine::GMMMachine& machine, bob::python::const_ndarray sample)
+static void py_initialize(EMTrainerGMMBase& trainer, bob::learn::misc::GMMMachine& machine, bob::python::const_ndarray sample)
 {
   trainer.initialize(machine, sample.bz<double,2>());
 }
 
-static void py_finalize(EMTrainerGMMBase& trainer, bob::machine::GMMMachine& machine, bob::python::const_ndarray sample)
+static void py_finalize(EMTrainerGMMBase& trainer, bob::learn::misc::GMMMachine& machine, bob::python::const_ndarray sample)
 {
   trainer.finalize(machine, sample.bz<double,2>());
 }
 
-static void py_eStep(EMTrainerGMMBase& trainer, bob::machine::GMMMachine& machine, bob::python::const_ndarray sample)
+static void py_eStep(EMTrainerGMMBase& trainer, bob::learn::misc::GMMMachine& machine, bob::python::const_ndarray sample)
 {
   trainer.eStep(machine, sample.bz<double,2>());
 }
 
-static void py_mStep(EMTrainerGMMBase& trainer, bob::machine::GMMMachine& machine, bob::python::const_ndarray sample)
+static void py_mStep(EMTrainerGMMBase& trainer, bob::learn::misc::GMMMachine& machine, bob::python::const_ndarray sample)
 {
   trainer.mStep(machine, sample.bz<double,2>());
 }
@@ -59,30 +59,30 @@ void bind_trainer_gmm() {
     .def("compute_likelihood", &EMTrainerGMMBase::computeLikelihood, (arg("self"), arg("machine")), "Returns the likelihood.")
   ;
 
-  class_<bob::trainer::GMMTrainer, boost::noncopyable, bases<EMTrainerGMMBase> >("GMMTrainer",
+  class_<bob::learn::misc::GMMTrainer, boost::noncopyable, bases<EMTrainerGMMBase> >("GMMTrainer",
       "This class implements the E-step of the expectation-maximisation algorithm for a GMM Machine.\n"
       "See Section 9.2.2 of Bishop, \"Pattern recognition and machine learning\", 2006", no_init)
-    .add_property("gmm_statistics", make_function(&bob::trainer::GMMTrainer::getGMMStats, return_value_policy<copy_const_reference>()), &bob::trainer::GMMTrainer::setGMMStats, "The internal GMM statistics. Useful to parallelize the E-step.")
+    .add_property("gmm_statistics", make_function(&bob::learn::misc::GMMTrainer::getGMMStats, return_value_policy<copy_const_reference>()), &bob::learn::misc::GMMTrainer::setGMMStats, "The internal GMM statistics. Useful to parallelize the E-step.")
   ;
 
-  class_<bob::trainer::MAP_GMMTrainer, boost::noncopyable, bases<bob::trainer::GMMTrainer> >("MAP_GMMTrainer",
+  class_<bob::learn::misc::MAP_GMMTrainer, boost::noncopyable, bases<bob::learn::misc::GMMTrainer> >("MAP_GMMTrainer",
       "This class implements the maximum a posteriori M-step "
       "of the expectation-maximisation algorithm for a GMM Machine. "
       "The prior parameters are encoded in the form of a GMM (e.g. a universal background model). "
       "The EM algorithm thus performs GMM adaptation.\n"
       "See Section 3.4 of Reynolds et al., \"Speaker Verification Using Adapted Gaussian Mixture Models\", Digital Signal Processing, 2000. We use a \"single adaptation coefficient\", alpha_i, and thus a single relevance factor, r.",
       init<optional<const double, const bool, const bool, const bool, const double> >((arg("self"), arg("relevance_factor")=0, arg("update_means")=true, arg("update_variances")=false, arg("update_weights")=false, arg("responsibilities_threshold")=std::numeric_limits<double>::epsilon())))
-    .def("set_prior_gmm", &bob::trainer::MAP_GMMTrainer::setPriorGMM, (arg("self"), arg("prior_gmm")),
+    .def("set_prior_gmm", &bob::learn::misc::MAP_GMMTrainer::setPriorGMM, (arg("self"), arg("prior_gmm")),
       "Set the GMM to use as a prior for MAP adaptation. "
       "Generally, this is a \"universal background model\" (UBM), "
       "also referred to as a \"world model\".")
-    .def("set_t3_map", &bob::trainer::MAP_GMMTrainer::setT3MAP, (arg("self"), arg("alpha")),
+    .def("set_t3_map", &bob::learn::misc::MAP_GMMTrainer::setT3MAP, (arg("self"), arg("alpha")),
       "Use a torch3-like MAP adaptation rule instead of Reynolds'one.")
-    .def("unset_t3_map", &bob::trainer::MAP_GMMTrainer::unsetT3MAP, (arg("self")),
+    .def("unset_t3_map", &bob::learn::misc::MAP_GMMTrainer::unsetT3MAP, (arg("self")),
       "Use a Reynolds' MAP adaptation (rather than torch3-like).")
   ;
 
-  class_<bob::trainer::ML_GMMTrainer, boost::noncopyable, bases<bob::trainer::GMMTrainer> >("ML_GMMTrainer",
+  class_<bob::learn::misc::ML_GMMTrainer, boost::noncopyable, bases<bob::learn::misc::GMMTrainer> >("ML_GMMTrainer",
       "This class implements the maximum likelihood M-step of the expectation-maximisation algorithm for a GMM Machine.\n"
       "See Section 9.2.2 of Bishop, \"Pattern recognition and machine learning\", 2006",
       init<optional<const bool, const bool, const bool, const double> >((arg("self"), arg("update_means")=true, arg("update_variances")=false, arg("update_weights")=false, arg("responsibilities_threshold")=std::numeric_limits<double>::epsilon())))
diff --git a/bob/learn/misc/old/ivector.cc b/bob/learn/misc/old/ivector.cc
index 1e72d58..b36e66e 100644
--- a/bob/learn/misc/old/ivector.cc
+++ b/bob/learn/misc/old/ivector.cc
@@ -15,27 +15,27 @@
 
 using namespace boost::python;
 
-static void py_iv_setT(bob::machine::IVectorMachine& machine,
+static void py_iv_setT(bob::learn::misc::IVectorMachine& machine,
   bob::python::const_ndarray T)
 {
   machine.setT(T.bz<double,2>());
 }
 
-static void py_iv_setSigma(bob::machine::IVectorMachine& machine,
+static void py_iv_setSigma(bob::learn::misc::IVectorMachine& machine,
   bob::python::const_ndarray sigma)
 {
   machine.setSigma(sigma.bz<double,1>());
 }
 
-static void py_computeIdTtSigmaInvT1(const bob::machine::IVectorMachine& machine,
-  const bob::machine::GMMStats& gs, bob::python::ndarray output)
+static void py_computeIdTtSigmaInvT1(const bob::learn::misc::IVectorMachine& machine,
+  const bob::learn::misc::GMMStats& gs, bob::python::ndarray output)
 {
   blitz::Array<double,2> output_ = output.bz<double,2>();
   machine.computeIdTtSigmaInvT(gs, output_);
 }
 
-static object py_computeIdTtSigmaInvT2(const bob::machine::IVectorMachine& machine,
-  const bob::machine::GMMStats& gs)
+static object py_computeIdTtSigmaInvT2(const bob::learn::misc::IVectorMachine& machine,
+  const bob::learn::misc::GMMStats& gs)
 {
   bob::python::ndarray output(bob::io::base::array::t_float64, machine.getDimRt(), machine.getDimRt());
   blitz::Array<double,2> output_ = output.bz<double,2>();
@@ -43,15 +43,15 @@ static object py_computeIdTtSigmaInvT2(const bob::machine::IVectorMachine& machi
   return output.self();
 }
 
-static void py_computeTtSigmaInvFnorm1(const bob::machine::IVectorMachine& machine,
-  const bob::machine::GMMStats& gs, bob::python::ndarray output)
+static void py_computeTtSigmaInvFnorm1(const bob::learn::misc::IVectorMachine& machine,
+  const bob::learn::misc::GMMStats& gs, bob::python::ndarray output)
 {
   blitz::Array<double,1> output_ = output.bz<double,1>();
   machine.computeTtSigmaInvFnorm(gs, output_);
 }
 
-static object py_computeTtSigmaInvFnorm2(const bob::machine::IVectorMachine& machine,
-  const bob::machine::GMMStats& gs)
+static object py_computeTtSigmaInvFnorm2(const bob::learn::misc::IVectorMachine& machine,
+  const bob::learn::misc::GMMStats& gs)
 {
   bob::python::ndarray output(bob::io::base::array::t_float64, machine.getDimRt());
   blitz::Array<double,1> output_ = output.bz<double,1>();
@@ -59,22 +59,22 @@ static object py_computeTtSigmaInvFnorm2(const bob::machine::IVectorMachine& mac
   return output.self();
 }
 
-static void py_iv_forward1(const bob::machine::IVectorMachine& machine,
-  const bob::machine::GMMStats& gs, bob::python::ndarray ivector)
+static void py_iv_forward1(const bob::learn::misc::IVectorMachine& machine,
+  const bob::learn::misc::GMMStats& gs, bob::python::ndarray ivector)
 {
   blitz::Array<double,1> ivector_ = ivector.bz<double,1>();
   machine.forward(gs, ivector_);
 }
 
-static void py_iv_forward1_(const bob::machine::IVectorMachine& machine,
-  const bob::machine::GMMStats& gs, bob::python::ndarray ivector)
+static void py_iv_forward1_(const bob::learn::misc::IVectorMachine& machine,
+  const bob::learn::misc::GMMStats& gs, bob::python::ndarray ivector)
 {
   blitz::Array<double,1> ivector_ = ivector.bz<double,1>();
   machine.forward_(gs, ivector_);
 }
 
-static object py_iv_forward2(const bob::machine::IVectorMachine& machine,
-  const bob::machine::GMMStats& gs)
+static object py_iv_forward2(const bob::learn::misc::IVectorMachine& machine,
+  const bob::learn::misc::GMMStats& gs)
 {
   bob::python::ndarray ivector(bob::io::base::array::t_float64, machine.getDimRt());
   blitz::Array<double,1> ivector_ = ivector.bz<double,1>();
@@ -84,19 +84,19 @@ static object py_iv_forward2(const bob::machine::IVectorMachine& machine,
 
 
 
-static boost::shared_ptr<bob::machine::IVectorMachine> _init(boost::python::object file){
+static boost::shared_ptr<bob::learn::misc::IVectorMachine> _init(boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  return boost::shared_ptr<bob::machine::IVectorMachine>(new bob::machine::IVectorMachine(*hdf5->f));
+  return boost::shared_ptr<bob::learn::misc::IVectorMachine>(new bob::learn::misc::IVectorMachine(*hdf5->f));
 }
 
-static void _load(bob::machine::IVectorMachine& self, boost::python::object file){
+static void _load(bob::learn::misc::IVectorMachine& self, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.load(*hdf5->f);
 }
 
-static void _save(const bob::machine::IVectorMachine& self, boost::python::object file){
+static void _save(const bob::learn::misc::IVectorMachine& self, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.save(*hdf5->f);
@@ -105,24 +105,24 @@ static void _save(const bob::machine::IVectorMachine& self, boost::python::objec
 void bind_machine_ivector()
 {
   // TODO: reuse binding from generic machine
-  class_<bob::machine::IVectorMachine, boost::shared_ptr<bob::machine::IVectorMachine> >("IVectorMachine", "An IVectorMachine to extract i-vector.\n\nReferences:\n[1] 'Front End Factor Analysis for Speaker Verification', N. Dehak, P. Kenny, R. Dehak, P. Dumouchel, P. Ouellet, IEEE Transactions on Audio, Speech and Language Processing, 2010, vol. 19, issue 4, pp. 788-798", init<boost::shared_ptr<bob::machine::GMMMachine>, optional<const size_t, const size_t> >((arg("self"), arg("ubm"), arg("rt")=1, arg("variance_threshold")=1e-10), "Builds a new IVectorMachine."))
+  class_<bob::learn::misc::IVectorMachine, boost::shared_ptr<bob::learn::misc::IVectorMachine> >("IVectorMachine", "An IVectorMachine to extract i-vector.\n\nReferences:\n[1] 'Front End Factor Analysis for Speaker Verification', N. Dehak, P. Kenny, R. Dehak, P. Dumouchel, P. Ouellet, IEEE Transactions on Audio, Speech and Language Processing, 2010, vol. 19, issue 4, pp. 788-798", init<boost::shared_ptr<bob::learn::misc::GMMMachine>, optional<const size_t, const size_t> >((arg("self"), arg("ubm"), arg("rt")=1, arg("variance_threshold")=1e-10), "Builds a new IVectorMachine."))
     .def(init<>((arg("self")), "Constructs a new empty IVectorMachine."))
     .def("__init__", boost::python::make_constructor(&_init), "Constructs a new IVectorMachine from a configuration file.")
-    .def(init<const bob::machine::IVectorMachine&>((arg("self"), arg("machine")), "Copy constructs an IVectorMachine"))
+    .def(init<const bob::learn::misc::IVectorMachine&>((arg("self"), arg("machine")), "Copy constructs an IVectorMachine"))
     .def(self == self)
     .def(self != self)
-    .def("is_similar_to", &bob::machine::IVectorMachine::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this IVectorMachine with the 'other' one to be approximately the same.")
+    .def("is_similar_to", &bob::learn::misc::IVectorMachine::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this IVectorMachine with the 'other' one to be approximately the same.")
     .def("load", &_load, (arg("self"), arg("config")), "Loads the configuration parameters from a configuration file.")
     .def("save", &_save, (arg("self"), arg("config")), "Saves the configuration parameters to a configuration file.")
-    .def("resize", &bob::machine::IVectorMachine::resize, (arg("self"), arg("rt")), "Reset the dimensionality of the Total Variability subspace T.")
-    .add_property("ubm", &bob::machine::IVectorMachine::getUbm, &bob::machine::IVectorMachine::setUbm, "The UBM GMM attached to this Joint Factor Analysis model")
-    .add_property("t", make_function(&bob::machine::IVectorMachine::getT, return_value_policy<copy_const_reference>()), &py_iv_setT, "The subspace T (Total Variability matrix)")
-    .add_property("sigma", make_function(&bob::machine::IVectorMachine::getSigma, return_value_policy<copy_const_reference>()), &py_iv_setSigma, "The residual matrix of the model sigma")
-    .add_property("variance_threshold", &bob::machine::IVectorMachine::getVarianceThreshold, &bob::machine::IVectorMachine::setVarianceThreshold, "Threshold for the variance contained in sigma")
-    .add_property("dim_c", &bob::machine::IVectorMachine::getDimC, "The number of Gaussian components")
-    .add_property("dim_d", &bob::machine::IVectorMachine::getDimD, "The dimensionality of the feature space")
-    .add_property("dim_cd", &bob::machine::IVectorMachine::getDimCD, "The dimensionality of the supervector space")
-    .add_property("dim_rt", &bob::machine::IVectorMachine::getDimRt, "The dimensionality of the Total Variability subspace (rank of T)")
+    .def("resize", &bob::learn::misc::IVectorMachine::resize, (arg("self"), arg("rt")), "Reset the dimensionality of the Total Variability subspace T.")
+    .add_property("ubm", &bob::learn::misc::IVectorMachine::getUbm, &bob::learn::misc::IVectorMachine::setUbm, "The UBM GMM attached to this Joint Factor Analysis model")
+    .add_property("t", make_function(&bob::learn::misc::IVectorMachine::getT, return_value_policy<copy_const_reference>()), &py_iv_setT, "The subspace T (Total Variability matrix)")
+    .add_property("sigma", make_function(&bob::learn::misc::IVectorMachine::getSigma, return_value_policy<copy_const_reference>()), &py_iv_setSigma, "The residual matrix of the model sigma")
+    .add_property("variance_threshold", &bob::learn::misc::IVectorMachine::getVarianceThreshold, &bob::learn::misc::IVectorMachine::setVarianceThreshold, "Threshold for the variance contained in sigma")
+    .add_property("dim_c", &bob::learn::misc::IVectorMachine::getDimC, "The number of Gaussian components")
+    .add_property("dim_d", &bob::learn::misc::IVectorMachine::getDimD, "The dimensionality of the feature space")
+    .add_property("dim_cd", &bob::learn::misc::IVectorMachine::getDimCD, "The dimensionality of the supervector space")
+    .add_property("dim_rt", &bob::learn::misc::IVectorMachine::getDimRt, "The dimensionality of the Total Variability subspace (rank of T)")
     .def("__compute_Id_TtSigmaInvT__", &py_computeIdTtSigmaInvT1, (arg("self"), arg("gmmstats"), arg("output")), "Computes (Id + sum_{c=1}^{C} N_{i,j,c} T^{T} Sigma_{c}^{-1} T)")
     .def("__compute_Id_TtSigmaInvT__", &py_computeIdTtSigmaInvT2, (arg("self"), arg("gmmstats")), "Computes (Id + sum_{c=1}^{C} N_{i,j,c} T^{T} Sigma_{c}^{-1} T)")
     .def("__compute_TtSigmaInvFnorm__", &py_computeTtSigmaInvFnorm1, (arg("self"), arg("gmmstats"), arg("output")), "Computes T^{T} Sigma^{-1} sum_{c=1}^{C} (F_c - N_c mean(c))")
diff --git a/bob/learn/misc/old/ivector_trainer.cc b/bob/learn/misc/old/ivector_trainer.cc
index 9e09d0b..e910af9 100644
--- a/bob/learn/misc/old/ivector_trainer.cc
+++ b/bob/learn/misc/old/ivector_trainer.cc
@@ -14,67 +14,67 @@
 
 using namespace boost::python;
 
-typedef bob::trainer::EMTrainer<bob::machine::IVectorMachine, std::vector<bob::machine::GMMStats> > EMTrainerIVectorBase;
+typedef bob::learn::misc::EMTrainer<bob::learn::misc::IVectorMachine, std::vector<bob::learn::misc::GMMStats> > EMTrainerIVectorBase;
 
 static void py_train(EMTrainerIVectorBase& trainer,
-  bob::machine::IVectorMachine& machine, object data)
+  bob::learn::misc::IVectorMachine& machine, object data)
 {
-  stl_input_iterator<bob::machine::GMMStats> dbegin(data), dend;
-  std::vector<bob::machine::GMMStats> vdata(dbegin, dend);
+  stl_input_iterator<bob::learn::misc::GMMStats> dbegin(data), dend;
+  std::vector<bob::learn::misc::GMMStats> vdata(dbegin, dend);
   trainer.train(machine, vdata);
 }
 
 static void py_initialize(EMTrainerIVectorBase& trainer,
-  bob::machine::IVectorMachine& machine, object data)
+  bob::learn::misc::IVectorMachine& machine, object data)
 {
-  stl_input_iterator<bob::machine::GMMStats> dbegin(data), dend;
-  std::vector<bob::machine::GMMStats> vdata(dbegin, dend);
+  stl_input_iterator<bob::learn::misc::GMMStats> dbegin(data), dend;
+  std::vector<bob::learn::misc::GMMStats> vdata(dbegin, dend);
   trainer.initialize(machine, vdata);
 }
 
 static void py_eStep(EMTrainerIVectorBase& trainer,
-  bob::machine::IVectorMachine& machine, object data)
+  bob::learn::misc::IVectorMachine& machine, object data)
 {
-  stl_input_iterator<bob::machine::GMMStats> dbegin(data), dend;
-  std::vector<bob::machine::GMMStats> vdata(dbegin, dend);
+  stl_input_iterator<bob::learn::misc::GMMStats> dbegin(data), dend;
+  std::vector<bob::learn::misc::GMMStats> vdata(dbegin, dend);
   trainer.eStep(machine, vdata);
 }
 
 static void py_mStep(EMTrainerIVectorBase& trainer,
-  bob::machine::IVectorMachine& machine, object data)
+  bob::learn::misc::IVectorMachine& machine, object data)
 {
-  stl_input_iterator<bob::machine::GMMStats> dbegin(data), dend;
-  std::vector<bob::machine::GMMStats> vdata(dbegin, dend);
+  stl_input_iterator<bob::learn::misc::GMMStats> dbegin(data), dend;
+  std::vector<bob::learn::misc::GMMStats> vdata(dbegin, dend);
   trainer.mStep(machine, vdata);
 }
 
 static void py_finalize(EMTrainerIVectorBase& trainer,
-  bob::machine::IVectorMachine& machine, object data)
+  bob::learn::misc::IVectorMachine& machine, object data)
 {
-  stl_input_iterator<bob::machine::GMMStats> dbegin(data), dend;
-  std::vector<bob::machine::GMMStats> vdata(dbegin, dend);
+  stl_input_iterator<bob::learn::misc::GMMStats> dbegin(data), dend;
+  std::vector<bob::learn::misc::GMMStats> vdata(dbegin, dend);
   trainer.finalize(machine, vdata);
 }
 
-static void py_set_AccNijWij2(bob::trainer::IVectorTrainer& trainer,
+static void py_set_AccNijWij2(bob::learn::misc::IVectorTrainer& trainer,
   bob::python::const_ndarray acc)
 {
   trainer.setAccNijWij2(acc.bz<double,3>());
 }
 
-static void py_set_AccFnormijWij(bob::trainer::IVectorTrainer& trainer,
+static void py_set_AccFnormijWij(bob::learn::misc::IVectorTrainer& trainer,
   bob::python::const_ndarray acc)
 {
   trainer.setAccFnormijWij(acc.bz<double,3>());
 }
 
-static void py_set_AccNij(bob::trainer::IVectorTrainer& trainer,
+static void py_set_AccNij(bob::learn::misc::IVectorTrainer& trainer,
   bob::python::const_ndarray acc)
 {
   trainer.setAccNij(acc.bz<double,1>());
 }
 
-static void py_set_AccSnormij(bob::trainer::IVectorTrainer& trainer,
+static void py_set_AccSnormij(bob::learn::misc::IVectorTrainer& trainer,
   bob::python::const_ndarray acc)
 {
   trainer.setAccSnormij(acc.bz<double,2>());
@@ -97,14 +97,14 @@ void bind_trainer_ivector()
   ;
 
 
-  class_<bob::trainer::IVectorTrainer, boost::shared_ptr<bob::trainer::IVectorTrainer>, boost::noncopyable, bases<EMTrainerIVectorBase> >("IVectorTrainer", "An trainer to extract i-vector (i.e. for training the Total Variability matrix)\n\nReferences:\n[1] 'Front End Factor Analysis for Speaker Verification', N. Dehak, P. Kenny, R. Dehak, P. Dumouchel, P. Ouellet, IEEE Transactions on Audio, Speech and Language Processing, 2010, vol. 19, issue 4, pp. 788-798", init<optional<bool, double, size_t, bool> >((arg("self"), arg("update_sigma")=false, arg("convergence_threshold")=0.001, arg("max_iterations")=10, arg("compute_likelihood")=false), "Builds a new IVectorTrainer."))
-    .def(init<const bob::trainer::IVectorTrainer&>((arg("self"), arg("trainer")), "Copy constructs an IVectorTrainer"))
+  class_<bob::learn::misc::IVectorTrainer, boost::shared_ptr<bob::learn::misc::IVectorTrainer>, boost::noncopyable, bases<EMTrainerIVectorBase> >("IVectorTrainer", "A trainer to extract i-vectors (i.e. for training the Total Variability matrix)\n\nReferences:\n[1] 'Front End Factor Analysis for Speaker Verification', N. Dehak, P. Kenny, R. Dehak, P. Dumouchel, P. Ouellet, IEEE Transactions on Audio, Speech and Language Processing, 2010, vol. 19, issue 4, pp. 788-798", init<optional<bool, double, size_t, bool> >((arg("self"), arg("update_sigma")=false, arg("convergence_threshold")=0.001, arg("max_iterations")=10, arg("compute_likelihood")=false), "Builds a new IVectorTrainer."))
+    .def(init<const bob::learn::misc::IVectorTrainer&>((arg("self"), arg("trainer")), "Copy constructs an IVectorTrainer"))
     .def(self == self)
     .def(self != self)
-    .def("is_similar_to", &bob::trainer::IVectorTrainer::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this IVectorTrainer with the 'other' one to be approximately the same.")
-    .add_property("acc_nij_wij2", make_function(&bob::trainer::IVectorTrainer::getAccNijWij2, return_value_policy<copy_const_reference>()), &py_set_AccNijWij2, "Accumulator updated during the E-step")
-    .add_property("acc_fnormij_wij", make_function(&bob::trainer::IVectorTrainer::getAccFnormijWij, return_value_policy<copy_const_reference>()), &py_set_AccFnormijWij, "Accumulator updated during the E-step")
-    .add_property("acc_nij", make_function(&bob::trainer::IVectorTrainer::getAccNij, return_value_policy<copy_const_reference>()), &py_set_AccNij, "Accumulator updated during the E-step")
-    .add_property("acc_snormij", make_function(&bob::trainer::IVectorTrainer::getAccSnormij, return_value_policy<copy_const_reference>()), &py_set_AccSnormij, "Accumulator updated during the E-step")
+    .def("is_similar_to", &bob::learn::misc::IVectorTrainer::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this IVectorTrainer with the 'other' one to be approximately the same.")
+    .add_property("acc_nij_wij2", make_function(&bob::learn::misc::IVectorTrainer::getAccNijWij2, return_value_policy<copy_const_reference>()), &py_set_AccNijWij2, "Accumulator updated during the E-step")
+    .add_property("acc_fnormij_wij", make_function(&bob::learn::misc::IVectorTrainer::getAccFnormijWij, return_value_policy<copy_const_reference>()), &py_set_AccFnormijWij, "Accumulator updated during the E-step")
+    .add_property("acc_nij", make_function(&bob::learn::misc::IVectorTrainer::getAccNij, return_value_policy<copy_const_reference>()), &py_set_AccNij, "Accumulator updated during the E-step")
+    .add_property("acc_snormij", make_function(&bob::learn::misc::IVectorTrainer::getAccSnormij, return_value_policy<copy_const_reference>()), &py_set_AccSnormij, "Accumulator updated during the E-step")
   ;
 }
diff --git a/bob/learn/misc/old/jfa.cc b/bob/learn/misc/old/jfa.cc
index 8f58b3b..b5cc24a 100644
--- a/bob/learn/misc/old/jfa.cc
+++ b/bob/learn/misc/old/jfa.cc
@@ -18,50 +18,50 @@
 
 using namespace boost::python;
 
-static void py_jfa_setU(bob::machine::JFABase& machine,
+static void py_jfa_setU(bob::learn::misc::JFABase& machine,
   bob::python::const_ndarray U)
 {
   machine.setU(U.bz<double,2>());
 }
 
-static void py_jfa_setV(bob::machine::JFABase& machine,
+static void py_jfa_setV(bob::learn::misc::JFABase& machine,
   bob::python::const_ndarray V)
 {
   machine.setV(V.bz<double,2>());
 }
 
-static void py_jfa_setD(bob::machine::JFABase& machine,
+static void py_jfa_setD(bob::learn::misc::JFABase& machine,
   bob::python::const_ndarray D)
 {
   machine.setD(D.bz<double,1>());
 }
 
-static void py_jfa_setY(bob::machine::JFAMachine& machine, bob::python::const_ndarray Y) {
+static void py_jfa_setY(bob::learn::misc::JFAMachine& machine, bob::python::const_ndarray Y) {
   const blitz::Array<double,1>& Y_ = Y.bz<double,1>();
   machine.setY(Y_);
 }
 
-static void py_jfa_setZ(bob::machine::JFAMachine& machine, bob::python::const_ndarray Z) {
+static void py_jfa_setZ(bob::learn::misc::JFAMachine& machine, bob::python::const_ndarray Z) {
   const blitz::Array<double,1> Z_ = Z.bz<double,1>();
   machine.setZ(Z_);
 }
 
-static void py_jfa_estimateX(bob::machine::JFAMachine& machine,
-  const bob::machine::GMMStats& gmm_stats, bob::python::ndarray x)
+static void py_jfa_estimateX(bob::learn::misc::JFAMachine& machine,
+  const bob::learn::misc::GMMStats& gmm_stats, bob::python::ndarray x)
 {
   blitz::Array<double,1> x_ = x.bz<double,1>();
   machine.estimateX(gmm_stats, x_);
 }
 
-static void py_jfa_estimateUx(bob::machine::JFAMachine& machine,
-  const bob::machine::GMMStats& gmm_stats, bob::python::ndarray ux)
+static void py_jfa_estimateUx(bob::learn::misc::JFAMachine& machine,
+  const bob::learn::misc::GMMStats& gmm_stats, bob::python::ndarray ux)
 {
   blitz::Array<double,1> ux_ = ux.bz<double,1>();
   machine.estimateUx(gmm_stats, ux_);
 }
 
-static double py_jfa_forwardUx(bob::machine::JFAMachine& machine,
-  const bob::machine::GMMStats& gmm_stats, bob::python::const_ndarray ux)
+static double py_jfa_forwardUx(bob::learn::misc::JFAMachine& machine,
+  const bob::learn::misc::GMMStats& gmm_stats, bob::python::const_ndarray ux)
 {
   double score;
   machine.forward(gmm_stats, ux.bz<double,1>(), score);
@@ -69,38 +69,38 @@ static double py_jfa_forwardUx(bob::machine::JFAMachine& machine,
 }
 
 
-static void py_isv_setU(bob::machine::ISVBase& machine,
+static void py_isv_setU(bob::learn::misc::ISVBase& machine,
   bob::python::const_ndarray U)
 {
   machine.setU(U.bz<double,2>());
 }
 
-static void py_isv_setD(bob::machine::ISVBase& machine,
+static void py_isv_setD(bob::learn::misc::ISVBase& machine,
   bob::python::const_ndarray D)
 {
   machine.setD(D.bz<double,1>());
 }
 
-static void py_isv_setZ(bob::machine::ISVMachine& machine, bob::python::const_ndarray Z) {
+static void py_isv_setZ(bob::learn::misc::ISVMachine& machine, bob::python::const_ndarray Z) {
   machine.setZ(Z.bz<double,1>());
 }
 
-static void py_isv_estimateX(bob::machine::ISVMachine& machine,
-  const bob::machine::GMMStats& gmm_stats, bob::python::ndarray x)
+static void py_isv_estimateX(bob::learn::misc::ISVMachine& machine,
+  const bob::learn::misc::GMMStats& gmm_stats, bob::python::ndarray x)
 {
   blitz::Array<double,1> x_ = x.bz<double,1>();
   machine.estimateX(gmm_stats, x_);
 }
 
-static void py_isv_estimateUx(bob::machine::ISVMachine& machine,
-  const bob::machine::GMMStats& gmm_stats, bob::python::ndarray ux)
+static void py_isv_estimateUx(bob::learn::misc::ISVMachine& machine,
+  const bob::learn::misc::GMMStats& gmm_stats, bob::python::ndarray ux)
 {
   blitz::Array<double,1> ux_ = ux.bz<double,1>();
   machine.estimateUx(gmm_stats, ux_);
 }
 
-static double py_isv_forwardUx(bob::machine::ISVMachine& machine,
-  const bob::machine::GMMStats& gmm_stats, bob::python::const_ndarray ux)
+static double py_isv_forwardUx(bob::learn::misc::ISVMachine& machine,
+  const bob::learn::misc::GMMStats& gmm_stats, bob::python::const_ndarray ux)
 {
   double score;
   machine.forward(gmm_stats, ux.bz<double,1>(), score);
@@ -108,107 +108,107 @@ static double py_isv_forwardUx(bob::machine::ISVMachine& machine,
 }
 
 
-static double py_gen1_forward(const bob::machine::Machine<bob::machine::GMMStats, double>& m,
-  const bob::machine::GMMStats& stats)
+static double py_gen1_forward(const bob::learn::misc::Machine<bob::learn::misc::GMMStats, double>& m,
+  const bob::learn::misc::GMMStats& stats)
 {
   double output;
   m.forward(stats, output);
   return output;
 }
 
-static double py_gen1_forward_(const bob::machine::Machine<bob::machine::GMMStats, double>& m,
-  const bob::machine::GMMStats& stats)
+static double py_gen1_forward_(const bob::learn::misc::Machine<bob::learn::misc::GMMStats, double>& m,
+  const bob::learn::misc::GMMStats& stats)
 {
   double output;
   m.forward_(stats, output);
   return output;
 }
 
-static void py_gen2b_forward(const bob::machine::Machine<bob::machine::GMMStats, blitz::Array<double,1> >& m,
-  const bob::machine::GMMStats& stats, bob::python::const_ndarray output)
+static void py_gen2b_forward(const bob::learn::misc::Machine<bob::learn::misc::GMMStats, blitz::Array<double,1> >& m,
+  const bob::learn::misc::GMMStats& stats, bob::python::const_ndarray output)
 {
   blitz::Array<double,1> output_ = output.bz<double,1>();
   m.forward(stats, output_);
 }
 
-static void py_gen2b_forward_(const bob::machine::Machine<bob::machine::GMMStats, blitz::Array<double,1> >& m,
-  const bob::machine::GMMStats& stats, bob::python::const_ndarray output)
+static void py_gen2b_forward_(const bob::learn::misc::Machine<bob::learn::misc::GMMStats, blitz::Array<double,1> >& m,
+  const bob::learn::misc::GMMStats& stats, bob::python::const_ndarray output)
 {
   blitz::Array<double,1> output_ = output.bz<double,1>();
   m.forward_(stats, output_);
 }
 
 
-static boost::shared_ptr<bob::machine::JFABase> jb_init(boost::python::object file){
+static boost::shared_ptr<bob::learn::misc::JFABase> jb_init(boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  return boost::shared_ptr<bob::machine::JFABase>(new bob::machine::JFABase(*hdf5->f));
+  return boost::shared_ptr<bob::learn::misc::JFABase>(new bob::learn::misc::JFABase(*hdf5->f));
 }
 
-static void jb_load(bob::machine::JFABase& self, boost::python::object file){
+static void jb_load(bob::learn::misc::JFABase& self, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.load(*hdf5->f);
 }
 
-static void jb_save(const bob::machine::JFABase& self, boost::python::object file){
+static void jb_save(const bob::learn::misc::JFABase& self, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.save(*hdf5->f);
 }
 
 
-static boost::shared_ptr<bob::machine::JFAMachine> jm_init(boost::python::object file){
+static boost::shared_ptr<bob::learn::misc::JFAMachine> jm_init(boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  return boost::shared_ptr<bob::machine::JFAMachine>(new bob::machine::JFAMachine(*hdf5->f));
+  return boost::shared_ptr<bob::learn::misc::JFAMachine>(new bob::learn::misc::JFAMachine(*hdf5->f));
 }
 
-static void jm_load(bob::machine::JFAMachine& self, boost::python::object file){
+static void jm_load(bob::learn::misc::JFAMachine& self, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.load(*hdf5->f);
 }
 
-static void jm_save(const bob::machine::JFAMachine& self, boost::python::object file){
+static void jm_save(const bob::learn::misc::JFAMachine& self, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.save(*hdf5->f);
 }
 
 
-static boost::shared_ptr<bob::machine::ISVBase> ib_init(boost::python::object file){
+static boost::shared_ptr<bob::learn::misc::ISVBase> ib_init(boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  return boost::shared_ptr<bob::machine::ISVBase>(new bob::machine::ISVBase(*hdf5->f));
+  return boost::shared_ptr<bob::learn::misc::ISVBase>(new bob::learn::misc::ISVBase(*hdf5->f));
 }
 
-static void ib_load(bob::machine::ISVBase& self, boost::python::object file){
+static void ib_load(bob::learn::misc::ISVBase& self, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.load(*hdf5->f);
 }
 
-static void ib_save(const bob::machine::ISVBase& self, boost::python::object file){
+static void ib_save(const bob::learn::misc::ISVBase& self, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.save(*hdf5->f);
 }
 
 
-static boost::shared_ptr<bob::machine::ISVMachine> im_init(boost::python::object file){
+static boost::shared_ptr<bob::learn::misc::ISVMachine> im_init(boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  return boost::shared_ptr<bob::machine::ISVMachine>(new bob::machine::ISVMachine(*hdf5->f));
+  return boost::shared_ptr<bob::learn::misc::ISVMachine>(new bob::learn::misc::ISVMachine(*hdf5->f));
 }
 
-static void im_load(bob::machine::ISVMachine& self, boost::python::object file){
+static void im_load(bob::learn::misc::ISVMachine& self, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.load(*hdf5->f);
 }
 
-static void im_save(const bob::machine::ISVMachine& self, boost::python::object file){
+static void im_save(const bob::learn::misc::ISVMachine& self, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.save(*hdf5->f);
@@ -216,106 +216,106 @@ static void im_save(const bob::machine::ISVMachine& self, boost::python::object
 
 void bind_machine_jfa()
 {
-  class_<bob::machine::Machine<bob::machine::GMMStats, double>, boost::noncopyable>("MachineGMMStatsScalarBase",
-      "Root class for all Machine<bob::machine::GMMStats, double>", no_init)
+  class_<bob::learn::misc::Machine<bob::learn::misc::GMMStats, double>, boost::noncopyable>("MachineGMMStatsScalarBase",
+      "Root class for all Machine<bob::learn::misc::GMMStats, double>", no_init)
     .def("__call__", &py_gen1_forward_, (arg("self"), arg("input")), "Executes the machine on the GMMStats, and returns the (scalar) output. NO CHECK is performed.")
     .def("forward", &py_gen1_forward, (arg("self"), arg("input")), "Executes the machine on the GMMStats, and returns the (scalar) output.")
     .def("forward_", &py_gen1_forward_, (arg("self"), arg("input")), "Executes the machine on the GMMStats, and returns the (scalar) output. NO CHECK is performed.")
   ;
 
-  class_<bob::machine::Machine<bob::machine::GMMStats, blitz::Array<double,1> >, boost::noncopyable>("MachineGMMStatsA1DBase",
-      "Root class for all Machine<bob::machine::GMMStats, blitz::Array<double,1>", no_init)
+  class_<bob::learn::misc::Machine<bob::learn::misc::GMMStats, blitz::Array<double,1> >, boost::noncopyable>("MachineGMMStatsA1DBase",
+      "Root class for all Machine<bob::learn::misc::GMMStats, blitz::Array<double,1> >", no_init)
     .def("__call__", &py_gen2b_forward_, (arg("self"), arg("input"), arg("output")), "Executes the machine on the GMMStats, and returns the (scalar) output. NO CHECK is performed.")
     .def("forward", &py_gen2b_forward, (arg("self"), arg("input"), arg("output")), "Executes the machine on the GMMStats, and returns the (scalar) output.")
     .def("forward_", &py_gen2b_forward_, (arg("self"), arg("input"), arg("output")), "Executes the machine on the GMMStats, and returns the (scalar) output. NO CHECK is performed.")
   ;
 
 
-  class_<bob::machine::JFABase, boost::shared_ptr<bob::machine::JFABase>, bases<bob::machine::Machine<bob::machine::GMMStats, double> > >("JFABase", "A JFABase instance can be seen as a container for U, V and D when performing Joint Factor Analysis (JFA).\n\nReferences:\n[1] 'Explicit Modelling of Session Variability for Speaker Verification', R. Vogt, S. Sridharan, Computer Speech & Language, 2008, vol. 22, no. 1, pp. 17-38\n[2] 'Session Variability Modelling for Face Authentication', C. McCool, R. Wallace, M. McLaren, L. El Shafey, S. Marcel, IET Biometrics, 2013", no_init)
+  class_<bob::learn::misc::JFABase, boost::shared_ptr<bob::learn::misc::JFABase>, bases<bob::learn::misc::Machine<bob::learn::misc::GMMStats, double> > >("JFABase", "A JFABase instance can be seen as a container for U, V and D when performing Joint Factor Analysis (JFA).\n\nReferences:\n[1] 'Explicit Modelling of Session Variability for Speaker Verification', R. Vogt, S. Sridharan, Computer Speech & Language, 2008, vol. 22, no. 1, pp. 17-38\n[2] 'Session Variability Modelling for Face Authentication', C. McCool, R. Wallace, M. McLaren, L. El Shafey, S. Marcel, IET Biometrics, 2013", no_init)
     .def("__init__", boost::python::make_constructor(&jb_init), "Constructs a new JFABaseMachine from a configuration file.")
-    .def(init<const boost::shared_ptr<bob::machine::GMMMachine>, optional<const size_t, const size_t> >((arg("self"), arg("ubm"), arg("ru")=1, arg("rv")=1), "Builds a new JFABase."))
+    .def(init<const boost::shared_ptr<bob::learn::misc::GMMMachine>, optional<const size_t, const size_t> >((arg("self"), arg("ubm"), arg("ru")=1, arg("rv")=1), "Builds a new JFABase."))
     .def(init<>((arg("self")), "Constructs a 1x1 JFABase instance. You have to set a UBM GMM and resize the U, V and D subspaces afterwards."))
-    .def(init<const bob::machine::JFABase&>((arg("self"), arg("machine")), "Copy constructs a JFABase"))
+    .def(init<const bob::learn::misc::JFABase&>((arg("self"), arg("machine")), "Copy constructs a JFABase"))
     .def(self == self)
     .def(self != self)
-    .def("is_similar_to", &bob::machine::JFABase::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this JFABase with the 'other' one to be approximately the same.")
+    .def("is_similar_to", &bob::learn::misc::JFABase::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this JFABase with the 'other' one to be approximately the same.")
     .def("load", &jb_load, (arg("self"), arg("config")), "Loads the configuration parameters from a configuration file.")
     .def("save", &jb_save, (arg("self"), arg("config")), "Saves the configuration parameters to a configuration file.")
-    .def("resize", &bob::machine::JFABase::resize, (arg("self"), arg("ru"), arg("rv")), "Reset the dimensionality of the subspaces U and V.")
-    .add_property("ubm", &bob::machine::JFABase::getUbm, &bob::machine::JFABase::setUbm, "The UBM GMM attached to this Joint Factor Analysis model")
-    .add_property("u", make_function(&bob::machine::JFABase::getU, return_value_policy<copy_const_reference>()), &py_jfa_setU, "The subspace U for within-class variations")
-    .add_property("v", make_function(&bob::machine::JFABase::getV, return_value_policy<copy_const_reference>()), &py_jfa_setV, "The subspace V for between-class variations")
-    .add_property("d", make_function(&bob::machine::JFABase::getD, return_value_policy<copy_const_reference>()), &py_jfa_setD, "The subspace D for residual variations")
-    .add_property("dim_c", &bob::machine::JFABase::getDimC, "The number of Gaussian components")
-    .add_property("dim_d", &bob::machine::JFABase::getDimD, "The dimensionality of the feature space")
-    .add_property("dim_cd", &bob::machine::JFABase::getDimCD, "The dimensionality of the supervector space")
-    .add_property("dim_ru", &bob::machine::JFABase::getDimRu, "The dimensionality of the within-class variations subspace (rank of U)")
-    .add_property("dim_rv", &bob::machine::JFABase::getDimRv, "The dimensionality of the between-class variations subspace (rank of V)")
+    .def("resize", &bob::learn::misc::JFABase::resize, (arg("self"), arg("ru"), arg("rv")), "Reset the dimensionality of the subspaces U and V.")
+    .add_property("ubm", &bob::learn::misc::JFABase::getUbm, &bob::learn::misc::JFABase::setUbm, "The UBM GMM attached to this Joint Factor Analysis model")
+    .add_property("u", make_function(&bob::learn::misc::JFABase::getU, return_value_policy<copy_const_reference>()), &py_jfa_setU, "The subspace U for within-class variations")
+    .add_property("v", make_function(&bob::learn::misc::JFABase::getV, return_value_policy<copy_const_reference>()), &py_jfa_setV, "The subspace V for between-class variations")
+    .add_property("d", make_function(&bob::learn::misc::JFABase::getD, return_value_policy<copy_const_reference>()), &py_jfa_setD, "The subspace D for residual variations")
+    .add_property("dim_c", &bob::learn::misc::JFABase::getDimC, "The number of Gaussian components")
+    .add_property("dim_d", &bob::learn::misc::JFABase::getDimD, "The dimensionality of the feature space")
+    .add_property("dim_cd", &bob::learn::misc::JFABase::getDimCD, "The dimensionality of the supervector space")
+    .add_property("dim_ru", &bob::learn::misc::JFABase::getDimRu, "The dimensionality of the within-class variations subspace (rank of U)")
+    .add_property("dim_rv", &bob::learn::misc::JFABase::getDimRv, "The dimensionality of the between-class variations subspace (rank of V)")
   ;
 
-  class_<bob::machine::JFAMachine, boost::shared_ptr<bob::machine::JFAMachine>, bases<bob::machine::Machine<bob::machine::GMMStats, double> > >("JFAMachine", "A JFAMachine. An attached JFABase should be provided for Joint Factor Analysis. The JFAMachine carries information about the speaker factors y and z, whereas a JFABase carries information about the matrices U, V and D.\n\nReferences:\n[1] 'Explicit Modelling of Session Variability for Speaker Verification', R. Vogt, S. Sridharan, Computer Speech & Language, 2008, vol. 22, no. 1, pp. 17-38\n[2] 'Session Variability Modelling for Face Authentication', C. McCool, R. Wallace, M. McLaren, L. El Shafey, S. Marcel, IET Biometrics, 2013", no_init)
+  class_<bob::learn::misc::JFAMachine, boost::shared_ptr<bob::learn::misc::JFAMachine>, bases<bob::learn::misc::Machine<bob::learn::misc::GMMStats, double> > >("JFAMachine", "A JFAMachine. An attached JFABase should be provided for Joint Factor Analysis. The JFAMachine carries information about the speaker factors y and z, whereas a JFABase carries information about the matrices U, V and D.\n\nReferences:\n[1] 'Explicit Modelling of Session Variability for Speaker Verification', R. Vogt, S. Sridharan, Computer Speech & Language, 2008, vol. 22, no. 1, pp. 17-38\n[2] 'Session Variability Modelling for Face Authentication', C. McCool, R. Wallace, M. McLaren, L. El Shafey, S. Marcel, IET Biometrics, 2013", no_init)
     .def("__init__", boost::python::make_constructor(&jm_init), "Constructs a new JFAMachine from a configuration file.")
     .def(init<>((arg("self")), "Constructs a 1x1 JFAMachine instance. You have to set a JFABase afterwards."))
-    .def(init<const boost::shared_ptr<bob::machine::JFABase> >((arg("self"), arg("jfa_base")), "Builds a new JFAMachine."))
-    .def(init<const bob::machine::JFAMachine&>((arg("self"), arg("machine")), "Copy constructs a JFAMachine"))
+    .def(init<const boost::shared_ptr<bob::learn::misc::JFABase> >((arg("self"), arg("jfa_base")), "Builds a new JFAMachine."))
+    .def(init<const bob::learn::misc::JFAMachine&>((arg("self"), arg("machine")), "Copy constructs a JFAMachine"))
     .def(self == self)
     .def(self != self)
-    .def("is_similar_to", &bob::machine::JFAMachine::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this JFABase with the 'other' one to be approximately the same.")
+    .def("is_similar_to", &bob::learn::misc::JFAMachine::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this JFABase with the 'other' one to be approximately the same.")
     .def("load", &jm_load, (arg("self"), arg("config")), "Loads the configuration parameters from a configuration file.")
     .def("save", &jm_save, (arg("self"), arg("config")), "Saves the configuration parameters to a configuration file.")
     .def("estimate_x", &py_jfa_estimateX, (arg("self"), arg("stats"), arg("x")), "Estimates the session offset x (LPT assumption) given GMM statistics.")
     .def("estimate_ux", &py_jfa_estimateUx, (arg("self"), arg("stats"), arg("ux")), "Estimates Ux (LPT assumption) given GMM statistics.")
     .def("forward_ux", &py_jfa_forwardUx, (arg("self"), arg("stats"), arg("ux")), "Processes the GMM statistics and Ux to return a score.")
-    .add_property("jfa_base", &bob::machine::JFAMachine::getJFABase, &bob::machine::JFAMachine::setJFABase, "The JFABase attached to this machine")
-    .add_property("__x__", make_function(&bob::machine::JFAMachine::getX, return_value_policy<copy_const_reference>()), "The latent variable x (last one computed). This is a feature provided for convenience, but this attribute is not 'part' of the machine. The session latent variable x is indeed not class-specific, but depends on the sample considered. Furthermore, it is not saved into the machine or used when comparing machines.")
-    .add_property("y", make_function(&bob::machine::JFAMachine::getY, return_value_policy<copy_const_reference>()), &py_jfa_setY, "The latent variable y of this machine")
-    .add_property("z", make_function(&bob::machine::JFAMachine::getZ, return_value_policy<copy_const_reference>()), &py_jfa_setZ, "The latent variable z of this machine")
-    .add_property("dim_c", &bob::machine::JFAMachine::getDimC, "The number of Gaussian components")
-    .add_property("dim_d", &bob::machine::JFAMachine::getDimD, "The dimensionality of the feature space")
-    .add_property("dim_cd", &bob::machine::JFAMachine::getDimCD, "The dimensionality of the supervector space")
-    .add_property("dim_ru", &bob::machine::JFAMachine::getDimRu, "The dimensionality of the within-class variations subspace (rank of U)")
-    .add_property("dim_rv", &bob::machine::JFAMachine::getDimRv, "The dimensionality of the between-class variations subspace (rank of V)")
+    .add_property("jfa_base", &bob::learn::misc::JFAMachine::getJFABase, &bob::learn::misc::JFAMachine::setJFABase, "The JFABase attached to this machine")
+    .add_property("__x__", make_function(&bob::learn::misc::JFAMachine::getX, return_value_policy<copy_const_reference>()), "The latent variable x (last one computed). This is a feature provided for convenience, but this attribute is not 'part' of the machine. The session latent variable x is indeed not class-specific, but depends on the sample considered. Furthermore, it is not saved into the machine or used when comparing machines.")
+    .add_property("y", make_function(&bob::learn::misc::JFAMachine::getY, return_value_policy<copy_const_reference>()), &py_jfa_setY, "The latent variable y of this machine")
+    .add_property("z", make_function(&bob::learn::misc::JFAMachine::getZ, return_value_policy<copy_const_reference>()), &py_jfa_setZ, "The latent variable z of this machine")
+    .add_property("dim_c", &bob::learn::misc::JFAMachine::getDimC, "The number of Gaussian components")
+    .add_property("dim_d", &bob::learn::misc::JFAMachine::getDimD, "The dimensionality of the feature space")
+    .add_property("dim_cd", &bob::learn::misc::JFAMachine::getDimCD, "The dimensionality of the supervector space")
+    .add_property("dim_ru", &bob::learn::misc::JFAMachine::getDimRu, "The dimensionality of the within-class variations subspace (rank of U)")
+    .add_property("dim_rv", &bob::learn::misc::JFAMachine::getDimRv, "The dimensionality of the between-class variations subspace (rank of V)")
   ;
 
-  class_<bob::machine::ISVBase, boost::shared_ptr<bob::machine::ISVBase>, bases<bob::machine::Machine<bob::machine::GMMStats, double> > >("ISVBase", "An ISVBase instance can be seen as a container for U and D when performing Joint Factor Analysis (ISV). \n\nReferences:\n[1] 'Explicit Modelling of Session Variability for Speaker Verification', R. Vogt, S. Sridharan, Computer Speech & Language, 2008, vol. 22, no. 1, pp. 17-38\n[2] 'Session Variability Modelling for Face Authentication', C. McCool, R. Wallace, M. McLaren, L. El Shafey, S. Marcel, IET Biometrics, 2013", no_init)
+  class_<bob::learn::misc::ISVBase, boost::shared_ptr<bob::learn::misc::ISVBase>, bases<bob::learn::misc::Machine<bob::learn::misc::GMMStats, double> > >("ISVBase", "An ISVBase instance can be seen as a container for U and D when performing Joint Factor Analysis (ISV). \n\nReferences:\n[1] 'Explicit Modelling of Session Variability for Speaker Verification', R. Vogt, S. Sridharan, Computer Speech & Language, 2008, vol. 22, no. 1, pp. 17-38\n[2] 'Session Variability Modelling for Face Authentication', C. McCool, R. Wallace, M. McLaren, L. El Shafey, S. Marcel, IET Biometrics, 2013", no_init)
     .def("__init__", boost::python::make_constructor(&ib_init), "Constructs a new ISVBaseMachine from a configuration file.")
-    .def(init<const boost::shared_ptr<bob::machine::GMMMachine>, optional<const size_t> >((arg("self"), arg("ubm"), arg("ru")=1), "Builds a new ISVBase."))
+    .def(init<const boost::shared_ptr<bob::learn::misc::GMMMachine>, optional<const size_t> >((arg("self"), arg("ubm"), arg("ru")=1), "Builds a new ISVBase."))
     .def(init<>((arg("self")), "Constructs a 1 ISVBase instance. You have to set a UBM GMM and resize the U and D subspaces afterwards."))
-    .def(init<const bob::machine::ISVBase&>((arg("self"), arg("machine")), "Copy constructs an ISVBase"))
+    .def(init<const bob::learn::misc::ISVBase&>((arg("self"), arg("machine")), "Copy constructs an ISVBase"))
     .def(self == self)
     .def(self != self)
-    .def("is_similar_to", &bob::machine::ISVBase::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this ISVBase with the 'other' one to be approximately the same.")
+    .def("is_similar_to", &bob::learn::misc::ISVBase::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this ISVBase with the 'other' one to be approximately the same.")
     .def("load", &ib_load, (arg("self"), arg("config")), "Loads the configuration parameters from a configuration file.")
     .def("save", &ib_save, (arg("self"), arg("config")), "Saves the configuration parameters to a configuration file.")
-    .def("resize", &bob::machine::ISVBase::resize, (arg("self"), arg("ru")), "Reset the dimensionality of the subspaces U.")
-    .add_property("ubm", &bob::machine::ISVBase::getUbm, &bob::machine::ISVBase::setUbm, "The UBM GMM attached to this Joint Factor Analysis model")
-    .add_property("u", make_function(&bob::machine::ISVBase::getU, return_value_policy<copy_const_reference>()), &py_isv_setU, "The subspace U for within-class variations")
-    .add_property("d", make_function(&bob::machine::ISVBase::getD, return_value_policy<copy_const_reference>()), &py_isv_setD, "The subspace D for residual variations")
-    .add_property("dim_c", &bob::machine::ISVBase::getDimC, "The number of Gaussian components")
-    .add_property("dim_d", &bob::machine::ISVBase::getDimD, "The dimensionality of the feature space")
-    .add_property("dim_cd", &bob::machine::ISVBase::getDimCD, "The dimensionality of the supervector space")
-    .add_property("dim_ru", &bob::machine::ISVBase::getDimRu, "The dimensionality of the within-class variations subspace (rank of U)")
+    .def("resize", &bob::learn::misc::ISVBase::resize, (arg("self"), arg("ru")), "Reset the dimensionality of the subspaces U.")
+    .add_property("ubm", &bob::learn::misc::ISVBase::getUbm, &bob::learn::misc::ISVBase::setUbm, "The UBM GMM attached to this Joint Factor Analysis model")
+    .add_property("u", make_function(&bob::learn::misc::ISVBase::getU, return_value_policy<copy_const_reference>()), &py_isv_setU, "The subspace U for within-class variations")
+    .add_property("d", make_function(&bob::learn::misc::ISVBase::getD, return_value_policy<copy_const_reference>()), &py_isv_setD, "The subspace D for residual variations")
+    .add_property("dim_c", &bob::learn::misc::ISVBase::getDimC, "The number of Gaussian components")
+    .add_property("dim_d", &bob::learn::misc::ISVBase::getDimD, "The dimensionality of the feature space")
+    .add_property("dim_cd", &bob::learn::misc::ISVBase::getDimCD, "The dimensionality of the supervector space")
+    .add_property("dim_ru", &bob::learn::misc::ISVBase::getDimRu, "The dimensionality of the within-class variations subspace (rank of U)")
   ;
 
-  class_<bob::machine::ISVMachine, boost::shared_ptr<bob::machine::ISVMachine>, bases<bob::machine::Machine<bob::machine::GMMStats, double> > >("ISVMachine", "An ISVMachine. An attached ISVBase should be provided for Inter-session Variability Modelling. The ISVMachine carries information about the speaker factors z, whereas a ISVBase carries information about the matrices U and D. \n\nReferences:\n[1] 'Explicit Modelling of Session Variability for Speaker Verification', R. Vogt, S. Sridharan, Computer Speech & Language, 2008, vol. 22, no. 1, pp. 17-38\n[2] 'Session Variability Modelling for Face Authentication', C. McCool, R. Wallace, M. McLaren, L. El Shafey, S. Marcel, IET Biometrics, 2013", no_init)
+  class_<bob::learn::misc::ISVMachine, boost::shared_ptr<bob::learn::misc::ISVMachine>, bases<bob::learn::misc::Machine<bob::learn::misc::GMMStats, double> > >("ISVMachine", "An ISVMachine. An attached ISVBase should be provided for Inter-session Variability Modelling. The ISVMachine carries information about the speaker factors z, whereas a ISVBase carries information about the matrices U and D. \n\nReferences:\n[1] 'Explicit Modelling of Session Variability for Speaker Verification', R. Vogt, S. Sridharan, Computer Speech & Language, 2008, vol. 22, no. 1, pp. 17-38\n[2] 'Session Variability Modelling for Face Authentication', C. McCool, R. Wallace, M. McLaren, L. El Shafey, S. Marcel, IET Biometrics, 2013", no_init)
     .def("__init__", boost::python::make_constructor(&im_init), "Constructs a new ISVMachine from a configuration file.")
     .def(init<>((arg("self")), "Constructs a 1 ISVMachine instance. You have to set a ISVBase afterwards."))
-    .def(init<const boost::shared_ptr<bob::machine::ISVBase> >((arg("self"), arg("isv_base")), "Builds a new ISVMachine."))
-    .def(init<const bob::machine::ISVMachine&>((arg("self"), arg("machine")), "Copy constructs an ISVMachine"))
+    .def(init<const boost::shared_ptr<bob::learn::misc::ISVBase> >((arg("self"), arg("isv_base")), "Builds a new ISVMachine."))
+    .def(init<const bob::learn::misc::ISVMachine&>((arg("self"), arg("machine")), "Copy constructs an ISVMachine"))
     .def(self == self)
     .def(self != self)
-    .def("is_similar_to", &bob::machine::ISVMachine::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this ISVBase with the 'other' one to be approximately the same.")
+    .def("is_similar_to", &bob::learn::misc::ISVMachine::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this ISVMachine with the 'other' one to be approximately the same.")
     .def("load", &im_load, (arg("self"), arg("config")), "Loads the configuration parameters from a configuration file.")
     .def("save", &im_save, (arg("self"), arg("config")), "Saves the configuration parameters to a configuration file.")
     .def("estimate_x", &py_isv_estimateX, (arg("self"), arg("stats"), arg("x")), "Estimates the session offset x (LPT assumption) given GMM statistics.")
     .def("estimate_ux", &py_isv_estimateUx, (arg("self"), arg("stats"), arg("ux")), "Estimates Ux (LPT assumption) given GMM statistics.")
     .def("forward_ux", &py_isv_forwardUx, (arg("self"), arg("stats"), arg("ux")), "Processes the GMM statistics and Ux to return a score.")
-    .add_property("isv_base", &bob::machine::ISVMachine::getISVBase, &bob::machine::ISVMachine::setISVBase, "The ISVBase attached to this machine")
-    .add_property("__x__", make_function(&bob::machine::ISVMachine::getX, return_value_policy<copy_const_reference>()), "The latent variable x (last one computed). This is a feature provided for convenience, but this attribute is not 'part' of the machine. The session latent variable x is indeed not class-specific, but depends on the sample considered. Furthermore, it is not saved into the machine or used when comparing machines.")
-    .add_property("z", make_function(&bob::machine::ISVMachine::getZ, return_value_policy<copy_const_reference>()), &py_isv_setZ, "The latent variable z of this machine")
-    .add_property("dim_c", &bob::machine::ISVMachine::getDimC, "The number of Gaussian components")
-    .add_property("dim_d", &bob::machine::ISVMachine::getDimD, "The dimensionality of the feature space")
-    .add_property("dim_cd", &bob::machine::ISVMachine::getDimCD, "The dimensionality of the supervector space")
-    .add_property("dim_ru", &bob::machine::ISVMachine::getDimRu, "The dimensionality of the within-class variations subspace (rank of U)")
+    .add_property("isv_base", &bob::learn::misc::ISVMachine::getISVBase, &bob::learn::misc::ISVMachine::setISVBase, "The ISVBase attached to this machine")
+    .add_property("__x__", make_function(&bob::learn::misc::ISVMachine::getX, return_value_policy<copy_const_reference>()), "The latent variable x (last one computed). This is a feature provided for convenience, but this attribute is not 'part' of the machine. The session latent variable x is indeed not class-specific, but depends on the sample considered. Furthermore, it is not saved into the machine or used when comparing machines.")
+    .add_property("z", make_function(&bob::learn::misc::ISVMachine::getZ, return_value_policy<copy_const_reference>()), &py_isv_setZ, "The latent variable z of this machine")
+    .add_property("dim_c", &bob::learn::misc::ISVMachine::getDimC, "The number of Gaussian components")
+    .add_property("dim_d", &bob::learn::misc::ISVMachine::getDimD, "The dimensionality of the feature space")
+    .add_property("dim_cd", &bob::learn::misc::ISVMachine::getDimCD, "The dimensionality of the supervector space")
+    .add_property("dim_ru", &bob::learn::misc::ISVMachine::getDimRu, "The dimensionality of the within-class variations subspace (rank of U)")
   ;
 }
diff --git a/bob/learn/misc/old/jfa_trainer.cc b/bob/learn/misc/old/jfa_trainer.cc
index 771f16e..b530082 100644
--- a/bob/learn/misc/old/jfa_trainer.cc
+++ b/bob/learn/misc/old/jfa_trainer.cc
@@ -23,76 +23,76 @@ static object vector_as_list(const std::vector<blitz::Array<double,N> >& vec)
 }
 
 static void extract_GMMStats(object data,
-  std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > >& training_data)
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > >& training_data)
 {
   stl_input_iterator<object> dbegin(data), dend;
   std::vector<object> vvdata(dbegin, dend);
   for (size_t i=0; i<vvdata.size(); ++i)
   {
-    stl_input_iterator<boost::shared_ptr<bob::machine::GMMStats> > dlbegin(vvdata[i]), dlend;
-    training_data.push_back(std::vector<boost::shared_ptr<bob::machine::GMMStats> >(dlbegin, dlend));
+    stl_input_iterator<boost::shared_ptr<bob::learn::misc::GMMStats> > dlbegin(vvdata[i]), dlend;
+    training_data.push_back(std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> >(dlbegin, dlend));
   }
 }
 
-static void isv_train(bob::trainer::ISVTrainer& t, bob::machine::ISVBase& m, object data)
+static void isv_train(bob::learn::misc::ISVTrainer& t, bob::learn::misc::ISVBase& m, object data)
 {
-  std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > training_data;
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
   extract_GMMStats(data, training_data);
   // Calls the train function
   t.train(m, training_data);
 }
 
-static void isv_initialize(bob::trainer::ISVTrainer& t, bob::machine::ISVBase& m, object data)
+static void isv_initialize(bob::learn::misc::ISVTrainer& t, bob::learn::misc::ISVBase& m, object data)
 {
-  std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > training_data;
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
   extract_GMMStats(data, training_data);
   // Calls the initialize function
   t.initialize(m, training_data);
 }
 
-static void isv_estep(bob::trainer::ISVTrainer& t, bob::machine::ISVBase& m, object data)
+static void isv_estep(bob::learn::misc::ISVTrainer& t, bob::learn::misc::ISVBase& m, object data)
 {
-  std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > training_data;
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
   extract_GMMStats(data, training_data);
   // Calls the E-Step function
   t.eStep(m, training_data);
 }
 
-static void isv_mstep(bob::trainer::ISVTrainer& t, bob::machine::ISVBase& m, object data)
+static void isv_mstep(bob::learn::misc::ISVTrainer& t, bob::learn::misc::ISVBase& m, object data)
 {
-  std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > training_data;
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
   extract_GMMStats(data, training_data);
   // Calls the M-Step function
   t.mStep(m, training_data);
 }
 
-static void isv_finalize(bob::trainer::ISVTrainer& t, bob::machine::ISVBase& m, object data)
+static void isv_finalize(bob::learn::misc::ISVTrainer& t, bob::learn::misc::ISVBase& m, object data)
 {
-  std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > training_data;
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
   extract_GMMStats(data, training_data);
   // Calls the finalization function
   t.finalize(m, training_data);
 }
 
-static void isv_enrol(bob::trainer::ISVTrainer& t, bob::machine::ISVMachine& m, object data, const size_t n_iter)
+static void isv_enrol(bob::learn::misc::ISVTrainer& t, bob::learn::misc::ISVMachine& m, object data, const size_t n_iter)
 {
-  stl_input_iterator<boost::shared_ptr<bob::machine::GMMStats> > dlbegin(data), dlend;
-  std::vector<boost::shared_ptr<bob::machine::GMMStats> > vdata(dlbegin, dlend);
+  stl_input_iterator<boost::shared_ptr<bob::learn::misc::GMMStats> > dlbegin(data), dlend;
+  std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > vdata(dlbegin, dlend);
   // Calls the enrol function
   t.enrol(m, vdata, n_iter);
 }
 
-static object isv_get_x(const bob::trainer::ISVTrainer& t)
+static object isv_get_x(const bob::learn::misc::ISVTrainer& t)
 {
   return vector_as_list(t.getX());
 }
 
-static object isv_get_z(const bob::trainer::ISVTrainer& t)
+static object isv_get_z(const bob::learn::misc::ISVTrainer& t)
 {
   return vector_as_list(t.getZ());
 }
 
-static void isv_set_x(bob::trainer::ISVTrainer& t, object data)
+static void isv_set_x(bob::learn::misc::ISVTrainer& t, object data)
 {
   stl_input_iterator<bob::python::const_ndarray> vdata(data), dend;
   std::vector<blitz::Array<double,2> > vdata_ref;
@@ -101,7 +101,7 @@ static void isv_set_x(bob::trainer::ISVTrainer& t, object data)
   t.setX(vdata_ref);
 }
 
-static void isv_set_z(bob::trainer::ISVTrainer& t, object data)
+static void isv_set_z(bob::learn::misc::ISVTrainer& t, object data)
 {
   stl_input_iterator<bob::python::const_ndarray> vdata(data), dend;
   std::vector<blitz::Array<double,1> > vdata_ref;
@@ -111,13 +111,13 @@ static void isv_set_z(bob::trainer::ISVTrainer& t, object data)
 }
 
 
-static void isv_set_accUA1(bob::trainer::ISVTrainer& trainer,
+static void isv_set_accUA1(bob::learn::misc::ISVTrainer& trainer,
   bob::python::const_ndarray acc)
 {
   trainer.setAccUA1(acc.bz<double,3>());
 }
 
-static void isv_set_accUA2(bob::trainer::ISVTrainer& trainer,
+static void isv_set_accUA2(bob::learn::misc::ISVTrainer& trainer,
   bob::python::const_ndarray acc)
 {
   trainer.setAccUA2(acc.bz<double,2>());
@@ -125,126 +125,126 @@ static void isv_set_accUA2(bob::trainer::ISVTrainer& trainer,
 
 
 
-static void jfa_train(bob::trainer::JFATrainer& t, bob::machine::JFABase& m, object data)
+static void jfa_train(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
 {
-  std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > training_data;
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
   extract_GMMStats(data, training_data);
   // Calls the train function
   t.train(m, training_data);
 }
 
-static void jfa_initialize(bob::trainer::JFATrainer& t, bob::machine::JFABase& m, object data)
+static void jfa_initialize(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
 {
-  std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > training_data;
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
   extract_GMMStats(data, training_data);
   // Calls the initialize function
   t.initialize(m, training_data);
 }
 
-static void jfa_estep1(bob::trainer::JFATrainer& t, bob::machine::JFABase& m, object data)
+static void jfa_estep1(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
 {
-  std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > training_data;
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
   extract_GMMStats(data, training_data);
   // Calls the E-Step function
   t.eStep1(m, training_data);
 }
 
-static void jfa_mstep1(bob::trainer::JFATrainer& t, bob::machine::JFABase& m, object data)
+static void jfa_mstep1(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
 {
-  std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > training_data;
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
   extract_GMMStats(data, training_data);
   // Calls the M-Step function
   t.mStep1(m, training_data);
 }
 
-static void jfa_finalize1(bob::trainer::JFATrainer& t, bob::machine::JFABase& m, object data)
+static void jfa_finalize1(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
 {
-  std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > training_data;
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
   extract_GMMStats(data, training_data);
   // Calls the finalization function
   t.finalize1(m, training_data);
 }
 
-static void jfa_estep2(bob::trainer::JFATrainer& t, bob::machine::JFABase& m, object data)
+static void jfa_estep2(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
 {
-  std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > training_data;
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
   extract_GMMStats(data, training_data);
   // Calls the E-Step function
   t.eStep2(m, training_data);
 }
 
-static void jfa_mstep2(bob::trainer::JFATrainer& t, bob::machine::JFABase& m, object data)
+static void jfa_mstep2(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
 {
-  std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > training_data;
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
   extract_GMMStats(data, training_data);
   // Calls the M-Step function
   t.mStep2(m, training_data);
 }
 
-static void jfa_finalize2(bob::trainer::JFATrainer& t, bob::machine::JFABase& m, object data)
+static void jfa_finalize2(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
 {
-  std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > training_data;
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
   extract_GMMStats(data, training_data);
   // Calls the finalization function
   t.finalize2(m, training_data);
 }
 
-static void jfa_estep3(bob::trainer::JFATrainer& t, bob::machine::JFABase& m, object data)
+static void jfa_estep3(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
 {
-  std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > training_data;
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
   extract_GMMStats(data, training_data);
   // Calls the E-Step function
   t.eStep3(m, training_data);
 }
 
-static void jfa_mstep3(bob::trainer::JFATrainer& t, bob::machine::JFABase& m, object data)
+static void jfa_mstep3(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
 {
-  std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > training_data;
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
   extract_GMMStats(data, training_data);
   // Calls the M-Step function
   t.mStep3(m, training_data);
 }
 
-static void jfa_finalize3(bob::trainer::JFATrainer& t, bob::machine::JFABase& m, object data)
+static void jfa_finalize3(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
 {
-  std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > training_data;
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
   extract_GMMStats(data, training_data);
   // Calls the finalization function
   t.finalize3(m, training_data);
 }
 
-static void jfa_train_loop(bob::trainer::JFATrainer& t, bob::machine::JFABase& m, object data)
+static void jfa_train_loop(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFABase& m, object data)
 {
-  std::vector<std::vector<boost::shared_ptr<bob::machine::GMMStats> > > training_data;
+  std::vector<std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > > training_data;
   extract_GMMStats(data, training_data);
   // Calls the main loop function
   t.train_loop(m, training_data);
 }
 
-static void jfa_enrol(bob::trainer::JFATrainer& t, bob::machine::JFAMachine& m, object data, const size_t n_iter)
+static void jfa_enrol(bob::learn::misc::JFATrainer& t, bob::learn::misc::JFAMachine& m, object data, const size_t n_iter)
 {
-  stl_input_iterator<boost::shared_ptr<bob::machine::GMMStats> > dlbegin(data), dlend;
-  std::vector<boost::shared_ptr<bob::machine::GMMStats> > vdata(dlbegin, dlend);
+  stl_input_iterator<boost::shared_ptr<bob::learn::misc::GMMStats> > dlbegin(data), dlend;
+  std::vector<boost::shared_ptr<bob::learn::misc::GMMStats> > vdata(dlbegin, dlend);
   // Calls the enrol function
   t.enrol(m, vdata, n_iter);
 }
 
-static object jfa_get_x(const bob::trainer::JFATrainer& t)
+static object jfa_get_x(const bob::learn::misc::JFATrainer& t)
 {
   return vector_as_list(t.getX());
 }
 
-static object jfa_get_y(const bob::trainer::JFATrainer& t)
+static object jfa_get_y(const bob::learn::misc::JFATrainer& t)
 {
   return vector_as_list(t.getY());
 }
 
-static object jfa_get_z(const bob::trainer::JFATrainer& t)
+static object jfa_get_z(const bob::learn::misc::JFATrainer& t)
 {
   return vector_as_list(t.getZ());
 }
 
-static void jfa_set_x(bob::trainer::JFATrainer& t, object data)
+static void jfa_set_x(bob::learn::misc::JFATrainer& t, object data)
 {
   stl_input_iterator<bob::python::const_ndarray> vdata(data), dend;
   std::vector<blitz::Array<double,2> > vdata_ref;
@@ -253,7 +253,7 @@ static void jfa_set_x(bob::trainer::JFATrainer& t, object data)
   t.setX(vdata_ref);
 }
 
-static void jfa_set_y(bob::trainer::JFATrainer& t, object data)
+static void jfa_set_y(bob::learn::misc::JFATrainer& t, object data)
 {
   stl_input_iterator<bob::python::const_ndarray> vdata(data), dend;
   std::vector<blitz::Array<double,1> > vdata_ref;
@@ -262,7 +262,7 @@ static void jfa_set_y(bob::trainer::JFATrainer& t, object data)
   t.setY(vdata_ref);
 }
 
-static void jfa_set_z(bob::trainer::JFATrainer& t, object data)
+static void jfa_set_z(bob::learn::misc::JFATrainer& t, object data)
 {
   stl_input_iterator<bob::python::const_ndarray> vdata(data), dend;
   std::vector<blitz::Array<double,1> > vdata_ref;
@@ -272,37 +272,37 @@ static void jfa_set_z(bob::trainer::JFATrainer& t, object data)
 }
 
 
-static void jfa_set_accUA1(bob::trainer::JFATrainer& trainer,
+static void jfa_set_accUA1(bob::learn::misc::JFATrainer& trainer,
   bob::python::const_ndarray acc)
 {
   trainer.setAccUA1(acc.bz<double,3>());
 }
 
-static void jfa_set_accUA2(bob::trainer::JFATrainer& trainer,
+static void jfa_set_accUA2(bob::learn::misc::JFATrainer& trainer,
   bob::python::const_ndarray acc)
 {
   trainer.setAccUA2(acc.bz<double,2>());
 }
 
-static void jfa_set_accVA1(bob::trainer::JFATrainer& trainer,
+static void jfa_set_accVA1(bob::learn::misc::JFATrainer& trainer,
   bob::python::const_ndarray acc)
 {
   trainer.setAccVA1(acc.bz<double,3>());
 }
 
-static void jfa_set_accVA2(bob::trainer::JFATrainer& trainer,
+static void jfa_set_accVA2(bob::learn::misc::JFATrainer& trainer,
   bob::python::const_ndarray acc)
 {
   trainer.setAccVA2(acc.bz<double,2>());
 }
 
-static void jfa_set_accDA1(bob::trainer::JFATrainer& trainer,
+static void jfa_set_accDA1(bob::learn::misc::JFATrainer& trainer,
   bob::python::const_ndarray acc)
 {
   trainer.setAccDA1(acc.bz<double,1>());
 }
 
-static void jfa_set_accDA2(bob::trainer::JFATrainer& trainer,
+static void jfa_set_accDA2(bob::learn::misc::JFATrainer& trainer,
   bob::python::const_ndarray acc)
 {
   trainer.setAccDA2(acc.bz<double,1>());
@@ -311,35 +311,35 @@ static void jfa_set_accDA2(bob::trainer::JFATrainer& trainer,
 
 void bind_trainer_jfa()
 {
-  class_<bob::trainer::ISVTrainer, boost::noncopyable >("ISVTrainer", "A trainer for Inter-session Variability Modelling (ISV). \n\nReferences:\n[1] 'Explicit Modelling of Session Variability for Speaker Verification', R. Vogt, S. Sridharan, Computer Speech & Language, 2008, vol. 22, no. 1, pp. 17-38\n[2] 'Session Variability Modelling for Face Authentication', C. McCool, R. Wallace, M. McLaren, L. El Shafey, S. Marcel, IET Biometrics, 2013", init<optional<const size_t, const double> >((arg("self"), arg("max_iterations")=10, arg("relevance_factor")=4.),"Initializes a new ISVTrainer."))
-    .def(init<const bob::trainer::ISVTrainer&>((arg("self"), arg("other")), "Copy constructs an ISVTrainer"))
-    .add_property("max_iterations", &bob::trainer::ISVTrainer::getMaxIterations, &bob::trainer::ISVTrainer::setMaxIterations, "Max iterations")
-    .add_property("rng", &bob::trainer::ISVTrainer::getRng, &bob::trainer::ISVTrainer::setRng, "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.")
+  class_<bob::learn::misc::ISVTrainer, boost::noncopyable >("ISVTrainer", "A trainer for Inter-session Variability Modelling (ISV). \n\nReferences:\n[1] 'Explicit Modelling of Session Variability for Speaker Verification', R. Vogt, S. Sridharan, Computer Speech & Language, 2008, vol. 22, no. 1, pp. 17-38\n[2] 'Session Variability Modelling for Face Authentication', C. McCool, R. Wallace, M. McLaren, L. El Shafey, S. Marcel, IET Biometrics, 2013", init<optional<const size_t, const double> >((arg("self"), arg("max_iterations")=10, arg("relevance_factor")=4.),"Initializes a new ISVTrainer."))
+    .def(init<const bob::learn::misc::ISVTrainer&>((arg("self"), arg("other")), "Copy constructs an ISVTrainer"))
+    .add_property("max_iterations", &bob::learn::misc::ISVTrainer::getMaxIterations, &bob::learn::misc::ISVTrainer::setMaxIterations, "Max iterations")
+    .add_property("rng", &bob::learn::misc::ISVTrainer::getRng, &bob::learn::misc::ISVTrainer::setRng, "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.")
     .add_property("__X__", &isv_get_x, &isv_set_x)
     .add_property("__Z__", &isv_get_z, &isv_set_z)
     .def(self == self)
     .def(self != self)
-    .def("is_similar_to", &bob::trainer::ISVTrainer::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this ISVTrainer with the 'other' one to be approximately the same.")
+    .def("is_similar_to", &bob::learn::misc::ISVTrainer::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this ISVTrainer with the 'other' one to be approximately the same.")
     .def("train", &isv_train, (arg("self"), arg("isv_base"), arg("gmm_stats")), "Call the training procedure.")
     .def("initialize", &isv_initialize, (arg("self"), arg("isv_base"), arg("gmm_stats")), "Call the initialization procedure.")
     .def("e_step", &isv_estep, (arg("self"), arg("isv_base"), arg("gmm_stats")), "Call the e-step procedure.")
     .def("m_step", &isv_mstep, (arg("self"), arg("isv_base"), arg("gmm_stats")), "Call the m-step procedure.")
     .def("finalize", &isv_finalize, (arg("self"), arg("isv_base"), arg("gmm_stats")), "Call the finalization procedure.")
     .def("enrol", &isv_enrol, (arg("self"), arg("isv_machine"), arg("gmm_stats"), arg("n_iter")), "Call the enrolment procedure.")
-    .add_property("acc_u_a1", make_function(&bob::trainer::ISVTrainer::getAccUA1, return_value_policy<copy_const_reference>()), &isv_set_accUA1, "Accumulator updated during the E-step")
-    .add_property("acc_u_a2", make_function(&bob::trainer::ISVTrainer::getAccUA2, return_value_policy<copy_const_reference>()), &isv_set_accUA2, "Accumulator updated during the E-step")
+    .add_property("acc_u_a1", make_function(&bob::learn::misc::ISVTrainer::getAccUA1, return_value_policy<copy_const_reference>()), &isv_set_accUA1, "Accumulator updated during the E-step")
+    .add_property("acc_u_a2", make_function(&bob::learn::misc::ISVTrainer::getAccUA2, return_value_policy<copy_const_reference>()), &isv_set_accUA2, "Accumulator updated during the E-step")
   ;
 
-  class_<bob::trainer::JFATrainer, boost::noncopyable >("JFATrainer", "A trainer for Joint Factor Analysis (JFA).\n\nReferences:\n[1] 'Explicit Modelling of Session Variability for Speaker Verification', R. Vogt, S. Sridharan, Computer Speech & Language, 2008, vol. 22, no. 1, pp. 17-38\n[2] 'Session Variability Modelling for Face Authentication', C. McCool, R. Wallace, M. McLaren, L. El Shafey, S. Marcel, IET Biometrics, 2013", init<optional<const size_t> >((arg("self"), arg("max_iterations")=10),"Initializes a new JFATrainer."))
-    .def(init<const bob::trainer::JFATrainer&>((arg("self"), arg("other")), "Copy constructs an JFATrainer"))
-    .add_property("max_iterations", &bob::trainer::JFATrainer::getMaxIterations, &bob::trainer::JFATrainer::setMaxIterations, "Max iterations")
-    .add_property("rng", &bob::trainer::JFATrainer::getRng, &bob::trainer::JFATrainer::setRng, "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.")
+  class_<bob::learn::misc::JFATrainer, boost::noncopyable >("JFATrainer", "A trainer for Joint Factor Analysis (JFA).\n\nReferences:\n[1] 'Explicit Modelling of Session Variability for Speaker Verification', R. Vogt, S. Sridharan, Computer Speech & Language, 2008, vol. 22, no. 1, pp. 17-38\n[2] 'Session Variability Modelling for Face Authentication', C. McCool, R. Wallace, M. McLaren, L. El Shafey, S. Marcel, IET Biometrics, 2013", init<optional<const size_t> >((arg("self"), arg("max_iterations")=10),"Initializes a new JFATrainer."))
+    .def(init<const bob::learn::misc::JFATrainer&>((arg("self"), arg("other")), "Copy constructs a JFATrainer"))
+    .add_property("max_iterations", &bob::learn::misc::JFATrainer::getMaxIterations, &bob::learn::misc::JFATrainer::setMaxIterations, "Max iterations")
+    .add_property("rng", &bob::learn::misc::JFATrainer::getRng, &bob::learn::misc::JFATrainer::setRng, "The Mersenne Twister mt19937 random generator used for the initialization of subspaces/arrays before the EM loop.")
     .add_property("__X__", &jfa_get_x, &jfa_set_x)
     .add_property("__Y__", &jfa_get_y, &jfa_set_y)
     .add_property("__Z__", &jfa_get_z, &jfa_set_z)
     .def(self == self)
     .def(self != self)
-    .def("is_similar_to", &bob::trainer::JFATrainer::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this JFATrainer with the 'other' one to be approximately the same.")
+    .def("is_similar_to", &bob::learn::misc::JFATrainer::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this JFATrainer with the 'other' one to be approximately the same.")
     .def("train", &jfa_train, (arg("self"), arg("jfa_base"), arg("gmm_stats")), "Call the training procedure.")
     .def("initialize", &jfa_initialize, (arg("self"), arg("jfa_base"), arg("gmm_stats")), "Call the initialization procedure.")
     .def("train_loop", &jfa_train_loop, (arg("self"), arg("jfa_base"), arg("gmm_stats")), "Call the training procedure (without the initialization). This will train the three subspaces U, V and d.")
@@ -353,11 +353,11 @@ void bind_trainer_jfa()
     .def("m_step3", &jfa_mstep3, (arg("self"), arg("jfa_base"), arg("gmm_stats")), "Call the 3rd m-step procedure (for the d subspace).")
     .def("finalize3", &jfa_finalize3, (arg("self"), arg("jfa_base"), arg("gmm_stats")), "Call the 3rd finalization procedure (for the d subspace).")
     .def("enrol", &jfa_enrol, (arg("self"), arg("jfa_machine"), arg("gmm_stats"), arg("n_iter")), "Call the enrolment procedure.")
-    .add_property("acc_v_a1", make_function(&bob::trainer::JFATrainer::getAccVA1, return_value_policy<copy_const_reference>()), &jfa_set_accVA1, "Accumulator updated during the E-step")
-    .add_property("acc_v_a2", make_function(&bob::trainer::JFATrainer::getAccVA2, return_value_policy<copy_const_reference>()), &jfa_set_accVA2, "Accumulator updated during the E-step")
-    .add_property("acc_u_a1", make_function(&bob::trainer::JFATrainer::getAccUA1, return_value_policy<copy_const_reference>()), &jfa_set_accUA1, "Accumulator updated during the E-step")
-    .add_property("acc_u_a2", make_function(&bob::trainer::JFATrainer::getAccUA2, return_value_policy<copy_const_reference>()), &jfa_set_accUA2, "Accumulator updated during the E-step")
-    .add_property("acc_d_a1", make_function(&bob::trainer::JFATrainer::getAccDA1, return_value_policy<copy_const_reference>()), &jfa_set_accDA1, "Accumulator updated during the E-step")
-    .add_property("acc_d_a2", make_function(&bob::trainer::JFATrainer::getAccDA2, return_value_policy<copy_const_reference>()), &jfa_set_accDA2, "Accumulator updated during the E-step")
+    .add_property("acc_v_a1", make_function(&bob::learn::misc::JFATrainer::getAccVA1, return_value_policy<copy_const_reference>()), &jfa_set_accVA1, "Accumulator updated during the E-step")
+    .add_property("acc_v_a2", make_function(&bob::learn::misc::JFATrainer::getAccVA2, return_value_policy<copy_const_reference>()), &jfa_set_accVA2, "Accumulator updated during the E-step")
+    .add_property("acc_u_a1", make_function(&bob::learn::misc::JFATrainer::getAccUA1, return_value_policy<copy_const_reference>()), &jfa_set_accUA1, "Accumulator updated during the E-step")
+    .add_property("acc_u_a2", make_function(&bob::learn::misc::JFATrainer::getAccUA2, return_value_policy<copy_const_reference>()), &jfa_set_accUA2, "Accumulator updated during the E-step")
+    .add_property("acc_d_a1", make_function(&bob::learn::misc::JFATrainer::getAccDA1, return_value_policy<copy_const_reference>()), &jfa_set_accDA1, "Accumulator updated during the E-step")
+    .add_property("acc_d_a2", make_function(&bob::learn::misc::JFATrainer::getAccDA2, return_value_policy<copy_const_reference>()), &jfa_set_accDA2, "Accumulator updated during the E-step")
   ;
 }
diff --git a/bob/learn/misc/old/kmeans.cc b/bob/learn/misc/old/kmeans.cc
index bbac7e5..96c06ec 100644
--- a/bob/learn/misc/old/kmeans.cc
+++ b/bob/learn/misc/old/kmeans.cc
@@ -15,7 +15,7 @@
 
 using namespace boost::python;
 
-static tuple py_getVariancesAndWeightsForEachCluster(const bob::machine::KMeansMachine& machine, bob::python::const_ndarray ar) {
+static tuple py_getVariancesAndWeightsForEachCluster(const bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray ar) {
   size_t n_means = machine.getNMeans();
   size_t n_inputs = machine.getNInputs();
   bob::python::ndarray variances(bob::io::base::array::t_float64, n_means, n_inputs);
@@ -26,45 +26,45 @@ static tuple py_getVariancesAndWeightsForEachCluster(const bob::machine::KMeansM
   return boost::python::make_tuple(variances.self(), weights.self());
 }
 
-static void py_getVariancesAndWeightsForEachClusterInit(const bob::machine::KMeansMachine& machine, bob::python::ndarray variances, bob::python::ndarray weights) {
+static void py_getVariancesAndWeightsForEachClusterInit(const bob::learn::misc::KMeansMachine& machine, bob::python::ndarray variances, bob::python::ndarray weights) {
   blitz::Array<double,2> variances_ = variances.bz<double,2>();
   blitz::Array<double,1> weights_ = weights.bz<double,1>();
   machine.getVariancesAndWeightsForEachClusterInit(variances_, weights_);
 }
 
-static void py_getVariancesAndWeightsForEachClusterAcc(const bob::machine::KMeansMachine& machine, bob::python::const_ndarray ar, bob::python::ndarray variances, bob::python::ndarray weights) {
+static void py_getVariancesAndWeightsForEachClusterAcc(const bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray ar, bob::python::ndarray variances, bob::python::ndarray weights) {
   blitz::Array<double,2> variances_ = variances.bz<double,2>();
   blitz::Array<double,1> weights_ = weights.bz<double,1>();
   machine.getVariancesAndWeightsForEachClusterAcc(ar.bz<double,2>(), variances_, weights_);
 }
 
-static void py_getVariancesAndWeightsForEachClusterFin(const bob::machine::KMeansMachine& machine, bob::python::ndarray variances, bob::python::ndarray weights) {
+static void py_getVariancesAndWeightsForEachClusterFin(const bob::learn::misc::KMeansMachine& machine, bob::python::ndarray variances, bob::python::ndarray weights) {
   blitz::Array<double,2> variances_ = variances.bz<double,2>();
   blitz::Array<double,1> weights_ = weights.bz<double,1>();
   machine.getVariancesAndWeightsForEachClusterFin(variances_, weights_);
 }
 
-static object py_getMean(const bob::machine::KMeansMachine& kMeansMachine, const size_t i) {
+static object py_getMean(const bob::learn::misc::KMeansMachine& kMeansMachine, const size_t i) {
   bob::python::ndarray mean(bob::io::base::array::t_float64, kMeansMachine.getNInputs());
   blitz::Array<double,1> mean_ = mean.bz<double,1>();
   kMeansMachine.getMean(i, mean_);
   return mean.self();
 }
 
-static void py_setMean(bob::machine::KMeansMachine& machine, const size_t i, bob::python::const_ndarray mean) {
+static void py_setMean(bob::learn::misc::KMeansMachine& machine, const size_t i, bob::python::const_ndarray mean) {
   machine.setMean(i, mean.bz<double,1>());
 }
 
-static void py_setMeans(bob::machine::KMeansMachine& machine, bob::python::const_ndarray means) {
+static void py_setMeans(bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray means) {
   machine.setMeans(means.bz<double,2>());
 }
 
-static double py_getDistanceFromMean(const bob::machine::KMeansMachine& machine, bob::python::const_ndarray x, const size_t i)
+static double py_getDistanceFromMean(const bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray x, const size_t i)
 {
   return machine.getDistanceFromMean(x.bz<double,1>(), i);
 }
 
-static tuple py_getClosestMean(const bob::machine::KMeansMachine& machine, bob::python::const_ndarray x)
+static tuple py_getClosestMean(const bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray x)
 {
   size_t closest_mean;
   double min_distance;
@@ -72,29 +72,29 @@ static tuple py_getClosestMean(const bob::machine::KMeansMachine& machine, bob::
   return boost::python::make_tuple(closest_mean, min_distance);
 }
 
-static double py_getMinDistance(const bob::machine::KMeansMachine& machine, bob::python::const_ndarray input)
+static double py_getMinDistance(const bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray input)
 {
   return machine.getMinDistance(input.bz<double,1>());
 }
 
-static void py_setCacheMeans(bob::machine::KMeansMachine& machine, bob::python::const_ndarray cache_means) {
+static void py_setCacheMeans(bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray cache_means) {
   machine.setCacheMeans(cache_means.bz<double,2>());
 }
 
 
-static boost::shared_ptr<bob::machine::KMeansMachine> _init(boost::python::object file){
+static boost::shared_ptr<bob::learn::misc::KMeansMachine> _init(boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  return boost::shared_ptr<bob::machine::KMeansMachine>(new bob::machine::KMeansMachine(*hdf5->f));
+  return boost::shared_ptr<bob::learn::misc::KMeansMachine>(new bob::learn::misc::KMeansMachine(*hdf5->f));
 }
 
-static void _load(bob::machine::KMeansMachine& self, boost::python::object file){
+static void _load(bob::learn::misc::KMeansMachine& self, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.load(*hdf5->f);
 }
 
-static void _save(const bob::machine::KMeansMachine& self, boost::python::object file){
+static void _save(const bob::learn::misc::KMeansMachine& self, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.save(*hdf5->f);
@@ -102,22 +102,22 @@ static void _save(const bob::machine::KMeansMachine& self, boost::python::object
 
 void bind_machine_kmeans()
 {
-  class_<bob::machine::KMeansMachine, boost::shared_ptr<bob::machine::KMeansMachine>,
-         bases<bob::machine::Machine<blitz::Array<double,1>, double> > >("KMeansMachine",
+  class_<bob::learn::misc::KMeansMachine, boost::shared_ptr<bob::learn::misc::KMeansMachine>,
+         bases<bob::learn::misc::Machine<blitz::Array<double,1>, double> > >("KMeansMachine",
       "This class implements a k-means classifier.\n"
       "See Section 9.1 of Bishop, \"Pattern recognition and machine learning\", 2006",
       init<>((arg("self"))))
     .def("__init__", boost::python::make_constructor(&_init))
     .def(init<const size_t, const size_t>((arg("self"), arg("n_means"), arg("n_inputs"))))
-    .def(init<bob::machine::KMeansMachine&>((arg("self"), arg("other"))))
+    .def(init<bob::learn::misc::KMeansMachine&>((arg("self"), arg("other"))))
     .def(self == self)
     .def(self != self)
-    .def("is_similar_to", &bob::machine::KMeansMachine::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this KMeansMachine with the 'other' one to be approximately the same.")
-    .add_property("means", make_function(&bob::machine::KMeansMachine::getMeans, return_value_policy<copy_const_reference>()), &py_setMeans, "The mean vectors")
-    .add_property("__cache_means__", make_function(&bob::machine::KMeansMachine::getCacheMeans, return_value_policy<copy_const_reference>()), &py_setCacheMeans, "The cache mean vectors. This should only be used when parallelizing the get_variances_and_weights_for_each_cluster() method")
-    .add_property("dim_d", &bob::machine::KMeansMachine::getNInputs, "Number of inputs")
-    .add_property("dim_c", &bob::machine::KMeansMachine::getNMeans, "Number of means (k)")
-    .def("resize", &bob::machine::KMeansMachine::resize, (arg("self"), arg("n_means"), arg("n_inputs")), "Resize the number of means and inputs")
+    .def("is_similar_to", &bob::learn::misc::KMeansMachine::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this KMeansMachine with the 'other' one to be approximately the same.")
+    .add_property("means", make_function(&bob::learn::misc::KMeansMachine::getMeans, return_value_policy<copy_const_reference>()), &py_setMeans, "The mean vectors")
+    .add_property("__cache_means__", make_function(&bob::learn::misc::KMeansMachine::getCacheMeans, return_value_policy<copy_const_reference>()), &py_setCacheMeans, "The cache mean vectors. This should only be used when parallelizing the get_variances_and_weights_for_each_cluster() method")
+    .add_property("dim_d", &bob::learn::misc::KMeansMachine::getNInputs, "Number of inputs")
+    .add_property("dim_c", &bob::learn::misc::KMeansMachine::getNMeans, "Number of means (k)")
+    .def("resize", &bob::learn::misc::KMeansMachine::resize, (arg("self"), arg("n_means"), arg("n_inputs")), "Resize the number of means and inputs")
     .def("get_mean", &py_getMean, (arg("self"), arg("i")), "Get the i'th mean")
     .def("set_mean", &py_setMean, (arg("self"), arg("i"), arg("mean")), "Set the i'th mean")
     .def("get_distance_from_mean", &py_getDistanceFromMean, (arg("self"), arg("x"), arg("i")),
diff --git a/bob/learn/misc/old/kmeans_trainer.cc b/bob/learn/misc/old/kmeans_trainer.cc
index 2e2cb21..57129fe 100644
--- a/bob/learn/misc/old/kmeans_trainer.cc
+++ b/bob/learn/misc/old/kmeans_trainer.cc
@@ -10,16 +10,16 @@
 
 using namespace boost::python;
 
-typedef bob::trainer::EMTrainer<bob::machine::KMeansMachine, blitz::Array<double,2> > EMTrainerKMeansBase;
+typedef bob::learn::misc::EMTrainer<bob::learn::misc::KMeansMachine, blitz::Array<double,2> > EMTrainerKMeansBase;
 
-static void py_setZeroethOrderStats(bob::trainer::KMeansTrainer& op, bob::python::const_ndarray stats) {
+static void py_setZeroethOrderStats(bob::learn::misc::KMeansTrainer& op, bob::python::const_ndarray stats) {
   const bob::io::base::array::typeinfo& info = stats.type();
   if(info.dtype != bob::io::base::array::t_float64 || info.nd != 1)
     PYTHON_ERROR(TypeError, "cannot set array of type '%s'", info.str().c_str());
   op.setZeroethOrderStats(stats.bz<double,1>());
 }
 
-static void py_setFirstOrderStats(bob::trainer::KMeansTrainer& op, bob::python::const_ndarray stats) {
+static void py_setFirstOrderStats(bob::learn::misc::KMeansTrainer& op, bob::python::const_ndarray stats) {
   const bob::io::base::array::typeinfo& info = stats.type();
   if(info.dtype != bob::io::base::array::t_float64 || info.nd != 2)
     PYTHON_ERROR(TypeError, "cannot set array of type '%s'", info.str().c_str());
@@ -27,31 +27,31 @@ static void py_setFirstOrderStats(bob::trainer::KMeansTrainer& op, bob::python::
 }
 
 static void py_train(EMTrainerKMeansBase& trainer,
-  bob::machine::KMeansMachine& machine, bob::python::const_ndarray sample)
+  bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray sample)
 {
   trainer.train(machine, sample.bz<double,2>());
 }
 
 static void py_initialize(EMTrainerKMeansBase& trainer,
-  bob::machine::KMeansMachine& machine, bob::python::const_ndarray sample)
+  bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray sample)
 {
   trainer.initialize(machine, sample.bz<double,2>());
 }
 
 static void py_finalize(EMTrainerKMeansBase& trainer,
-  bob::machine::KMeansMachine& machine, bob::python::const_ndarray sample)
+  bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray sample)
 {
   trainer.finalize(machine, sample.bz<double,2>());
 }
 
 static void py_eStep(EMTrainerKMeansBase& trainer,
-  bob::machine::KMeansMachine& machine, bob::python::const_ndarray sample)
+  bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray sample)
 {
   trainer.eStep(machine, sample.bz<double,2>());
 }
 
 static void py_mStep(EMTrainerKMeansBase& trainer,
-  bob::machine::KMeansMachine& machine, bob::python::const_ndarray sample)
+  bob::learn::misc::KMeansMachine& machine, bob::python::const_ndarray sample)
 {
   trainer.mStep(machine, sample.bz<double,2>());
 }
@@ -79,7 +79,7 @@ void bind_trainer_kmeans()
   ;
 
   // Starts binding the KMeansTrainer
-  class_<bob::trainer::KMeansTrainer, boost::shared_ptr<bob::trainer::KMeansTrainer>, boost::noncopyable, bases<EMTrainerKMeansBase> > KMT("KMeansTrainer",
+  class_<bob::learn::misc::KMeansTrainer, boost::shared_ptr<bob::learn::misc::KMeansTrainer>, boost::noncopyable, bases<EMTrainerKMeansBase> > KMT("KMeansTrainer",
       "Trains a KMeans machine.\n"
       "This class implements the expectation-maximization algorithm for a k-means machine.\n"
       "See Section 9.1 of Bishop, \"Pattern recognition and machine learning\", 2006\n"
@@ -90,26 +90,26 @@ void bind_trainer_kmeans()
   // Binds methods that does not have nested enum values as default parameters
   KMT.def(self == self)
      .def(self != self)
-     .add_property("initialization_method", &bob::trainer::KMeansTrainer::getInitializationMethod, &bob::trainer::KMeansTrainer::setInitializationMethod, "The initialization method to generate the initial means.")
-     .add_property("rng", &bob::trainer::KMeansTrainer::getRng, &bob::trainer::KMeansTrainer::setRng, "The Mersenne Twister mt19937 random generator used for the initialization of the means.")
-     .add_property("average_min_distance", &bob::trainer::KMeansTrainer::getAverageMinDistance, &bob::trainer::KMeansTrainer::setAverageMinDistance, "Average min (square Euclidean) distance. Useful to parallelize the E-step.")
-     .add_property("zeroeth_order_statistics", make_function(&bob::trainer::KMeansTrainer::getZeroethOrderStats, return_value_policy<copy_const_reference>()), &py_setZeroethOrderStats, "The zeroeth order statistics. Useful to parallelize the E-step.")
-     .add_property("first_order_statistics", make_function(&bob::trainer::KMeansTrainer::getFirstOrderStats, return_value_policy<copy_const_reference>()), &py_setFirstOrderStats, "The first order statistics. Useful to parallelize the E-step.")
+     .add_property("initialization_method", &bob::learn::misc::KMeansTrainer::getInitializationMethod, &bob::learn::misc::KMeansTrainer::setInitializationMethod, "The initialization method to generate the initial means.")
+     .add_property("rng", &bob::learn::misc::KMeansTrainer::getRng, &bob::learn::misc::KMeansTrainer::setRng, "The Mersenne Twister mt19937 random generator used for the initialization of the means.")
+     .add_property("average_min_distance", &bob::learn::misc::KMeansTrainer::getAverageMinDistance, &bob::learn::misc::KMeansTrainer::setAverageMinDistance, "Average min (square Euclidean) distance. Useful to parallelize the E-step.")
+     .add_property("zeroeth_order_statistics", make_function(&bob::learn::misc::KMeansTrainer::getZeroethOrderStats, return_value_policy<copy_const_reference>()), &py_setZeroethOrderStats, "The zeroeth order statistics. Useful to parallelize the E-step.")
+     .add_property("first_order_statistics", make_function(&bob::learn::misc::KMeansTrainer::getFirstOrderStats, return_value_policy<copy_const_reference>()), &py_setFirstOrderStats, "The first order statistics. Useful to parallelize the E-step.")
     ;
 
   // Sets the scope to the one of the KMeansTrainer
   scope s(KMT);
 
   // Adds enum in the previously defined current scope
-  enum_<bob::trainer::KMeansTrainer::InitializationMethod>("initialization_method_type")
-    .value("RANDOM", bob::trainer::KMeansTrainer::RANDOM)
-    .value("RANDOM_NO_DUPLICATE", bob::trainer::KMeansTrainer::RANDOM_NO_DUPLICATE)
+  enum_<bob::learn::misc::KMeansTrainer::InitializationMethod>("initialization_method_type")
+    .value("RANDOM", bob::learn::misc::KMeansTrainer::RANDOM)
+    .value("RANDOM_NO_DUPLICATE", bob::learn::misc::KMeansTrainer::RANDOM_NO_DUPLICATE)
 #if BOOST_VERSION >= 104700
-    .value("KMEANS_PLUS_PLUS", bob::trainer::KMeansTrainer::KMEANS_PLUS_PLUS)
+    .value("KMEANS_PLUS_PLUS", bob::learn::misc::KMeansTrainer::KMEANS_PLUS_PLUS)
 #endif
     .export_values()
     ;
 
   // Binds methods that has nested enum values as default parameters
-  KMT.def(init<optional<double,int,bool,bob::trainer::KMeansTrainer::InitializationMethod> >((arg("self"), arg("convergence_threshold")=0.001, arg("max_iterations")=10, arg("compute_likelihood")=true, arg("initialization_method")=bob::trainer::KMeansTrainer::RANDOM)));
+  KMT.def(init<optional<double,int,bool,bob::learn::misc::KMeansTrainer::InitializationMethod> >((arg("self"), arg("convergence_threshold")=0.001, arg("max_iterations")=10, arg("compute_likelihood")=true, arg("initialization_method")=bob::learn::misc::KMeansTrainer::RANDOM)));
 }
diff --git a/bob/learn/misc/old/linearscoring.cc b/bob/learn/misc/old/linearscoring.cc
index 01ecc32..b614879 100644
--- a/bob/learn/misc/old/linearscoring.cc
+++ b/bob/learn/misc/old/linearscoring.cc
@@ -22,8 +22,8 @@ static void convertGMMMeanList(object models, std::vector<blitz::Array<double,1>
     models_c.push_back(it->bz<double,1>());
 }
 
-static void convertGMMStatsList(object test_stats, std::vector<boost::shared_ptr<const bob::machine::GMMStats> >& test_stats_c) {
-  stl_input_iterator<boost::shared_ptr<bob::machine::GMMStats> > dbegin(test_stats), dend;
+static void convertGMMStatsList(object test_stats, std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> >& test_stats_c) {
+  stl_input_iterator<boost::shared_ptr<bob::learn::misc::GMMStats> > dbegin(test_stats), dend;
   test_stats_c.assign(dbegin, dend);
 }
 
@@ -36,8 +36,8 @@ static void convertChannelOffsetList(object test_channelOffset, std::vector<blit
     test_channelOffset_c.push_back(it->bz<double,1>());
 }
 
-static void convertGMMMachineList(object models, std::vector<boost::shared_ptr<const bob::machine::GMMMachine> >& models_c) {
-  stl_input_iterator<boost::shared_ptr<bob::machine::GMMMachine> > dbegin(models), dend;
+static void convertGMMMachineList(object models, std::vector<boost::shared_ptr<const bob::learn::misc::GMMMachine> >& models_c) {
+  stl_input_iterator<boost::shared_ptr<bob::learn::misc::GMMMachine> > dbegin(models), dend;
   models_c.assign(dbegin, dend);
 }
 
@@ -52,43 +52,43 @@ static object linearScoring1(object models,
   std::vector<blitz::Array<double,1> > models_c;
   convertGMMMeanList(models, models_c);
 
-  std::vector<boost::shared_ptr<const bob::machine::GMMStats> > test_stats_c;
+  std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> > test_stats_c;
   convertGMMStatsList(test_stats, test_stats_c);
 
   bob::python::ndarray ret(bob::io::base::array::t_float64, models_c.size(), test_stats_c.size());
   blitz::Array<double,2> ret_ = ret.bz<double,2>();
   if (test_channelOffset.ptr() == Py_None || len(test_channelOffset) == 0) { //list is empty
-    bob::machine::linearScoring(models_c, ubm_mean_, ubm_variance_, test_stats_c, frame_length_normalisation, ret_);
+    bob::learn::misc::linearScoring(models_c, ubm_mean_, ubm_variance_, test_stats_c, frame_length_normalisation, ret_);
   }
   else {
     std::vector<blitz::Array<double,1> > test_channelOffset_c;
     convertChannelOffsetList(test_channelOffset, test_channelOffset_c);
-    bob::machine::linearScoring(models_c, ubm_mean_, ubm_variance_, test_stats_c, test_channelOffset_c, frame_length_normalisation, ret_);
+    bob::learn::misc::linearScoring(models_c, ubm_mean_, ubm_variance_, test_stats_c, test_channelOffset_c, frame_length_normalisation, ret_);
   }
 
   return ret.self();
 }
 
 static object linearScoring2(object models,
-    bob::machine::GMMMachine& ubm,
+    bob::learn::misc::GMMMachine& ubm,
     object test_stats, object test_channelOffset = list(), // Empty list
     bool frame_length_normalisation = false)
 {
-  std::vector<boost::shared_ptr<const bob::machine::GMMMachine> > models_c;
+  std::vector<boost::shared_ptr<const bob::learn::misc::GMMMachine> > models_c;
   convertGMMMachineList(models, models_c);
 
-  std::vector<boost::shared_ptr<const bob::machine::GMMStats> > test_stats_c;
+  std::vector<boost::shared_ptr<const bob::learn::misc::GMMStats> > test_stats_c;
   convertGMMStatsList(test_stats, test_stats_c);
 
   bob::python::ndarray ret(bob::io::base::array::t_float64, models_c.size(), test_stats_c.size());
   blitz::Array<double,2> ret_ = ret.bz<double,2>();
   if (test_channelOffset.ptr() == Py_None || len(test_channelOffset) == 0) { //list is empty
-    bob::machine::linearScoring(models_c, ubm, test_stats_c, frame_length_normalisation, ret_);
+    bob::learn::misc::linearScoring(models_c, ubm, test_stats_c, frame_length_normalisation, ret_);
   }
   else {
     std::vector<blitz::Array<double,1> > test_channelOffset_c;
     convertChannelOffsetList(test_channelOffset, test_channelOffset_c);
-    bob::machine::linearScoring(models_c, ubm, test_stats_c, test_channelOffset_c, frame_length_normalisation, ret_);
+    bob::learn::misc::linearScoring(models_c, ubm, test_stats_c, test_channelOffset_c, frame_length_normalisation, ret_);
   }
 
   return ret.self();
@@ -96,10 +96,10 @@ static object linearScoring2(object models,
 
 static double linearScoring3(bob::python::const_ndarray model,
   bob::python::const_ndarray ubm_mean, bob::python::const_ndarray ubm_var,
-  const bob::machine::GMMStats& test_stats, bob::python::const_ndarray test_channelOffset,
+  const bob::learn::misc::GMMStats& test_stats, bob::python::const_ndarray test_channelOffset,
   const bool frame_length_normalisation = false)
 {
-  return bob::machine::linearScoring(model.bz<double,1>(), ubm_mean.bz<double,1>(),
+  return bob::learn::misc::linearScoring(model.bz<double,1>(), ubm_mean.bz<double,1>(),
           ubm_var.bz<double,1>(), test_stats, test_channelOffset.bz<double,1>(), frame_length_normalisation);
 }
 
diff --git a/bob/learn/misc/old/machine.cc b/bob/learn/misc/old/machine.cc
index 5a10dce..31deb42 100644
--- a/bob/learn/misc/old/machine.cc
+++ b/bob/learn/misc/old/machine.cc
@@ -10,14 +10,14 @@
 
 using namespace boost::python;
 
-static double forward(const bob::machine::Machine<blitz::Array<double,1>, double>& m,
+static double forward(const bob::learn::misc::Machine<blitz::Array<double,1>, double>& m,
     bob::python::const_ndarray input) {
   double output;
   m.forward(input.bz<double,1>(), output);
   return output;
 }
 
-static double forward_(const bob::machine::Machine<blitz::Array<double,1>, double>& m,
+static double forward_(const bob::learn::misc::Machine<blitz::Array<double,1>, double>& m,
     bob::python::const_ndarray input) {
   double output;
   m.forward_(input.bz<double,1>(), output);
@@ -26,7 +26,7 @@ static double forward_(const bob::machine::Machine<blitz::Array<double,1>, doubl
 
 void bind_machine_base()
 {
-  class_<bob::machine::Machine<blitz::Array<double,1>, double>, boost::noncopyable>("MachineDoubleBase",
+  class_<bob::learn::misc::Machine<blitz::Array<double,1>, double>, boost::noncopyable>("MachineDoubleBase",
       "Root class for all Machine<blitz::Array<double,1>, double>", no_init)
     .def("__call__", &forward_, (arg("self"), arg("input")), "Executes the machine on the given 1D numpy array of float64, and returns the output. NO CHECK is performed.")
     .def("forward", &forward, (arg("self"), arg("input")), "Executes the machine on the given 1D numpy array of float64, and returns the output.")
diff --git a/bob/learn/misc/old/plda.cc b/bob/learn/misc/old/plda.cc
index 0b2536f..64d94ed 100644
--- a/bob/learn/misc/old/plda.cc
+++ b/bob/learn/misc/old/plda.cc
@@ -16,46 +16,46 @@
 
 using namespace boost::python;
 
-static void py_set_dim_d(bob::machine::PLDABase& machine, const size_t dim_d)
+static void py_set_dim_d(bob::learn::misc::PLDABase& machine, const size_t dim_d)
 {
   machine.resize(dim_d, machine.getDimF(), machine.getDimG());
 }
-static void py_set_dim_f(bob::machine::PLDABase& machine, const size_t dim_f)
+static void py_set_dim_f(bob::learn::misc::PLDABase& machine, const size_t dim_f)
 {
   machine.resize(machine.getDimD(), dim_f, machine.getDimG());
 }
-static void py_set_dim_g(bob::machine::PLDABase& machine, const size_t dim_g)
+static void py_set_dim_g(bob::learn::misc::PLDABase& machine, const size_t dim_g)
 {
   machine.resize(machine.getDimD(), machine.getDimF(), dim_g);
 }
 
 // Set methods that uses blitz::Arrays
-static void py_set_mu(bob::machine::PLDABase& machine,
+static void py_set_mu(bob::learn::misc::PLDABase& machine,
   bob::python::const_ndarray mu)
 {
   machine.setMu(mu.bz<double,1>());
 }
 
-static void py_set_f(bob::machine::PLDABase& machine,
+static void py_set_f(bob::learn::misc::PLDABase& machine,
   bob::python::const_ndarray f)
 {
   machine.setF(f.bz<double,2>());
 }
 
-static void py_set_g(bob::machine::PLDABase& machine,
+static void py_set_g(bob::learn::misc::PLDABase& machine,
   bob::python::const_ndarray g)
 {
   machine.setG(g.bz<double,2>());
 }
 
-static void py_set_sigma(bob::machine::PLDABase& machine,
+static void py_set_sigma(bob::learn::misc::PLDABase& machine,
   bob::python::const_ndarray sigma)
 {
   machine.setSigma(sigma.bz<double,1>());
 }
 
 
-static double computeLogLikelihood(bob::machine::PLDAMachine& plda,
+static double computeLogLikelihood(bob::learn::misc::PLDAMachine& plda,
   bob::python::const_ndarray samples, bool with_enrolled_samples=true)
 {
   const bob::io::base::array::typeinfo& info = samples.type();
@@ -69,7 +69,7 @@ static double computeLogLikelihood(bob::machine::PLDAMachine& plda,
   }
 }
 
-static double plda_forward_sample(bob::machine::PLDAMachine& m,
+static double plda_forward_sample(bob::learn::misc::PLDAMachine& m,
   bob::python::const_ndarray samples)
 {
   const bob::io::base::array::typeinfo& info = samples.type();
@@ -93,7 +93,7 @@ static double plda_forward_sample(bob::machine::PLDAMachine& m,
   }
 }
 
-static double py_log_likelihood_point_estimate(bob::machine::PLDABase& plda,
+static double py_log_likelihood_point_estimate(bob::learn::misc::PLDABase& plda,
   bob::python::const_ndarray xij, bob::python::const_ndarray hi,
   bob::python::const_ndarray wij)
 {
@@ -104,38 +104,38 @@ static double py_log_likelihood_point_estimate(bob::machine::PLDABase& plda,
 BOOST_PYTHON_FUNCTION_OVERLOADS(computeLogLikelihood_overloads, computeLogLikelihood, 2, 3)
 
 
-static boost::shared_ptr<bob::machine::PLDABase> b_init(boost::python::object file){
+static boost::shared_ptr<bob::learn::misc::PLDABase> b_init(boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  return boost::shared_ptr<bob::machine::PLDABase>(new bob::machine::PLDABase(*hdf5->f));
+  return boost::shared_ptr<bob::learn::misc::PLDABase>(new bob::learn::misc::PLDABase(*hdf5->f));
 }
 
-static void b_load(bob::machine::PLDABase& self, boost::python::object file){
+static void b_load(bob::learn::misc::PLDABase& self, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.load(*hdf5->f);
 }
 
-static void b_save(const bob::machine::PLDABase& self, boost::python::object file){
+static void b_save(const bob::learn::misc::PLDABase& self, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.save(*hdf5->f);
 }
 
 
-static boost::shared_ptr<bob::machine::PLDAMachine> m_init(boost::python::object file, boost::shared_ptr<bob::machine::PLDABase> b){
+static boost::shared_ptr<bob::learn::misc::PLDAMachine> m_init(boost::python::object file, boost::shared_ptr<bob::learn::misc::PLDABase> b){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  return boost::shared_ptr<bob::machine::PLDAMachine>(new bob::machine::PLDAMachine(*hdf5->f, b));
+  return boost::shared_ptr<bob::learn::misc::PLDAMachine>(new bob::learn::misc::PLDAMachine(*hdf5->f, b));
 }
 
-static void m_load(bob::machine::PLDAMachine& self, boost::python::object file){
+static void m_load(bob::learn::misc::PLDAMachine& self, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.load(*hdf5->f);
 }
 
-static void m_save(const bob::machine::PLDAMachine& self, boost::python::object file){
+static void m_save(const bob::learn::misc::PLDAMachine& self, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.save(*hdf5->f);
@@ -143,72 +143,72 @@ static void m_save(const bob::machine::PLDAMachine& self, boost::python::object
 
 void bind_machine_plda()
 {
-  class_<bob::machine::PLDABase, boost::shared_ptr<bob::machine::PLDABase> >("PLDABase", "A PLDABase can be seen as a container for the subspaces F, G, the diagonal covariance matrix sigma (stored as a 1D array) and the mean vector mu when performing Probabilistic Linear Discriminant Analysis (PLDA). PLDA is a probabilistic model that incorporates components describing both between-class and within-class variations. A PLDABase can be shared between several PLDAMachine that contains class-specific information (information about the enrolment samples).\n\nReferences:\n1. 'A Scalable Formulation of Probabilistic Linear Discriminant Analysis: Applied to Face Recognition', Laurent El Shafey, Chris McCool, Roy Wallace, Sebastien Marcel, TPAMI'2013\n2. 'Probabilistic Linear Discriminant Analysis for Inference About Identity', Prince and Elder, ICCV'2007.\n3. 'Probabilistic Models for Inference about Identity', Li, Fu, Mohammed, Elder and Prince, TPAMI'2012.", init<const size_t, const size_t, const size_t, optional<const double> >((arg("self"), arg("dim_d"), arg("dim_f"), arg("dim_g"), arg("variance_flooring")=0.), "Builds a new PLDABase. dim_d is the dimensionality of the input features, dim_f is the dimensionality of the F subspace and dim_g the dimensionality of the G subspace. The variance flooring threshold is the minimum value that the variance sigma can reach, as this diagonal matrix is inverted."))
+  class_<bob::learn::misc::PLDABase, boost::shared_ptr<bob::learn::misc::PLDABase> >("PLDABase", "A PLDABase can be seen as a container for the subspaces F, G, the diagonal covariance matrix sigma (stored as a 1D array) and the mean vector mu when performing Probabilistic Linear Discriminant Analysis (PLDA). PLDA is a probabilistic model that incorporates components describing both between-class and within-class variations. A PLDABase can be shared between several PLDAMachine that contains class-specific information (information about the enrolment samples).\n\nReferences:\n1. 'A Scalable Formulation of Probabilistic Linear Discriminant Analysis: Applied to Face Recognition', Laurent El Shafey, Chris McCool, Roy Wallace, Sebastien Marcel, TPAMI'2013\n2. 'Probabilistic Linear Discriminant Analysis for Inference About Identity', Prince and Elder, ICCV'2007.\n3. 'Probabilistic Models for Inference about Identity', Li, Fu, Mohammed, Elder and Prince, TPAMI'2012.", init<const size_t, const size_t, const size_t, optional<const double> >((arg("self"), arg("dim_d"), arg("dim_f"), arg("dim_g"), arg("variance_flooring")=0.), "Builds a new PLDABase. dim_d is the dimensionality of the input features, dim_f is the dimensionality of the F subspace and dim_g the dimensionality of the G subspace. The variance flooring threshold is the minimum value that the variance sigma can reach, as this diagonal matrix is inverted."))
     .def(init<>((arg("self")), "Constructs a new empty PLDABase."))
     .def("__init__", boost::python::make_constructor(&b_init), "Constructs a new PLDABase from a configuration file.")
-    .def(init<const bob::machine::PLDABase&>((arg("self"), arg("machine")), "Copy constructs a PLDABase"))
+    .def(init<const bob::learn::misc::PLDABase&>((arg("self"), arg("machine")), "Copy constructs a PLDABase"))
     .def(self == self)
     .def(self != self)
-    .def("is_similar_to", &bob::machine::PLDABase::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this PLDABase with the 'other' one to be approximately the same.")
+    .def("is_similar_to", &bob::learn::misc::PLDABase::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this PLDABase with the 'other' one to be approximately the same.")
     .def("load", &b_load, (arg("self"), arg("config")), "Loads the configuration parameters from a configuration file.")
     .def("save", &b_save, (arg("self"), arg("config")), "Saves the configuration parameters to a configuration file.")
-    .add_property("dim_d", &bob::machine::PLDABase::getDimD, &py_set_dim_d, "Dimensionality of the input feature vectors")
-    .add_property("dim_f", &bob::machine::PLDABase::getDimF, &py_set_dim_f, "Dimensionality of the F subspace/matrix of the PLDA model")
-    .add_property("dim_g", &bob::machine::PLDABase::getDimG, &py_set_dim_g, "Dimensionality of the G subspace/matrix of the PLDA model")
-    .add_property("mu", make_function(&bob::machine::PLDABase::getMu, return_value_policy<copy_const_reference>()), &py_set_mu, "The mean vector mu of the PLDA model")
-    .add_property("f", make_function(&bob::machine::PLDABase::getF, return_value_policy<copy_const_reference>()), &py_set_f, "The subspace/matrix F of the PLDA model")
-    .add_property("g", make_function(&bob::machine::PLDABase::getG, return_value_policy<copy_const_reference>()), &py_set_g, "The subspace/matrix G of the PLDA model")
-    .add_property("sigma", make_function(&bob::machine::PLDABase::getSigma, return_value_policy<copy_const_reference>()), &py_set_sigma, "The diagonal covariance matrix (represented by a 1D numpy array) sigma of the PLDA model")
-    .add_property("variance_threshold", &bob::machine::PLDABase::getVarianceThreshold, &bob::machine::PLDABase::setVarianceThreshold,
+    .add_property("dim_d", &bob::learn::misc::PLDABase::getDimD, &py_set_dim_d, "Dimensionality of the input feature vectors")
+    .add_property("dim_f", &bob::learn::misc::PLDABase::getDimF, &py_set_dim_f, "Dimensionality of the F subspace/matrix of the PLDA model")
+    .add_property("dim_g", &bob::learn::misc::PLDABase::getDimG, &py_set_dim_g, "Dimensionality of the G subspace/matrix of the PLDA model")
+    .add_property("mu", make_function(&bob::learn::misc::PLDABase::getMu, return_value_policy<copy_const_reference>()), &py_set_mu, "The mean vector mu of the PLDA model")
+    .add_property("f", make_function(&bob::learn::misc::PLDABase::getF, return_value_policy<copy_const_reference>()), &py_set_f, "The subspace/matrix F of the PLDA model")
+    .add_property("g", make_function(&bob::learn::misc::PLDABase::getG, return_value_policy<copy_const_reference>()), &py_set_g, "The subspace/matrix G of the PLDA model")
+    .add_property("sigma", make_function(&bob::learn::misc::PLDABase::getSigma, return_value_policy<copy_const_reference>()), &py_set_sigma, "The diagonal covariance matrix (represented by a 1D numpy array) sigma of the PLDA model")
+    .add_property("variance_threshold", &bob::learn::misc::PLDABase::getVarianceThreshold, &bob::learn::misc::PLDABase::setVarianceThreshold,
       "The variance flooring threshold, i.e. the minimum allowed value of variance (sigma) in each dimension. "
       "The variance sigma will be set to this value if an attempt is made to set it to a smaller value.")
-    .def("resize", &bob::machine::PLDABase::resize, (arg("self"), arg("dim_d"), arg("dim_f"), arg("dim_g")), "Resizes the dimensionality of the PLDA model. Paramaters mu, F, G and sigma are reinitialized.")
-    .def("has_gamma", &bob::machine::PLDABase::hasGamma, (arg("self"), arg("a")), "Tells if the gamma matrix for the given number of samples has already been computed. (gamma = inverse(I+a.F^T.beta.F), please check the documentation/source code for more details.")
-    .def("compute_gamma", &bob::machine::PLDABase::computeGamma, (arg("self"), arg("a"), arg("gamma")), "Computes the gamma matrix for the given number of samples. (gamma = inverse(I+a.F^T.beta.F), please check the documentation/source code for more details.")
-    .def("get_add_gamma", make_function(&bob::machine::PLDABase::getAddGamma, return_value_policy<copy_const_reference>(), (arg("self"), arg("a"))), "Computes the gamma matrix for the given number of samples. (gamma = inverse(I+a.F^T.beta.F), please check the documentation/source code for more details.")
-    .def("get_gamma", make_function(&bob::machine::PLDABase::getGamma, return_value_policy<copy_const_reference>(), (arg("self"), arg("a"))), "Returns the gamma matrix for the given number of samples if it has already been put in cache. Throws an exception otherwise. (gamma = inverse(I+a.F^T.beta.F), please check the documentation/source code for more details.")
-    .def("has_log_like_const_term", &bob::machine::PLDABase::hasLogLikeConstTerm, (arg("self"), arg("a")), "Tells if the log likelihood constant term for the given number of samples has already been computed.")
-    .def("compute_log_like_const_term", (double (bob::machine::PLDABase::*)(const size_t, const blitz::Array<double,2>&) const)&bob::machine::PLDABase::computeLogLikeConstTerm, (arg("self"), arg("a"), arg("gamma")), "Computes the log likelihood constant term for the given number of samples.")
-    .def("get_add_log_like_const_term", &bob::machine::PLDABase::getAddLogLikeConstTerm, (arg("self"), arg("a")), "Computes the log likelihood constant term for the given number of samples, and adds it to the machine (as well as gamma), if it does not already exist.")
-    .def("get_log_like_const_term", &bob::machine::PLDABase::getLogLikeConstTerm, (arg("self"), arg("a")), "Returns the log likelihood constant term for the given number of samples if it has already been put in cache. Throws an exception otherwise.")
-    .def("clear_maps", &bob::machine::PLDABase::clearMaps, (arg("self")), "Clear the maps containing the gamma's as well as the log likelihood constant term for few number of samples. These maps are used to make likelihood computations faster.")
+    .def("resize", &bob::learn::misc::PLDABase::resize, (arg("self"), arg("dim_d"), arg("dim_f"), arg("dim_g")), "Resizes the dimensionality of the PLDA model. Parameters mu, F, G and sigma are reinitialized.")
+    .def("has_gamma", &bob::learn::misc::PLDABase::hasGamma, (arg("self"), arg("a")), "Tells if the gamma matrix for the given number of samples has already been computed. (gamma = inverse(I+a.F^T.beta.F), please check the documentation/source code for more details.")
+    .def("compute_gamma", &bob::learn::misc::PLDABase::computeGamma, (arg("self"), arg("a"), arg("gamma")), "Computes the gamma matrix for the given number of samples. (gamma = inverse(I+a.F^T.beta.F), please check the documentation/source code for more details.")
+    .def("get_add_gamma", make_function(&bob::learn::misc::PLDABase::getAddGamma, return_value_policy<copy_const_reference>(), (arg("self"), arg("a"))), "Computes the gamma matrix for the given number of samples. (gamma = inverse(I+a.F^T.beta.F), please check the documentation/source code for more details.")
+    .def("get_gamma", make_function(&bob::learn::misc::PLDABase::getGamma, return_value_policy<copy_const_reference>(), (arg("self"), arg("a"))), "Returns the gamma matrix for the given number of samples if it has already been put in cache. Throws an exception otherwise. (gamma = inverse(I+a.F^T.beta.F), please check the documentation/source code for more details.")
+    .def("has_log_like_const_term", &bob::learn::misc::PLDABase::hasLogLikeConstTerm, (arg("self"), arg("a")), "Tells if the log likelihood constant term for the given number of samples has already been computed.")
+    .def("compute_log_like_const_term", (double (bob::learn::misc::PLDABase::*)(const size_t, const blitz::Array<double,2>&) const)&bob::learn::misc::PLDABase::computeLogLikeConstTerm, (arg("self"), arg("a"), arg("gamma")), "Computes the log likelihood constant term for the given number of samples.")
+    .def("get_add_log_like_const_term", &bob::learn::misc::PLDABase::getAddLogLikeConstTerm, (arg("self"), arg("a")), "Computes the log likelihood constant term for the given number of samples, and adds it to the machine (as well as gamma), if it does not already exist.")
+    .def("get_log_like_const_term", &bob::learn::misc::PLDABase::getLogLikeConstTerm, (arg("self"), arg("a")), "Returns the log likelihood constant term for the given number of samples if it has already been put in cache. Throws an exception otherwise.")
+    .def("clear_maps", &bob::learn::misc::PLDABase::clearMaps, (arg("self")), "Clear the maps containing the gamma's as well as the log likelihood constant term for few number of samples. These maps are used to make likelihood computations faster.")
     .def("compute_log_likelihood_point_estimate", &py_log_likelihood_point_estimate, (arg("self"), arg("xij"), arg("hi"), arg("wij")), "Computes the log-likelihood of a sample given the latent variables hi and wij (point estimate rather than Bayesian-like full integration).")
     .def(self_ns::str(self_ns::self))
-    .add_property("__isigma__", make_function(&bob::machine::PLDABase::getISigma, return_value_policy<copy_const_reference>()), "sigma^{-1} matrix stored in cache")
-    .add_property("__alpha__", make_function(&bob::machine::PLDABase::getAlpha, return_value_policy<copy_const_reference>()), "alpha matrix stored in cache")
-    .add_property("__beta__", make_function(&bob::machine::PLDABase::getBeta, return_value_policy<copy_const_reference>()), "beta matrix stored in cache")
-    .add_property("__ft_beta__", make_function(&bob::machine::PLDABase::getFtBeta, return_value_policy<copy_const_reference>()), "F^T.beta matrix stored in cache")
-    .add_property("__gt_i_sigma__", make_function(&bob::machine::PLDABase::getGtISigma, return_value_policy<copy_const_reference>()), "G^T.sigma^{-1} matrix stored in cache")
-    .add_property("__logdet_alpha__", &bob::machine::PLDABase::getLogDetAlpha, "Logarithm of the determinant of the alpha matrix stored in cache.")
-    .add_property("__logdet_sigma__", &bob::machine::PLDABase::getLogDetSigma, "Logarithm of the determinant of the sigma matrix stored in cache.")
-    .def("__precompute__", &bob::machine::PLDABase::precompute, (arg("self")), "Precomputes useful values such as alpha and beta.")
-    .def("__precompute_log_like__", &bob::machine::PLDABase::precomputeLogLike, (arg("self")), "Precomputes useful values for log-likelihood computations.")
+    .add_property("__isigma__", make_function(&bob::learn::misc::PLDABase::getISigma, return_value_policy<copy_const_reference>()), "sigma^{-1} matrix stored in cache")
+    .add_property("__alpha__", make_function(&bob::learn::misc::PLDABase::getAlpha, return_value_policy<copy_const_reference>()), "alpha matrix stored in cache")
+    .add_property("__beta__", make_function(&bob::learn::misc::PLDABase::getBeta, return_value_policy<copy_const_reference>()), "beta matrix stored in cache")
+    .add_property("__ft_beta__", make_function(&bob::learn::misc::PLDABase::getFtBeta, return_value_policy<copy_const_reference>()), "F^T.beta matrix stored in cache")
+    .add_property("__gt_i_sigma__", make_function(&bob::learn::misc::PLDABase::getGtISigma, return_value_policy<copy_const_reference>()), "G^T.sigma^{-1} matrix stored in cache")
+    .add_property("__logdet_alpha__", &bob::learn::misc::PLDABase::getLogDetAlpha, "Logarithm of the determinant of the alpha matrix stored in cache.")
+    .add_property("__logdet_sigma__", &bob::learn::misc::PLDABase::getLogDetSigma, "Logarithm of the determinant of the sigma matrix stored in cache.")
+    .def("__precompute__", &bob::learn::misc::PLDABase::precompute, (arg("self")), "Precomputes useful values such as alpha and beta.")
+    .def("__precompute_log_like__", &bob::learn::misc::PLDABase::precomputeLogLike, (arg("self")), "Precomputes useful values for log-likelihood computations.")
   ;
 
-  class_<bob::machine::PLDAMachine, boost::shared_ptr<bob::machine::PLDAMachine> >("PLDAMachine", "A PLDAMachine contains class-specific information (from the enrolment samples) when performing Probabilistic Linear Discriminant Analysis (PLDA). It should be attached to a PLDABase that contains information such as the subspaces F and G.\n\nReferences:\n1. 'A Scalable Formulation of Probabilistic Linear Discriminant Analysis: Applied to Face Recognition', Laurent El Shafey, Chris McCool, Roy Wallace, Sebastien Marcel, TPAMI'2013\n2. 'Probabilistic Linear Discriminant Analysis for Inference About Identity', Prince and Elder, ICCV'2007.\n3. 'Probabilistic Models for Inference about Identity', Li, Fu, Mohammed, Elder and Prince, TPAMI'2012.", init<boost::shared_ptr<bob::machine::PLDABase> >((arg("self"), arg("plda_base")), "Builds a new PLDAMachine. An attached PLDABase should be provided, that can be shared by several PLDAMachine."))
+  class_<bob::learn::misc::PLDAMachine, boost::shared_ptr<bob::learn::misc::PLDAMachine> >("PLDAMachine", "A PLDAMachine contains class-specific information (from the enrolment samples) when performing Probabilistic Linear Discriminant Analysis (PLDA). It should be attached to a PLDABase that contains information such as the subspaces F and G.\n\nReferences:\n1. 'A Scalable Formulation of Probabilistic Linear Discriminant Analysis: Applied to Face Recognition', Laurent El Shafey, Chris McCool, Roy Wallace, Sebastien Marcel, TPAMI'2013\n2. 'Probabilistic Linear Discriminant Analysis for Inference About Identity', Prince and Elder, ICCV'2007.\n3. 'Probabilistic Models for Inference about Identity', Li, Fu, Mohammed, Elder and Prince, TPAMI'2012.", init<boost::shared_ptr<bob::learn::misc::PLDABase> >((arg("self"), arg("plda_base")), "Builds a new PLDAMachine. An attached PLDABase should be provided, that can be shared by several PLDAMachine."))
     .def(init<>((arg("self")), "Constructs a new empty (invalid) PLDAMachine. A PLDABase should then be set using the 'plda_base' attribute of this object."))
     .def("__init__", make_constructor(&m_init), "Constructs a new PLDAMachine from a configuration file (and a PLDABase object).")
-    .def(init<const bob::machine::PLDAMachine&>((arg("self"), arg("machine")), "Copy constructs a PLDAMachine"))
+    .def(init<const bob::learn::misc::PLDAMachine&>((arg("self"), arg("machine")), "Copy constructs a PLDAMachine"))
     .def(self == self)
     .def(self != self)
-    .def("is_similar_to", &bob::machine::PLDAMachine::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this PLDAMachine with the 'other' one to be approximately the same.")
+    .def("is_similar_to", &bob::learn::misc::PLDAMachine::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this PLDAMachine with the 'other' one to be approximately the same.")
     .def("load", &m_load, (arg("self"), arg("config")), "Loads the configuration parameters from a configuration file. The PLDABase will not be loaded, and has to be set manually using the 'plda_base' attribute.")
     .def("save", &m_save, (arg("self"), arg("config")), "Saves the configuration parameters to a configuration file. The PLDABase will not be saved, and has to be saved separately, as it can be shared by several PLDAMachines.")
-    .add_property("plda_base", &bob::machine::PLDAMachine::getPLDABase, &bob::machine::PLDAMachine::setPLDABase)
-    .add_property("dim_d", &bob::machine::PLDAMachine::getDimD, "Dimensionality of the input feature vectors")
-    .add_property("dim_f", &bob::machine::PLDAMachine::getDimF, "Dimensionality of the F subspace/matrix of the PLDA model")
-    .add_property("dim_g", &bob::machine::PLDAMachine::getDimG, "Dimensionality of the G subspace/matrix of the PLDA model")
-    .add_property("n_samples", &bob::machine::PLDAMachine::getNSamples, &bob::machine::PLDAMachine::setNSamples, "Number of enrolled samples")
-    .add_property("w_sum_xit_beta_xi", &bob::machine::PLDAMachine::getWSumXitBetaXi, &bob::machine::PLDAMachine::setWSumXitBetaXi)
-    .add_property("weighted_sum", make_function(&bob::machine::PLDAMachine::getWeightedSum, return_value_policy<copy_const_reference>()), &bob::machine::PLDAMachine::setWeightedSum)
-    .add_property("log_likelihood", &bob::machine::PLDAMachine::getLogLikelihood, &bob::machine::PLDAMachine::setLogLikelihood)
-    .def("has_gamma", &bob::machine::PLDAMachine::hasGamma, (arg("self"), arg("a")), "Tells if the gamma matrix for the given number of samples has already been computed. (gamma = inverse(I+a.F^T.beta.F), please check the documentation/source code for more details.")
-    .def("get_add_gamma", make_function(&bob::machine::PLDAMachine::getAddGamma, return_value_policy<copy_const_reference>(), (arg("self"), arg("a"))), "Computes the gamma matrix for the given number of samples. (gamma = inverse(I+a.F^T.beta.F), please check the documentation/source code for more details.")
-    .def("get_gamma", make_function(&bob::machine::PLDAMachine::getGamma, return_value_policy<copy_const_reference>(), (arg("self"), arg("a"))), "Returns the gamma matrix for the given number of samples if it has already been put in cache. Throws an exception otherwise. (gamma = inverse(I+a.F^T.beta.F), please check the documentation/source code for more details.")
-    .def("has_log_like_const_term", &bob::machine::PLDAMachine::hasLogLikeConstTerm, (arg("self"), arg("a")), "Tells if the log likelihood constant term for the given number of samples has already been computed.")
-    .def("get_add_log_like_const_term", &bob::machine::PLDAMachine::getAddLogLikeConstTerm, (arg("self"), arg("a")), "Computes the log likelihood constant term for the given number of samples, and adds it to the machine (as well as gamma), if it does not already exist.")
-    .def("get_log_like_const_term", &bob::machine::PLDAMachine::getLogLikeConstTerm, (arg("self"), arg("a")), "Returns the log likelihood constant term for the given number of samples if it has already been put in cache. Throws an exception otherwise.")
-    .def("clear_maps", &bob::machine::PLDAMachine::clearMaps, (arg("self")), "Clears the maps containing the gamma's as well as the log likelihood constant term for few number of samples. These maps are used to make likelihood computations faster.")
+    .add_property("plda_base", &bob::learn::misc::PLDAMachine::getPLDABase, &bob::learn::misc::PLDAMachine::setPLDABase)
+    .add_property("dim_d", &bob::learn::misc::PLDAMachine::getDimD, "Dimensionality of the input feature vectors")
+    .add_property("dim_f", &bob::learn::misc::PLDAMachine::getDimF, "Dimensionality of the F subspace/matrix of the PLDA model")
+    .add_property("dim_g", &bob::learn::misc::PLDAMachine::getDimG, "Dimensionality of the G subspace/matrix of the PLDA model")
+    .add_property("n_samples", &bob::learn::misc::PLDAMachine::getNSamples, &bob::learn::misc::PLDAMachine::setNSamples, "Number of enrolled samples")
+    .add_property("w_sum_xit_beta_xi", &bob::learn::misc::PLDAMachine::getWSumXitBetaXi, &bob::learn::misc::PLDAMachine::setWSumXitBetaXi)
+    .add_property("weighted_sum", make_function(&bob::learn::misc::PLDAMachine::getWeightedSum, return_value_policy<copy_const_reference>()), &bob::learn::misc::PLDAMachine::setWeightedSum)
+    .add_property("log_likelihood", &bob::learn::misc::PLDAMachine::getLogLikelihood, &bob::learn::misc::PLDAMachine::setLogLikelihood)
+    .def("has_gamma", &bob::learn::misc::PLDAMachine::hasGamma, (arg("self"), arg("a")), "Tells if the gamma matrix for the given number of samples has already been computed. (gamma = inverse(I+a.F^T.beta.F), please check the documentation/source code for more details.")
+    .def("get_add_gamma", make_function(&bob::learn::misc::PLDAMachine::getAddGamma, return_value_policy<copy_const_reference>(), (arg("self"), arg("a"))), "Computes the gamma matrix for the given number of samples. (gamma = inverse(I+a.F^T.beta.F), please check the documentation/source code for more details.")
+    .def("get_gamma", make_function(&bob::learn::misc::PLDAMachine::getGamma, return_value_policy<copy_const_reference>(), (arg("self"), arg("a"))), "Returns the gamma matrix for the given number of samples if it has already been put in cache. Throws an exception otherwise. (gamma = inverse(I+a.F^T.beta.F), please check the documentation/source code for more details.")
+    .def("has_log_like_const_term", &bob::learn::misc::PLDAMachine::hasLogLikeConstTerm, (arg("self"), arg("a")), "Tells if the log likelihood constant term for the given number of samples has already been computed.")
+    .def("get_add_log_like_const_term", &bob::learn::misc::PLDAMachine::getAddLogLikeConstTerm, (arg("self"), arg("a")), "Computes the log likelihood constant term for the given number of samples, and adds it to the machine (as well as gamma), if it does not already exist.")
+    .def("get_log_like_const_term", &bob::learn::misc::PLDAMachine::getLogLikeConstTerm, (arg("self"), arg("a")), "Returns the log likelihood constant term for the given number of samples if it has already been put in cache. Throws an exception otherwise.")
+    .def("clear_maps", &bob::learn::misc::PLDAMachine::clearMaps, (arg("self")), "Clears the maps containing the gamma's as well as the log likelihood constant term for few number of samples. These maps are used to make likelihood computations faster.")
     .def("compute_log_likelihood", &computeLogLikelihood, computeLogLikelihood_overloads((arg("self"), arg("sample"), arg("use_enrolled_samples")=true), "Computes the log-likelihood considering only the probe sample(s) or jointly the probe sample(s) and the enrolled samples."))
     .def("__call__", &plda_forward_sample, (arg("self"), arg("sample")), "Processes a sample and returns a log-likelihood ratio score.")
     .def("forward", &plda_forward_sample, (arg("self"), arg("sample")), "Processes a sample and returns a log-likelihood ratio score.")
diff --git a/bob/learn/misc/old/plda_trainer.cc b/bob/learn/misc/old/plda_trainer.cc
index a5b2cf4..62c39d9 100644
--- a/bob/learn/misc/old/plda_trainer.cc
+++ b/bob/learn/misc/old/plda_trainer.cc
@@ -15,9 +15,9 @@
 
 using namespace boost::python;
 
-typedef bob::trainer::EMTrainer<bob::machine::PLDABase, std::vector<blitz::Array<double,2> > > EMTrainerPLDA;
+typedef bob::learn::misc::EMTrainer<bob::learn::misc::PLDABase, std::vector<blitz::Array<double,2> > > EMTrainerPLDA;
 
-static void plda_train(EMTrainerPLDA& t, bob::machine::PLDABase& m, object data)
+static void plda_train(EMTrainerPLDA& t, bob::learn::misc::PLDABase& m, object data)
 {
   stl_input_iterator<bob::python::const_ndarray> dbegin(data), dend;
   std::vector<bob::python::const_ndarray> vdata(dbegin, dend);
@@ -29,7 +29,7 @@ static void plda_train(EMTrainerPLDA& t, bob::machine::PLDABase& m, object data)
   t.train(m, vdata_ref);
 }
 
-static void plda_initialize(EMTrainerPLDA& t, bob::machine::PLDABase& m, object data)
+static void plda_initialize(EMTrainerPLDA& t, bob::learn::misc::PLDABase& m, object data)
 {
   stl_input_iterator<bob::python::const_ndarray> dbegin(data), dend;
   std::vector<bob::python::const_ndarray> vdata(dbegin, dend);
@@ -41,7 +41,7 @@ static void plda_initialize(EMTrainerPLDA& t, bob::machine::PLDABase& m, object
   t.initialize(m, vdata_ref);
 }
 
-static void plda_eStep(EMTrainerPLDA& t, bob::machine::PLDABase& m, object data)
+static void plda_eStep(EMTrainerPLDA& t, bob::learn::misc::PLDABase& m, object data)
 {
   stl_input_iterator<bob::python::const_ndarray> dbegin(data), dend;
   std::vector<bob::python::const_ndarray> vdata(dbegin, dend);
@@ -53,7 +53,7 @@ static void plda_eStep(EMTrainerPLDA& t, bob::machine::PLDABase& m, object data)
   t.eStep(m, vdata_ref);
 }
 
-static void plda_mStep(EMTrainerPLDA& t, bob::machine::PLDABase& m, object data)
+static void plda_mStep(EMTrainerPLDA& t, bob::learn::misc::PLDABase& m, object data)
 {
   stl_input_iterator<bob::python::const_ndarray> dbegin(data), dend;
   std::vector<bob::python::const_ndarray> vdata(dbegin, dend);
@@ -65,7 +65,7 @@ static void plda_mStep(EMTrainerPLDA& t, bob::machine::PLDABase& m, object data)
   t.mStep(m, vdata_ref);
 }
 
-static void plda_finalize(EMTrainerPLDA& t, bob::machine::PLDABase& m, object data)
+static void plda_finalize(EMTrainerPLDA& t, bob::learn::misc::PLDABase& m, object data)
 {
   stl_input_iterator<bob::python::const_ndarray> dbegin(data), dend;
   std::vector<bob::python::const_ndarray> vdata(dbegin, dend);
@@ -77,14 +77,14 @@ static void plda_finalize(EMTrainerPLDA& t, bob::machine::PLDABase& m, object da
   t.finalize(m, vdata_ref);
 }
 
-static object get_z_first_order(bob::trainer::PLDATrainer& m) {
+static object get_z_first_order(bob::learn::misc::PLDATrainer& m) {
   const std::vector<blitz::Array<double,2> >& v = m.getZFirstOrder();
   list retval;
   for (size_t k=0; k<v.size(); ++k) retval.append(v[k]); //copy
   return tuple(retval);
 }
 
-static object get_z_second_order(bob::trainer::PLDATrainer& m) {
+static object get_z_second_order(bob::learn::misc::PLDATrainer& m) {
   const std::vector<blitz::Array<double,3> >& v = m.getZSecondOrder();
   list retval;
   for (size_t k=0; k<v.size(); ++k) retval.append(v[k]); //copy
@@ -105,50 +105,50 @@ void bind_trainer_plda()
     .def("m_step", &plda_mStep, (arg("self"), arg("machine"), arg("data")), "Updates the Machine parameters given the hidden variable distribution (or the sufficient statistics)")
   ;
 
-  class_<bob::trainer::PLDATrainer, boost::noncopyable, bases<EMTrainerPLDA> > PLDAT("PLDATrainer", "A trainer for Probabilistic Linear Discriminant Analysis (PLDA). The train() method will learn the mu, F, G and Sigma of the model, whereas the enrol() method, will store model information about the enrolment samples for a specific class.\n\nReferences:\n1. 'A Scalable Formulation of Probabilistic Linear Discriminant Analysis: Applied to Face Recognition', Laurent El Shafey, Chris McCool, Roy Wallace, Sebastien Marcel, TPAMI'2013\n2. 'Probabilistic Linear Discriminant Analysis for Inference About Identity', Prince and Elder, ICCV'2007.\n3. 'Probabilistic Models for Inference about Identity', Li, Fu, Mohammed, Elder and Prince, TPAMI'2012.", no_init);
+  class_<bob::learn::misc::PLDATrainer, boost::noncopyable, bases<EMTrainerPLDA> > PLDAT("PLDATrainer", "A trainer for Probabilistic Linear Discriminant Analysis (PLDA). The train() method will learn the mu, F, G and Sigma of the model, whereas the enrol() method will store model information about the enrolment samples for a specific class.\n\nReferences:\n1. 'A Scalable Formulation of Probabilistic Linear Discriminant Analysis: Applied to Face Recognition', Laurent El Shafey, Chris McCool, Roy Wallace, Sebastien Marcel, TPAMI'2013\n2. 'Probabilistic Linear Discriminant Analysis for Inference About Identity', Prince and Elder, ICCV'2007.\n3. 'Probabilistic Models for Inference about Identity', Li, Fu, Mohammed, Elder and Prince, TPAMI'2012.", no_init);
 
   PLDAT.def(init<optional<const size_t, const bool> >((arg("self"), arg("max_iterations")=100, arg("use_sum_second_order")=true),"Initializes a new PLDATrainer."))
-    .def(init<const bob::trainer::PLDATrainer&>((arg("self"), arg("trainer")), "Copy constructs a PLDATrainer"))
+    .def(init<const bob::learn::misc::PLDATrainer&>((arg("self"), arg("trainer")), "Copy constructs a PLDATrainer"))
     .def(self == self)
     .def(self != self)
-    .def("is_similar_to", &bob::trainer::PLDATrainer::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this PLDATrainer with the 'other' one to be approximately the same.")
-    .def("enrol", &bob::trainer::PLDATrainer::enrol, (arg("self"), arg("plda_machine"), arg("data")), "Enrol a class-specific model (PLDAMachine) given a set of enrolment samples.")
-    .add_property("use_sum_second_order", &bob::trainer::PLDATrainer::getUseSumSecondOrder, &bob::trainer::PLDATrainer::setUseSumSecondOrder, "Tells whether the second order statistics are stored during the training procedure, or only their sum.")
+    .def("is_similar_to", &bob::learn::misc::PLDATrainer::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this PLDATrainer with the 'other' one to be approximately the same.")
+    .def("enrol", &bob::learn::misc::PLDATrainer::enrol, (arg("self"), arg("plda_machine"), arg("data")), "Enrol a class-specific model (PLDAMachine) given a set of enrolment samples.")
+    .add_property("use_sum_second_order", &bob::learn::misc::PLDATrainer::getUseSumSecondOrder, &bob::learn::misc::PLDATrainer::setUseSumSecondOrder, "Tells whether the second order statistics are stored during the training procedure, or only their sum.")
     .add_property("z_first_order", &get_z_first_order)
     .add_property("z_second_order", &get_z_second_order)
-    .add_property("z_second_order_sum", make_function(&bob::trainer::PLDATrainer::getZSecondOrderSum, return_value_policy<copy_const_reference>()))
+    .add_property("z_second_order_sum", make_function(&bob::learn::misc::PLDATrainer::getZSecondOrderSum, return_value_policy<copy_const_reference>()))
   ;
 
   // Sets the scope to the one of the PLDATrainer
   scope s(PLDAT);
 
   // Adds enums in the previously defined current scope
-  enum_<bob::trainer::PLDATrainer::InitFMethod>("init_f_method")
-    .value("RANDOM_F", bob::trainer::PLDATrainer::RANDOM_F)
-    .value("BETWEEN_SCATTER", bob::trainer::PLDATrainer::BETWEEN_SCATTER)
+  enum_<bob::learn::misc::PLDATrainer::InitFMethod>("init_f_method")
+    .value("RANDOM_F", bob::learn::misc::PLDATrainer::RANDOM_F)
+    .value("BETWEEN_SCATTER", bob::learn::misc::PLDATrainer::BETWEEN_SCATTER)
     .export_values()
   ;
 
-  enum_<bob::trainer::PLDATrainer::InitGMethod>("init_g_method")
-    .value("RANDOM_G", bob::trainer::PLDATrainer::RANDOM_G)
-    .value("WITHIN_SCATTER", bob::trainer::PLDATrainer::WITHIN_SCATTER)
+  enum_<bob::learn::misc::PLDATrainer::InitGMethod>("init_g_method")
+    .value("RANDOM_G", bob::learn::misc::PLDATrainer::RANDOM_G)
+    .value("WITHIN_SCATTER", bob::learn::misc::PLDATrainer::WITHIN_SCATTER)
     .export_values()
   ;
 
-  enum_<bob::trainer::PLDATrainer::InitSigmaMethod>("init_sigma_method")
-    .value("RANDOM_SIGMA", bob::trainer::PLDATrainer::RANDOM_SIGMA)
-    .value("VARIANCE_G", bob::trainer::PLDATrainer::VARIANCE_G)
-    .value("CONSTANT", bob::trainer::PLDATrainer::CONSTANT)
-    .value("VARIANCE_DATA", bob::trainer::PLDATrainer::VARIANCE_DATA)
+  enum_<bob::learn::misc::PLDATrainer::InitSigmaMethod>("init_sigma_method")
+    .value("RANDOM_SIGMA", bob::learn::misc::PLDATrainer::RANDOM_SIGMA)
+    .value("VARIANCE_G", bob::learn::misc::PLDATrainer::VARIANCE_G)
+    .value("CONSTANT", bob::learn::misc::PLDATrainer::CONSTANT)
+    .value("VARIANCE_DATA", bob::learn::misc::PLDATrainer::VARIANCE_DATA)
     .export_values()
   ;
 
   // Binds randomization/enumration-related methods
-  PLDAT.add_property("init_f_method", &bob::trainer::PLDATrainer::getInitFMethod, &bob::trainer::PLDATrainer::setInitFMethod, "The method used for the initialization of F.")
-    .add_property("init_f_ratio", &bob::trainer::PLDATrainer::getInitFRatio, &bob::trainer::PLDATrainer::setInitFRatio, "The ratio used for the initialization of F.")
-    .add_property("init_g_method", &bob::trainer::PLDATrainer::getInitGMethod, &bob::trainer::PLDATrainer::setInitGMethod, "The method used for the initialization of G.")
-    .add_property("init_g_ratio", &bob::trainer::PLDATrainer::getInitGRatio, &bob::trainer::PLDATrainer::setInitGRatio, "The ratio used for the initialization of G.")
-    .add_property("init_sigma_method", &bob::trainer::PLDATrainer::getInitSigmaMethod, &bob::trainer::PLDATrainer::setInitSigmaMethod, "The method used for the initialization of sigma.")
-    .add_property("init_sigma_ratio", &bob::trainer::PLDATrainer::getInitSigmaRatio, &bob::trainer::PLDATrainer::setInitSigmaRatio, "The ratio used for the initialization of sigma.")
+  PLDAT.add_property("init_f_method", &bob::learn::misc::PLDATrainer::getInitFMethod, &bob::learn::misc::PLDATrainer::setInitFMethod, "The method used for the initialization of F.")
+    .add_property("init_f_ratio", &bob::learn::misc::PLDATrainer::getInitFRatio, &bob::learn::misc::PLDATrainer::setInitFRatio, "The ratio used for the initialization of F.")
+    .add_property("init_g_method", &bob::learn::misc::PLDATrainer::getInitGMethod, &bob::learn::misc::PLDATrainer::setInitGMethod, "The method used for the initialization of G.")
+    .add_property("init_g_ratio", &bob::learn::misc::PLDATrainer::getInitGRatio, &bob::learn::misc::PLDATrainer::setInitGRatio, "The ratio used for the initialization of G.")
+    .add_property("init_sigma_method", &bob::learn::misc::PLDATrainer::getInitSigmaMethod, &bob::learn::misc::PLDATrainer::setInitSigmaMethod, "The method used for the initialization of sigma.")
+    .add_property("init_sigma_ratio", &bob::learn::misc::PLDATrainer::getInitSigmaRatio, &bob::learn::misc::PLDATrainer::setInitSigmaRatio, "The ratio used for the initialization of sigma.")
   ;
 }
diff --git a/bob/learn/misc/old/wiener.cc b/bob/learn/misc/old/wiener.cc
index 4ff72dd..fee9aee 100644
--- a/bob/learn/misc/old/wiener.cc
+++ b/bob/learn/misc/old/wiener.cc
@@ -16,21 +16,21 @@
 
 using namespace boost::python;
 
-static void py_forward1_(const bob::machine::WienerMachine& m,
+static void py_forward1_(const bob::learn::misc::WienerMachine& m,
   bob::python::const_ndarray input, bob::python::ndarray output)
 {
   blitz::Array<double,2> output_ = output.bz<double,2>();
   m.forward_(input.bz<double,2>(), output_);
 }
 
-static void py_forward1(const bob::machine::WienerMachine& m,
+static void py_forward1(const bob::learn::misc::WienerMachine& m,
   bob::python::const_ndarray input, bob::python::ndarray output)
 {
   blitz::Array<double,2> output_ = output.bz<double,2>();
   m.forward(input.bz<double,2>(), output_);
 }
 
-static object py_forward2(const bob::machine::WienerMachine& m,
+static object py_forward2(const bob::learn::misc::WienerMachine& m,
   bob::python::const_ndarray input)
 {
   const bob::io::base::array::typeinfo& info = input.type();
@@ -40,37 +40,37 @@ static object py_forward2(const bob::machine::WienerMachine& m,
   return output.self();
 }
 
-static tuple get_shape(const bob::machine::WienerMachine& m)
+static tuple get_shape(const bob::learn::misc::WienerMachine& m)
 {
   return make_tuple(m.getHeight(), m.getWidth());
 }
 
-static void set_shape(bob::machine::WienerMachine& m,
+static void set_shape(bob::learn::misc::WienerMachine& m,
     const blitz::TinyVector<int,2>& s)
 {
   m.resize(s(0), s(1));
 }
 
-static void py_set_ps(bob::machine::WienerMachine& m,
+static void py_set_ps(bob::learn::misc::WienerMachine& m,
   bob::python::const_ndarray ps)
 {
   m.setPs(ps.bz<double,2>());
 }
 
 
-static boost::shared_ptr<bob::machine::WienerMachine> _init(boost::python::object file){
+static boost::shared_ptr<bob::learn::misc::WienerMachine> _init(boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
-  return boost::shared_ptr<bob::machine::WienerMachine>(new bob::machine::WienerMachine(*hdf5->f));
+  return boost::shared_ptr<bob::learn::misc::WienerMachine>(new bob::learn::misc::WienerMachine(*hdf5->f));
 }
 
-static void _load(bob::machine::WienerMachine& self, boost::python::object file){
+static void _load(bob::learn::misc::WienerMachine& self, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.load(*hdf5->f);
 }
 
-static void _save(const bob::machine::WienerMachine& self, boost::python::object file){
+static void _save(const bob::learn::misc::WienerMachine& self, boost::python::object file){
   if (!PyBobIoHDF5File_Check(file.ptr())) PYTHON_ERROR(TypeError, "Would have expected a bob.io.base.HDF5File");
   PyBobIoHDF5FileObject* hdf5 = (PyBobIoHDF5FileObject*) file.ptr();
   self.save(*hdf5->f);
@@ -79,22 +79,22 @@ static void _save(const bob::machine::WienerMachine& self, boost::python::object
 
 void bind_machine_wiener()
 {
-  class_<bob::machine::WienerMachine, boost::shared_ptr<bob::machine::WienerMachine> >("WienerMachine", "A Wiener filter.\nReference:\n'Computer Vision: Algorithms and Applications', Richard Szeliski, (Part 3.4.3)", init<const size_t, const size_t, const double, optional<const double> >((arg("self"), arg("height"), arg("width"), arg("pn"), arg("variance_threshold")=1e-8), "Constructs a new Wiener filter dedicated to images of the given dimensions. The filter is initialized with zero values."))
+  class_<bob::learn::misc::WienerMachine, boost::shared_ptr<bob::learn::misc::WienerMachine> >("WienerMachine", "A Wiener filter.\nReference:\n'Computer Vision: Algorithms and Applications', Richard Szeliski, (Part 3.4.3)", init<const size_t, const size_t, const double, optional<const double> >((arg("self"), arg("height"), arg("width"), arg("pn"), arg("variance_threshold")=1e-8), "Constructs a new Wiener filter dedicated to images of the given dimensions. The filter is initialized with zero values."))
     .def(init<const blitz::Array<double,2>&, const double> ((arg("self"), arg("ps"), arg("pn")), "Constructs a new WienerMachine from a set of variance estimates ps, a noise level pn."))
     .def(init<>((arg("self")), "Default constructor, builds a machine as with 'WienerMachine(0,0,0)'."))
     .def("__init__", boost::python::make_constructor(&_init), "Constructs a new WienerMachine from a configuration file.")
-    .def(init<const bob::machine::WienerMachine&>((arg("self"), arg("machine")), "Copy constructs an WienerMachine"))
+    .def(init<const bob::learn::misc::WienerMachine&>((arg("self"), arg("machine")), "Copy constructs a WienerMachine"))
     .def(self == self)
     .def(self != self)
-    .def("is_similar_to", &bob::machine::WienerMachine::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this WienerMachine with the 'other' one to be approximately the same.")
+    .def("is_similar_to", &bob::learn::misc::WienerMachine::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this WienerMachine with the 'other' one to be approximately the same.")
     .def("load", &_load, (arg("self"), arg("config")), "Loads the filter from a configuration file.")
     .def("save", &_save, (arg("self"), arg("config")), "Saves the filter to a configuration file.")
-    .add_property("pn", &bob::machine::WienerMachine::getPn, &bob::machine::WienerMachine::setPn, "Noise level Pn")
-    .add_property("variance_threshold", &bob::machine::WienerMachine::getVarianceThreshold, &bob::machine::WienerMachine::setVarianceThreshold, "Variance flooring threshold (min variance value)")
-    .add_property("ps",make_function(&bob::machine::WienerMachine::getPs, return_value_policy<copy_const_reference>()), &py_set_ps, "Variance Ps estimated at each frequency")
-    .add_property("w", make_function(&bob::machine::WienerMachine::getW, return_value_policy<copy_const_reference>()), "The Wiener filter W (W=1/(1+Pn/Ps)) (read-only)")
-    .add_property("height", &bob::machine::WienerMachine::getHeight, &bob::machine::WienerMachine::setHeight, "Height of the filter/image to process")
-    .add_property("width", &bob::machine::WienerMachine::getWidth, &bob::machine::WienerMachine::setWidth, "Width of the filter/image to process")
+    .add_property("pn", &bob::learn::misc::WienerMachine::getPn, &bob::learn::misc::WienerMachine::setPn, "Noise level Pn")
+    .add_property("variance_threshold", &bob::learn::misc::WienerMachine::getVarianceThreshold, &bob::learn::misc::WienerMachine::setVarianceThreshold, "Variance flooring threshold (min variance value)")
+    .add_property("ps",make_function(&bob::learn::misc::WienerMachine::getPs, return_value_policy<copy_const_reference>()), &py_set_ps, "Variance Ps estimated at each frequency")
+    .add_property("w", make_function(&bob::learn::misc::WienerMachine::getW, return_value_policy<copy_const_reference>()), "The Wiener filter W (W=1/(1+Pn/Ps)) (read-only)")
+    .add_property("height", &bob::learn::misc::WienerMachine::getHeight, &bob::learn::misc::WienerMachine::setHeight, "Height of the filter/image to process")
+    .add_property("width", &bob::learn::misc::WienerMachine::getWidth, &bob::learn::misc::WienerMachine::setWidth, "Width of the filter/image to process")
     .add_property("shape", &get_shape, &set_shape)
     .def("__call__", &py_forward1, (arg("self"), arg("input"), arg("output")), "Filters the input and saves results on the output.")
     .def("forward", &py_forward1, (arg("self"), arg("input"), arg("output")), "Filters the input and saves results on the output.")
diff --git a/bob/learn/misc/old/wiener_trainer.cc b/bob/learn/misc/old/wiener_trainer.cc
index 4a795ce..234f9b4 100644
--- a/bob/learn/misc/old/wiener_trainer.cc
+++ b/bob/learn/misc/old/wiener_trainer.cc
@@ -13,30 +13,30 @@
 
 using namespace boost::python;
 
-void py_train1(bob::trainer::WienerTrainer& t,
-  bob::machine::WienerMachine& m, bob::python::const_ndarray data)
+void py_train1(bob::learn::misc::WienerTrainer& t,
+  bob::learn::misc::WienerMachine& m, bob::python::const_ndarray data)
 {
   t.train(m, data.bz<double,3>());
 }
 
-object py_train2(bob::trainer::WienerTrainer& t,
+object py_train2(bob::learn::misc::WienerTrainer& t,
   bob::python::const_ndarray data)
 {
   const blitz::Array<double,3> data_ = data.bz<double,3>();
   const int height = data_.extent(1);
   const int width = data_.extent(2);
-  bob::machine::WienerMachine m(height, width, 0.);
+  bob::learn::misc::WienerMachine m(height, width, 0.);
   t.train(m, data_);
   return object(m);
 }
 
 void bind_trainer_wiener() {
 
-  class_<bob::trainer::WienerTrainer, boost::shared_ptr<bob::trainer::WienerTrainer> >("WienerTrainer", "Trains a WienerMachine on a given dataset.\nReference:\n'Computer Vision: Algorithms and Applications', Richard Szeliski\n(Part 3.4.3)", init<>((arg("self")), "Initializes a new WienerTrainer."))
-    .def(init<const bob::trainer::WienerTrainer&>((arg("self"), arg("other")), "Copy constructs a WienerTrainer"))
+  class_<bob::learn::misc::WienerTrainer, boost::shared_ptr<bob::learn::misc::WienerTrainer> >("WienerTrainer", "Trains a WienerMachine on a given dataset.\nReference:\n'Computer Vision: Algorithms and Applications', Richard Szeliski\n(Part 3.4.3)", init<>((arg("self")), "Initializes a new WienerTrainer."))
+    .def(init<const bob::learn::misc::WienerTrainer&>((arg("self"), arg("other")), "Copy constructs a WienerTrainer"))
     .def(self == self)
     .def(self != self)
-    .def("is_similar_to", &bob::trainer::WienerTrainer::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this WienerTrainer with the 'other' one to be approximately the same.")
+    .def("is_similar_to", &bob::learn::misc::WienerTrainer::is_similar_to, (arg("self"), arg("other"), arg("r_epsilon")=1e-5, arg("a_epsilon")=1e-8), "Compares this WienerTrainer with the 'other' one to be approximately the same.")
     .def("train", &py_train1, (arg("self"), arg("machine"), arg("data")), "Trains the provided WienerMachine with the given dataset.")
     .def("train", &py_train2, (arg("self"), arg("data")), "Trains a WienerMachine using the given dataset to perform the filtering. This method returns the trained WienerMachine.")
     ;
diff --git a/bob/learn/misc/old/ztnorm.cc b/bob/learn/misc/old/ztnorm.cc
index 898f512..9affb8f 100644
--- a/bob/learn/misc/old/ztnorm.cc
+++ b/bob/learn/misc/old/ztnorm.cc
@@ -36,7 +36,7 @@ static object ztnorm1(
   bob::python::ndarray ret(bob::io::base::array::t_float64, rawscores_probes_vs_models_.extent(0), rawscores_probes_vs_models_.extent(1));
   blitz::Array<double, 2> ret_ = ret.bz<double,2>();
 
-  bob::machine::ztNorm(rawscores_probes_vs_models_,
+  bob::learn::misc::ztNorm(rawscores_probes_vs_models_,
                        rawscores_zprobes_vs_models_,
                        rawscores_probes_vs_tmodels_,
                        rawscores_zprobes_vs_tmodels_,
@@ -65,7 +65,7 @@ static object ztnorm2(
   bob::python::ndarray ret(bob::io::base::array::t_float64, rawscores_probes_vs_models_.extent(0), rawscores_probes_vs_models_.extent(1));
   blitz::Array<double, 2> ret_ = ret.bz<double,2>();
 
-  bob::machine::ztNorm(rawscores_probes_vs_models_,
+  bob::learn::misc::ztNorm(rawscores_probes_vs_models_,
                        rawscores_zprobes_vs_models_,
                        rawscores_probes_vs_tmodels_,
                        rawscores_zprobes_vs_tmodels_,
@@ -87,7 +87,7 @@ static object tnorm(
   bob::python::ndarray ret(bob::io::base::array::t_float64, rawscores_probes_vs_models_.extent(0), rawscores_probes_vs_models_.extent(1));
   blitz::Array<double, 2> ret_ = ret.bz<double,2>();
 
-  bob::machine::tNorm(rawscores_probes_vs_models_,
+  bob::learn::misc::tNorm(rawscores_probes_vs_models_,
                        rawscores_probes_vs_tmodels_,
                        ret_);
 
@@ -107,7 +107,7 @@ static object znorm(
   bob::python::ndarray ret(bob::io::base::array::t_float64, rawscores_probes_vs_models_.extent(0), rawscores_probes_vs_models_.extent(1));
   blitz::Array<double, 2> ret_ = ret.bz<double,2>();
 
-  bob::machine::zNorm(rawscores_probes_vs_models_,
+  bob::learn::misc::zNorm(rawscores_probes_vs_models_,
                        rawscores_zprobes_vs_models_,
                        ret_);
 
-- 
GitLab