diff --git a/bob/example/faceverify/dct_ubm.py b/bob/example/faceverify/dct_ubm.py
index 53a2dbe7ea598a72d2ab03412d42dbfe13ef50c4..09319efa3de4ac544b4e3adaa9ad9e4fa0a3e289 100644
--- a/bob/example/faceverify/dct_ubm.py
+++ b/bob/example/faceverify/dct_ubm.py
@@ -24,7 +24,7 @@ import bob.db.atnt
 import bob.io.base
 import bob.io.image
 import bob.ip.base
-import bob.learn.misc
+import bob.learn.em
 import bob.measure
 
 import os, sys
@@ -76,14 +76,14 @@ def train(training_features, number_of_gaussians = NUMBER_OF_GAUSSIANS):
 
   input_size = training_set.shape[1]
   # create the KMeans and UBM machine
-  kmeans = bob.learn.misc.KMeansMachine(number_of_gaussians, input_size)
-  ubm = bob.learn.misc.GMMMachine(number_of_gaussians, input_size)
+  kmeans = bob.learn.em.KMeansMachine(number_of_gaussians, input_size)
+  ubm = bob.learn.em.GMMMachine(number_of_gaussians, input_size)
 
   # create the KMeansTrainer
-  kmeans_trainer = bob.learn.misc.KMeansTrainer()
+  kmeans_trainer = bob.learn.em.KMeansTrainer()
 
   # train using the KMeansTrainer
-  kmeans_trainer.train(kmeans, training_set)
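+  # in bob.learn.em the EM loop is driven by the module-level train() helper rather than trainer.train()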
+  bob.learn.em.train(kmeans_trainer, kmeans, training_set, max_iterations=10, convergence_threshold=0.001)
 
   [variances, weights] = kmeans.get_variances_and_weights_for_each_cluster(training_set)
   means = kmeans.means
@@ -94,21 +94,20 @@ def train(training_features, number_of_gaussians = NUMBER_OF_GAUSSIANS):
   ubm.weights = weights
 
   # train the GMM
-  trainer = bob.learn.misc.ML_GMMTrainer()
-  trainer.train(ubm, training_set)
-
+  trainer = bob.learn.em.ML_GMMTrainer()
+  bob.learn.em.train(trainer, ubm, training_set, max_iterations=10, convergence_threshold=0.001)
   return ubm
 
 
-def enroll(model_features, ubm, gmm_trainer):
+def enroll(model_features, ubm, gmm_trainer, max_iterations=1):
   """Enrolls the GMM model for the given model features (which should stem from the same identity)"""
   # create array set used for enrolling
   enroll_set = numpy.vstack(model_features)
   # create a GMM from the UBM
-  gmm = bob.learn.misc.GMMMachine(ubm)
+  gmm = bob.learn.em.GMMMachine(ubm)
 
   # train the GMM
-  gmm_trainer.train(gmm, enroll_set)
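+  # adapt the client GMM to the enrollment data (a single EM iteration by default)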
+  bob.learn.em.train(gmm_trainer, gmm, enroll_set, max_iterations=max_iterations)
 
   # return the resulting gmm
   return gmm
@@ -120,7 +119,7 @@ def stats(probe_feature, ubm):
   probe_feature = numpy.vstack([probe_feature])
 
   # Accumulate statistics
-  gmm_stats = bob.learn.misc.GMMStats(ubm.dim_c, ubm.dim_d)
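+  # ubm.shape holds (number of Gaussians, feature dimension), replacing the former dim_c / dim_d attributes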
+  gmm_stats = bob.learn.em.GMMStats(ubm.shape[0], ubm.shape[1])
   gmm_stats.init()
   ubm.acc_statistics(probe_feature, gmm_stats)
 
@@ -166,13 +166,12 @@ def main():
 
   #####################################################################
   ### GMM model enrollment
-  gmm_trainer = bob.learn.misc.MAP_GMMTrainer()
-  gmm_trainer.max_iterations = 1
-  gmm_trainer.set_prior_gmm(ubm)
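+  # the prior UBM and the relevance factor are now passed directly to the MAP_GMMTrainer constructor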
+  gmm_trainer = bob.learn.em.MAP_GMMTrainer(ubm, relevance_factor=4.0, update_means=True)
 
 
   # enroll a GMM model for each model identity (i.e., each client)
   model_ids = db.model_ids(groups = group)
   print("Enrolling %d GMM models" % len(model_ids))
   # generate models for each model ID
   models = {}
@@ -206,7 +205,7 @@ def main():
   negative_scores = []
 
   print("\nComputing scores")
-  distance_function = bob.learn.misc.linear_scoring
+  distance_function = bob.learn.em.linear_scoring
 
   # iterate through models and probes and compute scores
   model_count = 1
diff --git a/bob/example/faceverify/tests.py b/bob/example/faceverify/tests.py
index db076285e9f37fd1c3211e2d7ff0f226d1d3bd44..0e42691bd50a4c60ed08000123e4eafbf4e78b89 100644
--- a/bob/example/faceverify/tests.py
+++ b/bob/example/faceverify/tests.py
@@ -28,7 +28,7 @@ import bob.io.base
 import bob.io.base.test_utils
 import bob.learn.linear
 import bob.ip.gabor
-import bob.learn.misc
+import bob.learn.em
 
 import numpy
 
@@ -185,18 +185,18 @@ class FaceVerifyExampleTest(unittest.TestCase):
       ubm.save(bob.io.base.HDF5File(self.resource('dct_ubm.hdf5'), 'w'))
 
     # load GMM reference and check that it is still similar
-    ubm_ref = bob.learn.misc.GMMMachine(bob.io.base.HDF5File(self.resource('dct_ubm.hdf5')))
+    ubm_ref = bob.learn.em.GMMMachine(bob.io.base.HDF5File(self.resource('dct_ubm.hdf5')))
     self.assertTrue(ubm_ref.is_similar_to(ubm))
 
     # enroll a model with two features
-    enroller = bob.learn.misc.MAP_GMMTrainer()
-    enroller.max_iterations = 1
-    enroller.set_prior_gmm(ubm)
+    enroller = bob.learn.em.MAP_GMMTrainer(ubm, relevance_factor=0.0, update_means=True)
     model = enroll(features[10:12], ubm, enroller)
     if regenerate_references:
       model.save(bob.io.base.HDF5File(self.resource('dct_model.hdf5'), 'w'))
 
-    model_ref = bob.learn.misc.GMMMachine(bob.io.base.HDF5File(self.resource('dct_model.hdf5')))
+    model_ref = bob.learn.em.GMMMachine(bob.io.base.HDF5File(self.resource('dct_model.hdf5')))
     self.assertTrue(model_ref.is_similar_to(model))
 
     # compute probe statistics
@@ -204,10 +204,10 @@ class FaceVerifyExampleTest(unittest.TestCase):
     if regenerate_references:
       probe.save(bob.io.base.HDF5File(self.resource('dct_probe.hdf5'), 'w'))
 
-    probe_ref = bob.learn.misc.GMMStats(bob.io.base.HDF5File(self.resource('dct_probe.hdf5')))
+    probe_ref = bob.learn.em.GMMStats(bob.io.base.HDF5File(self.resource('dct_probe.hdf5')))
     self.assertTrue(probe_ref.is_similar_to(probe))
 
     # compute score
-    score = bob.learn.misc.linear_scoring([model], ubm, [probe])[0,0]
+    score = bob.learn.em.linear_scoring([model], ubm, [probe])[0,0]
     self.assertAlmostEqual(score, 6975.2165874138391)
 
diff --git a/buildout.cfg b/buildout.cfg
index dc613b1f63bdff4c75137234fba9ff8ab017dd2d..330508b22c3673d62e2c8024586b5c39cbf20499 100644
--- a/buildout.cfg
+++ b/buildout.cfg
@@ -18,7 +18,7 @@ develop = src/bob.extension
           src/bob.measure
           src/bob.learn.activation
           src/bob.learn.linear
-          src/bob.learn.misc
+          src/bob.learn.em
           src/bob.ip.base
           src/bob.ip.color
           src/bob.ip.gabor
@@ -28,7 +28,7 @@ develop = src/bob.extension
           .
 
 ; options for bob.buildout extension
-debug = true
+debug = false
 verbose = true
 newest = false
 
@@ -43,7 +43,7 @@ bob.math = git https://github.com/bioidiap/bob.math
 bob.measure = git https://github.com/bioidiap/bob.measure
 bob.learn.activation = git https://github.com/bioidiap/bob.learn.activation
 bob.learn.linear = git https://github.com/bioidiap/bob.learn.linear
-bob.learn.misc = git https://github.com/bioidiap/bob.learn.misc
+bob.learn.em = git https://github.com/bioidiap/bob.learn.em
 bob.ip.base = git https://github.com/bioidiap/bob.ip.base
 bob.ip.color = git https://github.com/bioidiap/bob.ip.color
 bob.ip.gabor = git https://github.com/bioidiap/bob.ip.gabor
diff --git a/doc/examples.rst b/doc/examples.rst
index 018043eb2d7f382d3c7853c249bb03b8ccc31dc4..8f2cf1f96497141b3b449045dcb88657fd2df555 100644
--- a/doc/examples.rst
+++ b/doc/examples.rst
@@ -354,25 +354,25 @@ The UBM model is trained using a :py:class:`bob.learn.misc.KMeansTrainer` to est
 
 .. code-block:: python
 
-   >>> kmeans_machine = bob.learn.misc.KMeansMachine(...)
-   >>> kmeans_trainer = bob.learn.misc.KMeansTrainer()
-   >>> kmeans_trainer.train(kmeans, training_set)
+   >>> kmeans_machine = bob.learn.em.KMeansMachine(...)
+   >>> kmeans_trainer = bob.learn.em.KMeansTrainer()
+   >>> bob.learn.em.train(kmeans_trainer, kmeans_machine, training_set)
 
 Afterward, the UBM is initialized with the results of the k-means training:
 
 .. code-block:: python
 
-   >>> ubm = bob.learn.misc.GMMMachine(...)
+   >>> ubm = bob.learn.em.GMMMachine(...)
    >>> ubm.means = kmeans_machine.means
    >>> [variances, weights] = kmeans_machine.get_variances_and_weights_for_each_cluster(training_set)
    >>> ubm.variances = variances
    >>> ubm.weights = weights
 
-and a :py:class:`bob.learn.misc.ML_GMMTrainer` is used to compute the actual UBM model:
+and a :py:class:`bob.learn.em.ML_GMMTrainer` is used to compute the actual UBM model:
 
 .. code-block:: python
 
-   >>> trainer = bob.learn.misc.ML_GMMTrainer()
-   >>> trainer.train(ubm, training_set)
+   >>> trainer = bob.learn.em.ML_GMMTrainer()
+   >>> bob.learn.em.train(trainer, ubm, training_set)
 
 After UBM training, the next step is the model enrollment.
@@ -381,10 +381,10 @@ For that purpose, a :py:class:`bob.learn.misc.MAP_GMMTrainer` is used:
 
 .. code-block:: python
 
-   >>> gmm_trainer = bob.learn.misc.MAP_GMMTrainer()
+   >>> gmm_trainer = bob.learn.em.MAP_GMMTrainer(ubm, relevance_factor=4.0, update_means=True)
    >>> enroll_set = numpy.vstack(enroll_features)
-   >>> model_gmm = bob.learn.misc.GMMMachine(ubm)
-   >>> gmm_trainer.train(model_gmm, model_feature_set)
+   >>> model_gmm = bob.learn.em.GMMMachine(ubm)
+   >>> bob.learn.em.train(gmm_trainer, model_gmm, enroll_set)
 
 
-Also the probe image need some processing.
+The probe image also needs some processing.
@@ -394,14 +394,14 @@ Afterward, the :py:class:`bob.learn.misc.GMMStats` statistics for each probe fil
 .. code-block:: python
 
    >>> probe_set = numpy.vstack([probe_feature])
-   >>> gmm_stats = bob.learn.misc.GMMStats()
-   >>> ubm.acc_statistics(probe_dct_blocks, gmm_stats)
+   >>> gmm_stats = bob.learn.em.GMMStats(ubm.shape[0], ubm.shape[1])
+   >>> ubm.acc_statistics(probe_set, gmm_stats)
 
-Finally, the scores for the probe files are computed using the :py:func:`bob.learn.misc.linear_scoring` function:
+Finally, the scores for the probe files are computed using the :py:func:`bob.learn.em.linear_scoring` function:
 
 .. code-block:: python
 
-   >>> score = bob.learn.misc.linear_scoring([model], ubm, [probe_stats])[0,0]
+   >>> score = bob.learn.em.linear_scoring([model], ubm, [probe_stats])[0,0]
 
 Again, the evaluation of the scores is identical to the previous examples.
 The expected ROC curve is:
diff --git a/setup.py b/setup.py
index 8d0a5a0217de4085724a4b001e8ec6149e22d9da..b65a90ff08ca986794f1ab31e7fb2172d90ec834 100644
--- a/setup.py
+++ b/setup.py
@@ -58,7 +58,7 @@ setup(
       "bob.ip.color",              # color image conversion
       "bob.ip.gabor",              # gabor graph
       "bob.learn.linear",          # eigenfaces
-      "bob.learn.misc",            # ubm-gmm
+      "bob.learn.em",              # ubm-gmm
       "bob.measure",               # computing ROC
       "bob.db.atnt",               # the AT&T (ORL) database of images
     ],