diff --git a/.travis.yml b/.travis.yml
index befdba6a218f851168f85620ee4701252da293af..62ecaf7cf0efcf384baf60a0c5139f6b769840e4 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -4,8 +4,8 @@ matrix:
   - python: 2.6
   - python: 2.7
     env:
-    - secure: lzPxGD45F6DRm108SBxkcsnM+zVH7p59/s34WVc6ZVlRI792xajoTJBC7pE087W01HPiofkVigqjCbsZvgDI9JggPgtOhE9Ugifzpm1vXRTZOlBXDx3fTsH/FxcHfWYRx8M3rnONgdNoyeBvw8mz+TKm6zCtNdZ+0IZEXSIDvhU=
-    - secure: ZgUPtwmsOIGnb4aevKHxm2YqTRsKKt+2MAcsgqhG8ClD4OOEUV7nyo2tVZt3RcoURjZGoCaLfWYI4MkzfwD/m1GjA1BcEi5DeLUEYvEIv3N69+eTldZBHCONL3heLbrmNHBLP0tyxHV9eSd2B1qsknn4ndyGXJm6Llu9J8Frv8E=
+    - secure: gBLc7oAwSQZtxHf/olpoPS1dmlA9nTcaFBBSTi62BPth8d8BlPrLoGluWuEIyyjbWgoVLbzNHLG0KL1zsjoV8ZWMIed7WSLZBGu+wU8K2gvgpTKwsFb0OEAix5bE2/QrMZtdPDM+izOMPlFGTT2xF2NFd3gn3N9dJxntTq/hcZA=
+    - secure: aMJ2P2EwYHrsweAFa8X9cm2nrBFFDGoRBH98cXAbOfkgrbHseoIEZN9nx/ItYteGZqv8DOi0lMlucSxU5OhvdLN+YoGn67TIb7kHtux8V2XfO57P5d/fFkJjqBVbBzXolqHvIXVkDEdriaDvFrvIospPiaPgaL8JdqcHA5k7lRQ=
   - python: 3.2
     env:
     - NUMPYSPEC===1.8.0
@@ -25,7 +25,7 @@ install:
 - python bootstrap.py
 - CFLAGS=-coverage ./bin/buildout
 script:
-- ./bin/python -c 'from bob.ip.gabor import get_config; print(get_config())'
+- ./bin/python -c 'from bob.example.faceverify import get_config; print(get_config())'
 - ./bin/coverage run --source=bob.example.faceverify ./bin/nosetests -sv
 - ./bin/sphinx-build -b doctest doc sphinx
 - ./bin/sphinx-build -b html doc sphinx
diff --git a/MANIFEST.in b/MANIFEST.in
index 0b80a7ac0379f55e0a1e0d2fe3513db0200fc276..71029447d5437e537abf924570dfcae1d524f0e1 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -2,5 +2,5 @@ include README.rst
 include bootstrap.py
 include buildout.cfg
 recursive-include doc *.rst *.png conf.py
-recursive-include xbob/example/faceverify/tests *.hdf5
+recursive-include bob/example/faceverify/tests *.hdf5
 
diff --git a/README.rst b/README.rst
index e7336fafdc4261ca85450884404d8c7567268e2e..5ab655960e9567e674386955c4c1d48ae8ac0e48 100644
--- a/README.rst
+++ b/README.rst
@@ -4,7 +4,7 @@ Face verification using Bob
 .. note::
   If you are reading this page through our GitHub portal and not through PyPI, note that the development tip of the package may not be stable, or may become unstable in a matter of moments.
 
-  Go to http://pypi.python.org/pypi/xbob.example.faceverify to download the latest stable version of this package.
+  Go to http://pypi.python.org/pypi/bob.example.faceverify to download the latest stable version of this package.
 
 Overview
 --------
@@ -45,14 +45,14 @@ Download
 
 Finally, to download this package, you can extract the .zip file from the link below, or you can open a shell in a directory of your choice and call::
 
-  $ wget https://pypi.python.org/packages/source/b/xbob.example.faceverify/xbob.example.faceverify-<version>.zip
-  $ unzip xbob.example.faceverify-<version>.zip
-  $ cd xbob.example.faceverify-<version>
+  $ wget https://pypi.python.org/packages/source/b/bob.example.faceverify/bob.example.faceverify-<version>.zip
+  $ unzip bob.example.faceverify-<version>.zip
+  $ cd bob.example.faceverify-<version>
 
 where <version> should be replaced with the current version of this package, or you can clone our git repository::
 
-  $ git clone https://github.com/bioidiap/xbob.example.faceverify.git
-  $ cd xbob.example.faceverify
+  $ git clone https://github.com/bioidiap/bob.example.faceverify.git
+  $ cd bob.example.faceverify
 
 Afterwards, please call::
 
@@ -66,5 +66,5 @@ to generate the scripts that, amongst others, will run the face verification alg
 
 (or use any other browser of your choice).
 
-If you have questions to or problems with this package, please send a request to bob-devel@googlegroups.com, or file a bug under https://github.com/bioidiap/xbob.example.faceverify/issues.
+If you have questions about or problems with this package, please send a request to bob-devel@googlegroups.com, or file a bug report at https://github.com/bioidiap/bob.example.faceverify/issues.
 
diff --git a/bob/example/faceverify/__init__.py b/bob/example/faceverify/__init__.py
index b54037ae4831cf7de2db0edd721a8add684c986e..7fd08b7b53bef2b57cc3729801185b8473af3cf3 100644
--- a/bob/example/faceverify/__init__.py
+++ b/bob/example/faceverify/__init__.py
@@ -17,4 +17,21 @@
 # You should have received a copy of the GNU General Public License
 # along with this program.  If not, see <http://www.gnu.org/licenses/>.
 
+def get_config():
+  """Returns a string containing the configuration information.
+  """
 
+  import pkg_resources
+
+  packages = pkg_resources.require(__name__)
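+  # pkg_resources.require() lists this package first, followed by its dependencies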
+  this = packages[0]
+  deps = packages[1:]
+
+  retval =  "%s: %s (%s)\n" % (this.key, this.version, this.location)
+  retval += "  - python dependencies:\n"
+  for d in deps: retval += "    - %s: %s (%s)\n" % (d.key, d.version, d.location)
+
+  return retval.strip()
+
+# gets sphinx autodoc done right - don't remove it
+__all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/bob/example/faceverify/eigenface.py b/bob/example/faceverify/eigenface.py
index f5aac9632503496bb48a50d9e7cd4068038d0e72..7f8951437a25825543272b3c3ee4d154a4f753f0 100644
--- a/bob/example/faceverify/eigenface.py
+++ b/bob/example/faceverify/eigenface.py
@@ -46,7 +46,7 @@ def load_images(db, group = None, purpose = None, database_directory = None, ima
   # iterate through the list of file names
   images = {}
   for k in files:
-    # load image and linearize it into a vector
+    # load image
     images[k.id] = bob.io.base.load(k.make_path(database_directory, image_extension)).astype(numpy.float64)
   return images
 
@@ -116,8 +116,8 @@ def main():
   model_ids = [client.id for client in atnt_db.clients(groups = 'dev')]
   models = dict((model_id, []) for model_id in model_ids) # note: py26 compat.
   # iterate over model features
-  for key, image in model_features.iteritems():
-    model_id = atnt_db.get_client_id_from_file_id(key)
+  for file_id, image in model_features.iteritems():
+    model_id = atnt_db.get_client_id_from_file_id(file_id)
     # "enroll" model by collecting all model features of this client
-    models[model_id].append(model_features[key])
+    models[model_id].append(model_features[file_id])
 
diff --git a/bob/example/faceverify/tests/__init__.py b/bob/example/faceverify/tests/__init__.py
index 275beaefe3dce08a0666c89b09f761f8594d06a7..90ab8e1d1f8d7464d378dd72adf9789be48bed02 100644
--- a/bob/example/faceverify/tests/__init__.py
+++ b/bob/example/faceverify/tests/__init__.py
@@ -54,8 +54,8 @@ class FaceVerifyExampleTest(unittest.TestCase):
       self.m_database_dir = '/idiap/group/biometric/databases/orl'
     else:
       import tempfile
-      self.m_temp_dir = tempfile.mkdtemp('xbob_atnt_db')
-      from xbob.example.faceverify.utils import atnt_database_directory
+      self.m_temp_dir = tempfile.mkdtemp('bob_atnt_db')
+      from bob.example.faceverify.utils import atnt_database_directory
       self.m_database_dir = atnt_database_directory(self.m_temp_dir)
 
   def tearDown(self):
diff --git a/doc/conf.py b/doc/conf.py
index c971fc57a2726f949155327071b133c672f629be..c1fc00a0e111d7cd3bb0656c78c97d3926c0a0f5 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -25,7 +25,17 @@ import sys, os
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
-extensions = []
+extensions = [
+  'sphinx.ext.todo',
+  'sphinx.ext.coverage',
+  'sphinx.ext.pngmath',
+  'sphinx.ext.ifconfig',
+  'sphinx.ext.autodoc',
+  'sphinx.ext.autosummary',
+  'sphinx.ext.doctest',
+  'sphinx.ext.intersphinx',
+  'matplotlib.sphinxext.plot_directive',
+]
 
 # Add any paths that contain templates here, relative to this directory.
 templates_path = ['_templates']
diff --git a/doc/examples.rst b/doc/examples.rst
index bf5a542c16aa79368bdb35191bb60a9f10edfaca..ffc92c8af35f7237c7a7f740bfd0bfec2e0768c3 100644
--- a/doc/examples.rst
+++ b/doc/examples.rst
@@ -1,6 +1,34 @@
-=====================
+.. vim: set fileencoding=utf-8 :
+.. Manuel Guenther <manuel.guenther@idiap.ch>
+.. Mon Aug 25 19:15:29 CEST 2014
+
+======================
  Running the examples
-=====================
+======================
+
+.. testsetup:: *
+
+  from __future__ import print_function
+  import numpy
+  import scipy.spatial
+  import bob.db.atnt
+  import bob.io.base
+  import bob.io.image
+  import bob.ip.base
+  import bob.ip.gabor
+  import bob.learn.linear
+  import bob.measure
+
+  training_features = [(numpy.random.rand(400) * 255) for i in range(20)]
+  probe_features = [(numpy.random.rand(400) * 255) for i in range(5)]
+  model_features = {1: [(numpy.random.rand(400) * 255) for i in range(5)]}
+  models = [[(numpy.random.rand(400) * 255) for i in range(5)]]
+  training_image = numpy.random.rand(20,20) * 255
+  model_image = numpy.random.rand(20,20) * 255
+  probe_image = numpy.random.rand(20,20) * 255
+
+  positives = numpy.random.rand(100)
+  negatives = numpy.random.rand(500) - 0.5
 
 To run the examples, just call the scripts from within the ``bin`` directory, e.g.:
 
@@ -24,7 +52,7 @@ There are three example scripts:
 
 that perform more or less complicated face verification experiments using an *unbiased* evaluation protocol.
 Each experiment creates an ROC curve that contains the final verification result of the test.
-The generated files will be ``eigenface.png``, ``gabor_graph.png``, and ``dct_ubm.png``.
+The generated files will be ``eigenface.pdf``, ``gabor_graph.pdf``, and ``dct_ubm.pdf``.
 
 Since the complexity of the algorithms increases, their expected execution times differ a lot.
 While the eigenface example should finish in a couple of seconds, the Gabor phase example could take some minutes, and the UBM/GMM model needs on the order of half an hour to compute.
@@ -40,82 +68,91 @@ The eigenface example
 The eigenface example follows the work-flow that is presented in the original paper *Eigenfaces for Recognition* [TP91]_ by Turk and Pentland.
 First, it creates an object to query the database:
 
-.. code-block:: python
+.. doctest::
 
-  >>> atnt_db = xbob.db.atnt.Database()
+  >>> atnt_db = bob.db.atnt.Database()
 
-For training the projection matrix, the training images need to be read:
+For training the projection matrix, the training images (which are in gray-scale) need to be read:
 
-.. code-block:: python
+.. doctest::
 
-  >>> training_image_files = atnt_db.files(groups = 'train', ...)
-  >>> for filename in training_image_files.values():
-  ...   training_image = bob.io.load(filename)
+  >>> training_image_files = atnt_db.objects(groups = 'world')
+  >>> for training_file in training_image_files:
+  ...   # load image
+  ...   training_image = bob.io.base.load(training_file.make_path(...)) #doctest:+SKIP
+  ...   # linearize pixels
+  ...   training_feature = training_image.flatten()
 
-Since the images are already aligned to the eye positions, they can simply be linearized (converted into one long vector) and put into a 2D array
-with one sample in each row:
+Since the images are already cropped, they can simply be linearized (converted into one long vector) and put into a 2D array with one sample in each row:
 
-.. code-block:: python
+.. doctest::
 
-  >>> training_set = numpy.vstack([image.flatten() for image in training_images.values()])
+  >>> training_set = numpy.vstack(training_features)
 
-which is used to train a ``bob.machine.LinearMachine``:
+which is used to train a :py:class:`bob.learn.linear.Machine`:
 
-.. code-block:: python
+.. doctest::
 
-  >>> pca_trainer = bob.trainer.PCATrainer()
+  >>> pca_trainer = bob.learn.linear.PCATrainer()
   >>> pca_machine, eigen_values = pca_trainer.train(training_set)
 
 For some distance functions, the eigenvalues are needed, but in our example we just ignore them.
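+
+As an illustration only (this is not part of the example script; ``model_feature`` and ``probe_feature`` are placeholder names for two projected features), such a distance could weight each eigenspace component by the inverse of its eigenvalue, yielding a Mahalanobis-like distance:
+
+.. code-block:: python
+
+  >>> # hypothetical: weight the squared differences by the inverse eigenvalues
+  >>> weights = 1. / eigen_values
+  >>> distance = numpy.sqrt(numpy.sum(weights * (model_feature - probe_feature)**2))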
 
 After training, the model and probe images are loaded, linearized, and projected into the eigenspace using the trained ``pca_machine``:
 
-.. code-block:: python
+.. doctest::
 
-  >>> model_image_files = atnt_db.files(groups = 'test', purpose = 'enrol', ...)
-  >>> for filename in model_image_files.values():
-  ...   model_image = bob.io.load(filename)
-  ...   model_feature = pca_machine(model_image.flatten())
+  >>> model_image_files = atnt_db.objects(groups = 'dev', purposes = 'enrol')
+  >>> for model_file in model_image_files:
+  ...   # load image
+  ...   model_image = bob.io.base.load(model_file.make_path(...)) #doctest:+SKIP
+  ...   # project to PCA subspace
+  ...   model_feature = pca_machine.forward(model_image.flatten())
 
-  >>> probe_image_files = atnt_db.files(groups = 'test', purpose = 'probe', ...)
-  >>> for filename in probe_image_files.values():
-  ...   probe_image = bob.io.load(filename)
-  ...   probe_feature = pca_machine(probe_image.flatten())
+  >>> probe_image_files = atnt_db.objects(groups = 'dev', purposes = 'probe')
+  >>> for probe_file in probe_image_files:
+  ...   # load image
+  ...   probe_image = bob.io.base.load(probe_file.make_path(...)) #doctest:+SKIP
+  ...   # project to PCA subspace
+  ...   probe_feature = pca_machine.forward(probe_image.flatten())
 
 To follow the evaluation protocol, we *enroll* a client model for each client, simply by collecting all model feature vectors:
 
-.. code-block:: python
+.. doctest::
 
   >>> model_ids = [client.id for client in atnt_db.clients(groups = 'dev')]
-  >>> for model_feature_id in model_features:
-  ...   model_id = atnt_db.get_client_id_from_file_id(model_feature_id)
-  ...   models[model_id].append(model_features[model_feature_id])
+  >>> for model_image_id in model_features:
+  ...   # query the database for the model id of the current file id
+  ...   model_id = atnt_db.get_client_id_from_file_id(model_image_id)
+  ...   # append feature for the current model id
+  ...   #models[model_id].append(model_features[model_image_id])
 
 
 To compute the verification result, each model feature is compared to each probe feature by computing the Euclidean distance:
 
-.. code-block:: python
+.. doctest::
 
-  >>> for model in model:
+  >>> for model in models:
   ...  for probe_feature in probe_features:
   ...    for model_feature in model:
-  ...      score = bob.math.euclidean_distance(model_feature, probe_feature)
+  ...      score = scipy.spatial.distance.euclidean(model_feature, probe_feature)
 
 Finally, all scores of one model and one probe are averaged to get the final score for this pair.
 
 The results are divided into a list of positive scores (model and probe are from the same identity) and a list of negative scores (identities of model and probe differ).
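+
+A minimal sketch of these two steps (assuming the similarities of one model/probe pair were collected in a list ``scores``, and that ``positives`` and ``negatives`` start out as empty Python lists; these names are only illustrative):
+
+.. code-block:: python
+
+  >>> # average all similarity values of one (model, probe) pair
+  >>> score = numpy.mean(scores)
+  >>> # sort the final score into the positive or negative list
+  >>> if model_id == probe_file.client_id:  # hypothetical attribute access
+  ...   positives.append(score)
+  ... else:
+  ...   negatives.append(score)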
 Using these lists, the ROC curve is plotted:
 
-.. code-block:: python
+.. doctest::
 
-  >>> bob.measure.plot.roc(negatives, positives)
+  >>> bob.measure.plot.roc(negatives, positives) #doctest:+ELLIPSIS
+  [...]
 
 .. image:: eigenface.png
   :scale: 100 %
 
 and the performance is computed:
 
-.. code-block:: python
+.. doctest::
 
   >>> threshold = bob.measure.eer_threshold(negatives, positives)
   >>> FAR, FRR = bob.measure.farfrr(negatives, positives, threshold)
@@ -134,9 +171,9 @@ A better face verification example uses Gabor jet features [WFKM97]_ .
 In this example we do not define a face graph, but instead we use the Gabor jets at several grid positions in the image.
 To do that, we define:
 
-.. code-block:: python
+.. doctest::
 
-  >>> graph_machine = bob.machine.GaborGraphMachine((8,6), (104,86), (4,4))
+  >>> graph = bob.ip.gabor.Graph((8,6), (104,86), (4,4))
 
 that will create Gabor graphs with node positions from (8,6) to (104,86) with step size (4,4).
 
@@ -150,37 +187,27 @@ that will create Gabor graphs with node positions from (8,6) to (104,86) with st
   The Gabor graph extraction does not require a training stage.
   Therefore, in contrast to the eigenface example, the training images are not used in this example.
 
-Now, the Gabor graph features can be extracted from the model and probe images:
+Now, the Gabor graph features can be extracted from the model and probe images.
+Here is the code for the model graphs only:
 
-.. code-block:: python
-
-  >>> model_image_files = atnt_db.files(groups = 'test', purpose = 'enrol', ...)
-  >>> for filename in model_image_files.values():
-  ...   model_image = bob.io.load(filename)
-  ...   # ... some steps to create the Gabor jet image ...
-  ...   graph_machine(jet_image, model_feature)
-
-  >>> probe_image_files = atnt_db.files(groups = 'test', purpose = 'probe', ...)
-  >>> for filename in probe_image_files.values():
-  ...   probe_image = bob.io.load(filename)
-  ...   # ... some steps to create the Gabor jet image ...
-  ...   graph_machine(jet_image, probe_feature)
+.. doctest::
 
-For model enrollment, again we simply collect all enrollment features:
-
-.. code-block:: python
-
-  >>> model_ids = [client.id for client in atnt_db.clients(groups = 'dev')]
-  >>> for key, image in model_features.iteritems():
-  ...   model_id = atnt_db.get_client_id_from_file_id(key)
-  ...   models[model_id].append(model_features[key])
+  >>> gabor_wavelet_transform = bob.ip.gabor.Transform()
+  >>> model_image_files = atnt_db.objects(groups = 'dev', purposes = 'enrol')
+  >>> for model_file in model_image_files:
+  ...   # load image
+  ...   model_image = bob.io.base.load(model_file.make_path(...)) #doctest:+SKIP
+  ...   # create the Gabor transformed image
+  ...   trafo_image = gabor_wavelet_transform.transform(model_image)
+  ...   model_feature = graph.extract(trafo_image)
 
+For model enrollment, as above, we simply collect all enrollment features.
 To compare the Gabor graphs, several methods can be applied.
 Again, many choices for the Gabor jet comparison exist, here we take a novel Gabor phase based similarity function [GHW12]_:
 
-.. code-block:: python
+.. doctest::
 
-  >>> SIMILARITY_FUNCTION = bob.machine.GaborJetSimilarity(bob.machine.gabor_jet_similarity_type.PHASE_DIFF_PLUS_CANBERRA, gabor_wavelet_transform)
+  >>> similarity_function = bob.ip.gabor.Similarity("PhaseDiffPlusCanberra", gabor_wavelet_transform)
 
 Since we have several local features, we can exploit this fact.
 For each local position, we compute the similarity between the probe feature at this position and all model features and take the maximum value:
@@ -190,8 +217,8 @@ For each local position, we compute the similarity between the probe feature at
   >>> for model_id in model_ids:
   ...  for probe_feature in probe_features:
   ...    for model_feature in models[model_id]:
-  ...      for node_index in range(probe_feature.shape[0]):
-  ...        scores[...] = SIMILARITY_FUNCTION(model_feature[node_index], probe_feature[node_index])
+  ...      for node_index in range(len(probe_feature)):
+  ...        scores[...] = similarity_function.similarity(model_feature[node_index], probe_feature[node_index])
   ...    score = numpy.average(numpy.max(scores, axis = 0))
 
 The evaluation is identical to the evaluation in the eigenface example.
@@ -210,44 +237,48 @@ The last example shows a quite complicated, but successful algorithm.
 The first step is the feature extraction of the training image features and the collection of them in a 2D array.
 In this experiment we will use *Discrete Cosine Transform* (DCT) block features [MM09]_:
 
+.. doctest::
+
+  >>> dct_extractor = bob.ip.base.DCTFeatures(45, (12, 12), (11, 11))
+  >>> training_image_files = atnt_db.objects(groups = 'world')
+  >>> training_features_list = []
+  >>> for training_file in training_image_files:
+  ...   # load training image
+  ...   training_image = bob.io.base.load(training_file.make_path(...))  #doctest:+SKIP
+  ...   # extract DCT block features and collect them
+  ...   training_features_list.append(dct_extractor(training_image))
+
+Hence, from every image, several DCT block features are extracted independently.
+All these features are mixed together to build the training set:
+
 .. code-block:: python
 
-  >>> training_image_files = atnt_db.files(groups = 'train', ...)
-  >>> training_set_list = []
-  >>> for filename in training_image_files.values():
-  ...   training_image = bob.io.load(filename)
-  ...   # ... prepare image blocks ...
-  ...   bob.ip.block(training_image, training_image_blocks, ...)
-  ...   # ... create DCT extractor ...
-  ...   training_dct_blocks = dct_extractor(training_image_blocks)
-  ...   training_set_list.append(training_dct_blocks)
-  >>> training_set = numpy.vstack(training_set_list)
+  >>> training_set = numpy.vstack(training_features_list)
 
 With these training features, a *universal background model* (UBM) is computed [RQD00]_.
 It is a *Gaussian Mixture Model* (GMM) that holds information about the overall distribution of DCT features in facial images.
-The UBM model is trained using a bob.trainer.KMeansTrainer to estimate the means of the Gaussians:
+The UBM model is trained using a :py:class:`bob.learn.misc.KMeansTrainer` to estimate the means of the Gaussians:
 
 .. code-block:: python
 
-  >>> kmeans_machine = bob.machine.KMeansMachine(...)
-  >>> kmeans_trainer = bob.trainer.KMeansTrainer()
+  >>> kmeans_machine = bob.learn.misc.KMeansMachine(...)
+  >>> kmeans_trainer = bob.learn.misc.KMeansTrainer()
-  >>> kmeans_trainer.train(kmeans, training_set)
+  >>> kmeans_trainer.train(kmeans_machine, training_set)
 
 Afterward, the UBM is initialized with the results of the k-means training:
 
 .. code-block:: python
 
-  >>> ubm = bob.machine.GMMMachine(...)
+  >>> ubm = bob.learn.misc.GMMMachine(...)
   >>> ubm.means = kmeans_machine.means
   >>> [variances, weights] = kmeans_machine.get_variances_and_weights_for_each_cluster(training_set)
   >>> ubm.variances = variances
   >>> ubm.weights = weights
 
-and a bob.trainer.ML_GMMTrainer is used to compute the actual UBM model:
+and a :py:class:`bob.learn.misc.ML_GMMTrainer` is used to compute the actual UBM model:
 
 .. code-block:: python
 
-  >>> trainer = bob.trainer.ML_GMMTrainer()
+  >>> trainer = bob.learn.misc.ML_GMMTrainer()
   >>> trainer.train(ubm, training_set)
 
 
@@ -257,24 +288,24 @@ For this purpose, we need to get the model images sorted by identity:
 
 .. code-block:: python
 
-  >>> model_ids = atnt_db.client_ids(groups = 'test')
+  >>> model_ids = atnt_db.client_ids(groups = 'dev')
 
 Now, we load the images for each identity, extract the DCT features and enroll a model for each identity.
-For that purpose, a **bob.trainer.MAP_GMMTrainer** is used:
+For that purpose, a :py:class:`bob.learn.misc.MAP_GMMTrainer` is used:
 
 .. code-block:: python
 
-  >>> gmm_trainer = bob.trainer.MAP_GMMTrainer()
+  >>> gmm_trainer = bob.learn.misc.MAP_GMMTrainer()
   >>> # ... initialize GMM trainer ...
   >>> for model_id in model_ids:
-  ...   model_filenames = db.files(groups = 'test', purposes = 'enrol', client_ids = model_id, ...)
+  ...   model_files = atnt_db.objects(groups = 'dev', purposes = 'enrol', client_ids = model_id)
   ...   model_feature_set_list = []
-  ...   for filename in model_filenames.values():
-  ...     # ... load image and extract model image blocks ...
-  ...     model_dct_blocks = dct_extractor(model_image_blocks)
+  ...   for model_file in model_files:
+  ...     # ... load model image ...
+  ...     model_dct_blocks = dct_extractor(model_image)
   ...     model_feature_set_list.append(model_dct_blocks)
   ...   model_feature_set = numpy.vstack(model_feature_set_list)
-  ...   model_gmm = bob.machine.GMMMachine(ubm)
+  ...   model_gmm = bob.learn.misc.GMMMachine(ubm)
   ...   gmm_trainer.train(model_gmm, model_feature_set)
 
 
@@ -284,11 +315,11 @@ Afterward, the statistics for each probe file are generated:
 
 .. code-block:: python
 
-  >>> probe_image_files = atnt_db.files(groups = 'test', purposes = 'probe', ...)
-  >>> for filename in probe_image_files.values():
-  ...   # ... load image and extract probe image blocks ...
+  >>> probe_image_files = atnt_db.objects(groups = 'dev', purposes = 'probe', ...)
+  >>> for probe_file in probe_image_files:
+  ...   # ... load probe image ...
-  ...   probe_dct_blocks = dct_extractor(probe_image_blocks)
+  ...   probe_dct_blocks = dct_extractor(probe_image)
-  ...   probe_gmm_stats = bob.machine.GMMStats()
+  ...   probe_gmm_stats = bob.learn.misc.GMMStats()
-  ...   gmm_stats.init()
+  ...   probe_gmm_stats.init()
   ...   ubm.acc_statistics(probe_dct_blocks, probe_gmm_stats)
 
@@ -298,7 +329,7 @@ Finally, the scores for the probe files are computed using the function **bob.ma
 
   >>> for model_gmm in models:
   ...  for probe_gmm_stats in probes:
-  ...    score = bob.machine.linear_scoring([model_gmm], ubm, [probe_gmm_stats])[0,0]
+  ...    score = bob.learn.misc.linear_scoring([model_gmm], ubm, [probe_gmm_stats])[0,0]
 
 Again, the evaluation of the scores is identical to the previous examples.
 The expected ROC curve is:
diff --git a/doc/installation.rst b/doc/installation.rst
index 664e3d5b3c420814e5c533ca81d33149c352490d..3d83c33ca2a7929fb39a028d2362c0c084c04652 100644
--- a/doc/installation.rst
+++ b/doc/installation.rst
@@ -9,15 +9,15 @@
 
   .. code-block:: sh
 
-    $ wget https://pypi.python.org/packages/source/b/xbob.example.faceverify/xbob.example.faceverify-<version>.zip
-    $ unzip xbob.example.faceverify-<version>.zip
-    $ cd xbob.example.faceverify-<version>
+    $ wget https://pypi.python.org/packages/source/b/bob.example.faceverify/bob.example.faceverify-<version>.zip
+    $ unzip bob.example.faceverify-<version>.zip
+    $ cd bob.example.faceverify-<version>
 
-  where ``<version>`` is the latest version of the package that you can find under https://pypi.python.org/pypi/xbob.example.faceverify, or by cloning our git repository
+  where ``<version>`` is the latest version of the package that you can find under https://pypi.python.org/pypi/bob.example.faceverify, or by cloning our git repository
 
   .. code-block:: sh
 
-    $ git clone https://github.com/bioidiap/xbob.example.faceverify.git
+    $ git clone https://github.com/bioidiap/bob.example.faceverify.git
     $ cd bob.example.faceverify
 
 Installation of this example uses the `buildout <http://www.buildout.org/>`_ build environment.