From 41592c08b18fe9e9ca5cbf0ca3151d1880df0cb6 Mon Sep 17 00:00:00 2001
From: Andre Anjos <andre.dos.anjos@gmail.com>
Date: Mon, 26 May 2014 18:57:38 +0200
Subject: [PATCH] xbob -> bob

---
 .travis.yml                                   |   4 +-
 MANIFEST.in                                   |   2 +-
 README.rst                                    |  24 +--
 {xbob => bob}/__init__.py                     |   0
 {xbob => bob}/learn/__init__.py               |   0
 .../learn/misc/GaborWaveletTransform.cpp      |   0
 {xbob => bob}/learn/misc/__init__.py          |   0
 {xbob => bob}/learn/misc/bic.cpp              |   0
 {xbob => bob}/learn/misc/bic_trainer.cpp      |   0
 {xbob => bob}/learn/misc/blitz_numpy.cpp      |   0
 {xbob => bob}/learn/misc/data/data.hdf5       | Bin
 .../learn/misc/data/dataNormalized.hdf5       | Bin
 {xbob => bob}/learn/misc/data/dataforMAP.hdf5 | Bin
 .../learn/misc/data/faithful.torch3.hdf5      | Bin
 .../learn/misc/data/faithful.torch3_f64.hdf5  | Bin
 .../learn/misc/data/gmm.init_means.hdf5       | Bin
 .../learn/misc/data/gmm.init_variances.hdf5   | Bin
 .../learn/misc/data/gmm.init_weights.hdf5     | Bin
 {xbob => bob}/learn/misc/data/gmm_MAP.hdf5    | Bin
 {xbob => bob}/learn/misc/data/gmm_ML.hdf5     | Bin
 .../learn/misc/data/gmm_ML_32bit_debug.hdf5   | Bin
 .../learn/misc/data/gmm_ML_32bit_release.hdf5 | Bin
 {xbob => bob}/learn/misc/data/means.hdf5      | Bin
 .../learn/misc/data/meansAfterKMeans.hdf5     | Bin
 .../learn/misc/data/meansAfterMAP.hdf5        | Bin
 .../learn/misc/data/meansAfterML.hdf5         | Bin
 .../learn/misc/data/new_adapted_mean.hdf5     | Bin
 .../learn/misc/data/samplesFrom2G_f64.hdf5    | Bin
 {xbob => bob}/learn/misc/data/stats.hdf5      | Bin
 {xbob => bob}/learn/misc/data/variances.hdf5  | Bin
 .../learn/misc/data/variancesAfterKMeans.hdf5 | Bin
 .../learn/misc/data/variancesAfterMAP.hdf5    | Bin
 .../learn/misc/data/variancesAfterML.hdf5     | Bin
 {xbob => bob}/learn/misc/data/weights.hdf5    | Bin
 .../learn/misc/data/weightsAfterKMeans.hdf5   | Bin
 .../learn/misc/data/weightsAfterMAP.hdf5      | Bin
 .../learn/misc/data/weightsAfterML.hdf5       | Bin
 .../learn/misc/data/ztnorm_eval_eval.hdf5     | Bin
 .../learn/misc/data/ztnorm_eval_tnorm.hdf5    | Bin
 .../learn/misc/data/ztnorm_result.hdf5        | Bin
 .../learn/misc/data/ztnorm_znorm_eval.hdf5    | Bin
 .../learn/misc/data/ztnorm_znorm_tnorm.hdf5   | Bin
 {xbob => bob}/learn/misc/empca_trainer.cpp    |   0
 {xbob => bob}/learn/misc/exception.h          |   0
 {xbob => bob}/learn/misc/gabor.cpp            |   0
 {xbob => bob}/learn/misc/gaussian.cpp         |   0
 {xbob => bob}/learn/misc/gmm.cpp              |   0
 {xbob => bob}/learn/misc/gmm_trainer.cpp      |   0
 {xbob => bob}/learn/misc/hdf5.cpp             |   0
 {xbob => bob}/learn/misc/ivector.cpp          |   0
 {xbob => bob}/learn/misc/ivector_trainer.cpp  |   0
 {xbob => bob}/learn/misc/jfa.cpp              |   0
 {xbob => bob}/learn/misc/jfa_trainer.cpp      |   0
 {xbob => bob}/learn/misc/kmeans.cpp           |   0
 {xbob => bob}/learn/misc/kmeans_trainer.cpp   |   0
 {xbob => bob}/learn/misc/linearscoring.cpp    |   0
 {xbob => bob}/learn/misc/machine.cpp          |   0
 {xbob => bob}/learn/misc/main.cpp             |   0
 {xbob => bob}/learn/misc/ndarray.cpp          |   0
 {xbob => bob}/learn/misc/ndarray.h            |   0
 {xbob => bob}/learn/misc/ndarray_numpy.cpp    |   0
 {xbob => bob}/learn/misc/plda.cpp             |   0
 {xbob => bob}/learn/misc/plda_trainer.cpp     |   0
 {xbob => bob}/learn/misc/random.cpp           |   0
 {xbob => bob}/learn/misc/test_bic.py          |   0
 {xbob => bob}/learn/misc/test_em.py           |  62 +++---
 {xbob => bob}/learn/misc/test_gaussian.py     |   2 +-
 {xbob => bob}/learn/misc/test_gmm.py          |  14 +-
 {xbob => bob}/learn/misc/test_ivector.py      |   0
 .../learn/misc/test_ivector_trainer.py        |   0
 {xbob => bob}/learn/misc/test_jfa.py          |   2 +-
 {xbob => bob}/learn/misc/test_jfa_trainer.py  |   0
 {xbob => bob}/learn/misc/test_kmeans.py       |   2 +-
 .../learn/misc/test_kmeans_trainer.py         |  22 +-
 .../learn/misc/test_linearscoring.py          |   0
 {xbob => bob}/learn/misc/test_plda.py         |   2 +-
 {xbob => bob}/learn/misc/test_plda_trainer.py |   0
 {xbob => bob}/learn/misc/test_wiener.py       |   8 +-
 .../learn/misc/test_wiener_trainer.py         |   4 +-
 {xbob => bob}/learn/misc/test_ztnorm.py       |  14 +-
 {xbob => bob}/learn/misc/tinyvector.cpp       |   0
 {xbob => bob}/learn/misc/version.cpp          |  24 +--
 {xbob => bob}/learn/misc/wiener.cpp           |   0
 {xbob => bob}/learn/misc/wiener_trainer.cpp   |   0
 {xbob => bob}/learn/misc/ztnorm.cpp           |   0
 buildout.cfg                                  |  28 +--
 doc/conf.py                                   |  12 +-
 doc/guide.rst                                 | 198 +++++++++---------
 doc/py_api.rst                                |   4 +-
 setup.py                                      |  86 ++++----
 90 files changed, 257 insertions(+), 257 deletions(-)
 rename {xbob => bob}/__init__.py (100%)
 rename {xbob => bob}/learn/__init__.py (100%)
 rename {xbob => bob}/learn/misc/GaborWaveletTransform.cpp (100%)
 rename {xbob => bob}/learn/misc/__init__.py (100%)
 rename {xbob => bob}/learn/misc/bic.cpp (100%)
 rename {xbob => bob}/learn/misc/bic_trainer.cpp (100%)
 rename {xbob => bob}/learn/misc/blitz_numpy.cpp (100%)
 rename {xbob => bob}/learn/misc/data/data.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/dataNormalized.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/dataforMAP.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/faithful.torch3.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/faithful.torch3_f64.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/gmm.init_means.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/gmm.init_variances.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/gmm.init_weights.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/gmm_MAP.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/gmm_ML.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/gmm_ML_32bit_debug.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/gmm_ML_32bit_release.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/means.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/meansAfterKMeans.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/meansAfterMAP.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/meansAfterML.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/new_adapted_mean.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/samplesFrom2G_f64.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/stats.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/variances.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/variancesAfterKMeans.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/variancesAfterMAP.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/variancesAfterML.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/weights.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/weightsAfterKMeans.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/weightsAfterMAP.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/weightsAfterML.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/ztnorm_eval_eval.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/ztnorm_eval_tnorm.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/ztnorm_result.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/ztnorm_znorm_eval.hdf5 (100%)
 rename {xbob => bob}/learn/misc/data/ztnorm_znorm_tnorm.hdf5 (100%)
 rename {xbob => bob}/learn/misc/empca_trainer.cpp (100%)
 rename {xbob => bob}/learn/misc/exception.h (100%)
 rename {xbob => bob}/learn/misc/gabor.cpp (100%)
 rename {xbob => bob}/learn/misc/gaussian.cpp (100%)
 rename {xbob => bob}/learn/misc/gmm.cpp (100%)
 rename {xbob => bob}/learn/misc/gmm_trainer.cpp (100%)
 rename {xbob => bob}/learn/misc/hdf5.cpp (100%)
 rename {xbob => bob}/learn/misc/ivector.cpp (100%)
 rename {xbob => bob}/learn/misc/ivector_trainer.cpp (100%)
 rename {xbob => bob}/learn/misc/jfa.cpp (100%)
 rename {xbob => bob}/learn/misc/jfa_trainer.cpp (100%)
 rename {xbob => bob}/learn/misc/kmeans.cpp (100%)
 rename {xbob => bob}/learn/misc/kmeans_trainer.cpp (100%)
 rename {xbob => bob}/learn/misc/linearscoring.cpp (100%)
 rename {xbob => bob}/learn/misc/machine.cpp (100%)
 rename {xbob => bob}/learn/misc/main.cpp (100%)
 rename {xbob => bob}/learn/misc/ndarray.cpp (100%)
 rename {xbob => bob}/learn/misc/ndarray.h (100%)
 rename {xbob => bob}/learn/misc/ndarray_numpy.cpp (100%)
 rename {xbob => bob}/learn/misc/plda.cpp (100%)
 rename {xbob => bob}/learn/misc/plda_trainer.cpp (100%)
 rename {xbob => bob}/learn/misc/random.cpp (100%)
 rename {xbob => bob}/learn/misc/test_bic.py (100%)
 rename {xbob => bob}/learn/misc/test_em.py (70%)
 rename {xbob => bob}/learn/misc/test_gaussian.py (99%)
 rename {xbob => bob}/learn/misc/test_gmm.py (94%)
 rename {xbob => bob}/learn/misc/test_ivector.py (100%)
 rename {xbob => bob}/learn/misc/test_ivector_trainer.py (100%)
 rename {xbob => bob}/learn/misc/test_jfa.py (99%)
 rename {xbob => bob}/learn/misc/test_jfa_trainer.py (100%)
 rename {xbob => bob}/learn/misc/test_kmeans.py (98%)
 rename {xbob => bob}/learn/misc/test_kmeans_trainer.py (90%)
 rename {xbob => bob}/learn/misc/test_linearscoring.py (100%)
 rename {xbob => bob}/learn/misc/test_plda.py (99%)
 rename {xbob => bob}/learn/misc/test_plda_trainer.py (100%)
 rename {xbob => bob}/learn/misc/test_wiener.py (94%)
 rename {xbob => bob}/learn/misc/test_wiener_trainer.py (95%)
 rename {xbob => bob}/learn/misc/test_ztnorm.py (90%)
 rename {xbob => bob}/learn/misc/tinyvector.cpp (100%)
 rename {xbob => bob}/learn/misc/version.cpp (86%)
 rename {xbob => bob}/learn/misc/wiener.cpp (100%)
 rename {xbob => bob}/learn/misc/wiener_trainer.cpp (100%)
 rename {xbob => bob}/learn/misc/ztnorm.cpp (100%)

diff --git a/.travis.yml b/.travis.yml
index b78ce7b..75791a7 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -28,8 +28,8 @@ install:
   - "python bootstrap.py"
   - "CFLAGS=-coverage ./bin/buildout"
 script:
-  - "./bin/python -c 'from xbob.learn.misc import get_config; print(get_config())'"
-  - "./bin/coverage run --source=xbob.learn.misc ./bin/nosetests -sv"
+  - "./bin/python -c 'from bob.learn.misc import get_config; print(get_config())'"
+  - "./bin/coverage run --source=bob.learn.misc ./bin/nosetests -sv"
   - "./bin/sphinx-build -b doctest doc sphinx"
   - "./bin/sphinx-build -b html doc sphinx"
 after_success:
diff --git a/MANIFEST.in b/MANIFEST.in
index 09ed020..4b6cf9d 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,3 +1,3 @@
 include LICENSE README.rst bootstrap.py buildout.cfg
 recursive-include doc conf.py *.rst
-recursive-include xbob *.cpp *.h
+recursive-include bob *.cpp *.h
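
For code that depends on this package, the entire patch amounts to a one-line
import change. A minimal sketch of the before and after (hypothetical
downstream code, not part of this patch)::

  # before this patch:
  #   from xbob.learn.misc import GMMMachine
  # after this patch, the same import reads:
  from bob.learn.misc import GMMMachine

  gmm = GMMMachine(2, 3)  # a mixture of two diagonal Gaussians over 3D features
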
diff --git a/README.rst b/README.rst
index d6e3ab7..1e7eaf6 100644
--- a/README.rst
+++ b/README.rst
@@ -2,16 +2,16 @@
 .. Andre Anjos <andre.anjos@idiap.ch>
 .. Thu 22 May 2014 15:39:03 CEST
 
-.. image:: https://travis-ci.org/bioidiap/xbob.learn.misc.svg?branch=master
-   :target: https://travis-ci.org/bioidiap/xbob.learn.misc
-.. image:: https://coveralls.io/repos/bioidiap/xbob.learn.misc/badge.png
-   :target: https://coveralls.io/r/bioidiap/xbob.learn.misc
-.. image:: http://img.shields.io/github/tag/bioidiap/xbob.learn.misc.png
-   :target: https://github.com/bioidiap/xbob.learn.misc
-.. image:: http://img.shields.io/pypi/v/xbob.learn.misc.png
-   :target: https://pypi.python.org/pypi/xbob.learn.misc
-.. image:: http://img.shields.io/pypi/dm/xbob.learn.misc.png
-   :target: https://pypi.python.org/pypi/xbob.learn.misc
+.. image:: https://travis-ci.org/bioidiap/bob.learn.misc.svg?branch=master
+   :target: https://travis-ci.org/bioidiap/bob.learn.misc
+.. image:: https://coveralls.io/repos/bioidiap/bob.learn.misc/badge.png
+   :target: https://coveralls.io/r/bioidiap/bob.learn.misc
+.. image:: http://img.shields.io/github/tag/bioidiap/bob.learn.misc.png
+   :target: https://github.com/bioidiap/bob.learn.misc
+.. image:: http://img.shields.io/pypi/v/bob.learn.misc.png
+   :target: https://pypi.python.org/pypi/bob.learn.misc
+.. image:: http://img.shields.io/pypi/dm/bob.learn.misc.png
+   :target: https://pypi.python.org/pypi/bob.learn.misc
 
 ========================================================
 Python Bindings for Miscellaneous Machines and Trainers
@@ -42,7 +42,7 @@ Testing
 
 You can run a set of tests using the nose test runner::
 
-  $ nosetests -sv xbob.learn.misc
+  $ nosetests -sv bob.learn.misc
 
 .. warning::
 
@@ -59,7 +59,7 @@ You can run our documentation tests using sphinx itself::
 
 You can measure the overall test coverage with::
 
-  $ nosetests --with-coverage --cover-package=xbob.learn.misc
+  $ nosetests --with-coverage --cover-package=bob.learn.misc
 
 The ``coverage`` egg must be installed for this to work properly.
 
diff --git a/xbob/__init__.py b/bob/__init__.py
similarity index 100%
rename from xbob/__init__.py
rename to bob/__init__.py
diff --git a/xbob/learn/__init__.py b/bob/learn/__init__.py
similarity index 100%
rename from xbob/learn/__init__.py
rename to bob/learn/__init__.py
diff --git a/xbob/learn/misc/GaborWaveletTransform.cpp b/bob/learn/misc/GaborWaveletTransform.cpp
similarity index 100%
rename from xbob/learn/misc/GaborWaveletTransform.cpp
rename to bob/learn/misc/GaborWaveletTransform.cpp
diff --git a/xbob/learn/misc/__init__.py b/bob/learn/misc/__init__.py
similarity index 100%
rename from xbob/learn/misc/__init__.py
rename to bob/learn/misc/__init__.py
diff --git a/xbob/learn/misc/bic.cpp b/bob/learn/misc/bic.cpp
similarity index 100%
rename from xbob/learn/misc/bic.cpp
rename to bob/learn/misc/bic.cpp
diff --git a/xbob/learn/misc/bic_trainer.cpp b/bob/learn/misc/bic_trainer.cpp
similarity index 100%
rename from xbob/learn/misc/bic_trainer.cpp
rename to bob/learn/misc/bic_trainer.cpp
diff --git a/xbob/learn/misc/blitz_numpy.cpp b/bob/learn/misc/blitz_numpy.cpp
similarity index 100%
rename from xbob/learn/misc/blitz_numpy.cpp
rename to bob/learn/misc/blitz_numpy.cpp
diff --git a/xbob/learn/misc/data/data.hdf5 b/bob/learn/misc/data/data.hdf5
similarity index 100%
rename from xbob/learn/misc/data/data.hdf5
rename to bob/learn/misc/data/data.hdf5
diff --git a/xbob/learn/misc/data/dataNormalized.hdf5 b/bob/learn/misc/data/dataNormalized.hdf5
similarity index 100%
rename from xbob/learn/misc/data/dataNormalized.hdf5
rename to bob/learn/misc/data/dataNormalized.hdf5
diff --git a/xbob/learn/misc/data/dataforMAP.hdf5 b/bob/learn/misc/data/dataforMAP.hdf5
similarity index 100%
rename from xbob/learn/misc/data/dataforMAP.hdf5
rename to bob/learn/misc/data/dataforMAP.hdf5
diff --git a/xbob/learn/misc/data/faithful.torch3.hdf5 b/bob/learn/misc/data/faithful.torch3.hdf5
similarity index 100%
rename from xbob/learn/misc/data/faithful.torch3.hdf5
rename to bob/learn/misc/data/faithful.torch3.hdf5
diff --git a/xbob/learn/misc/data/faithful.torch3_f64.hdf5 b/bob/learn/misc/data/faithful.torch3_f64.hdf5
similarity index 100%
rename from xbob/learn/misc/data/faithful.torch3_f64.hdf5
rename to bob/learn/misc/data/faithful.torch3_f64.hdf5
diff --git a/xbob/learn/misc/data/gmm.init_means.hdf5 b/bob/learn/misc/data/gmm.init_means.hdf5
similarity index 100%
rename from xbob/learn/misc/data/gmm.init_means.hdf5
rename to bob/learn/misc/data/gmm.init_means.hdf5
diff --git a/xbob/learn/misc/data/gmm.init_variances.hdf5 b/bob/learn/misc/data/gmm.init_variances.hdf5
similarity index 100%
rename from xbob/learn/misc/data/gmm.init_variances.hdf5
rename to bob/learn/misc/data/gmm.init_variances.hdf5
diff --git a/xbob/learn/misc/data/gmm.init_weights.hdf5 b/bob/learn/misc/data/gmm.init_weights.hdf5
similarity index 100%
rename from xbob/learn/misc/data/gmm.init_weights.hdf5
rename to bob/learn/misc/data/gmm.init_weights.hdf5
diff --git a/xbob/learn/misc/data/gmm_MAP.hdf5 b/bob/learn/misc/data/gmm_MAP.hdf5
similarity index 100%
rename from xbob/learn/misc/data/gmm_MAP.hdf5
rename to bob/learn/misc/data/gmm_MAP.hdf5
diff --git a/xbob/learn/misc/data/gmm_ML.hdf5 b/bob/learn/misc/data/gmm_ML.hdf5
similarity index 100%
rename from xbob/learn/misc/data/gmm_ML.hdf5
rename to bob/learn/misc/data/gmm_ML.hdf5
diff --git a/xbob/learn/misc/data/gmm_ML_32bit_debug.hdf5 b/bob/learn/misc/data/gmm_ML_32bit_debug.hdf5
similarity index 100%
rename from xbob/learn/misc/data/gmm_ML_32bit_debug.hdf5
rename to bob/learn/misc/data/gmm_ML_32bit_debug.hdf5
diff --git a/xbob/learn/misc/data/gmm_ML_32bit_release.hdf5 b/bob/learn/misc/data/gmm_ML_32bit_release.hdf5
similarity index 100%
rename from xbob/learn/misc/data/gmm_ML_32bit_release.hdf5
rename to bob/learn/misc/data/gmm_ML_32bit_release.hdf5
diff --git a/xbob/learn/misc/data/means.hdf5 b/bob/learn/misc/data/means.hdf5
similarity index 100%
rename from xbob/learn/misc/data/means.hdf5
rename to bob/learn/misc/data/means.hdf5
diff --git a/xbob/learn/misc/data/meansAfterKMeans.hdf5 b/bob/learn/misc/data/meansAfterKMeans.hdf5
similarity index 100%
rename from xbob/learn/misc/data/meansAfterKMeans.hdf5
rename to bob/learn/misc/data/meansAfterKMeans.hdf5
diff --git a/xbob/learn/misc/data/meansAfterMAP.hdf5 b/bob/learn/misc/data/meansAfterMAP.hdf5
similarity index 100%
rename from xbob/learn/misc/data/meansAfterMAP.hdf5
rename to bob/learn/misc/data/meansAfterMAP.hdf5
diff --git a/xbob/learn/misc/data/meansAfterML.hdf5 b/bob/learn/misc/data/meansAfterML.hdf5
similarity index 100%
rename from xbob/learn/misc/data/meansAfterML.hdf5
rename to bob/learn/misc/data/meansAfterML.hdf5
diff --git a/xbob/learn/misc/data/new_adapted_mean.hdf5 b/bob/learn/misc/data/new_adapted_mean.hdf5
similarity index 100%
rename from xbob/learn/misc/data/new_adapted_mean.hdf5
rename to bob/learn/misc/data/new_adapted_mean.hdf5
diff --git a/xbob/learn/misc/data/samplesFrom2G_f64.hdf5 b/bob/learn/misc/data/samplesFrom2G_f64.hdf5
similarity index 100%
rename from xbob/learn/misc/data/samplesFrom2G_f64.hdf5
rename to bob/learn/misc/data/samplesFrom2G_f64.hdf5
diff --git a/xbob/learn/misc/data/stats.hdf5 b/bob/learn/misc/data/stats.hdf5
similarity index 100%
rename from xbob/learn/misc/data/stats.hdf5
rename to bob/learn/misc/data/stats.hdf5
diff --git a/xbob/learn/misc/data/variances.hdf5 b/bob/learn/misc/data/variances.hdf5
similarity index 100%
rename from xbob/learn/misc/data/variances.hdf5
rename to bob/learn/misc/data/variances.hdf5
diff --git a/xbob/learn/misc/data/variancesAfterKMeans.hdf5 b/bob/learn/misc/data/variancesAfterKMeans.hdf5
similarity index 100%
rename from xbob/learn/misc/data/variancesAfterKMeans.hdf5
rename to bob/learn/misc/data/variancesAfterKMeans.hdf5
diff --git a/xbob/learn/misc/data/variancesAfterMAP.hdf5 b/bob/learn/misc/data/variancesAfterMAP.hdf5
similarity index 100%
rename from xbob/learn/misc/data/variancesAfterMAP.hdf5
rename to bob/learn/misc/data/variancesAfterMAP.hdf5
diff --git a/xbob/learn/misc/data/variancesAfterML.hdf5 b/bob/learn/misc/data/variancesAfterML.hdf5
similarity index 100%
rename from xbob/learn/misc/data/variancesAfterML.hdf5
rename to bob/learn/misc/data/variancesAfterML.hdf5
diff --git a/xbob/learn/misc/data/weights.hdf5 b/bob/learn/misc/data/weights.hdf5
similarity index 100%
rename from xbob/learn/misc/data/weights.hdf5
rename to bob/learn/misc/data/weights.hdf5
diff --git a/xbob/learn/misc/data/weightsAfterKMeans.hdf5 b/bob/learn/misc/data/weightsAfterKMeans.hdf5
similarity index 100%
rename from xbob/learn/misc/data/weightsAfterKMeans.hdf5
rename to bob/learn/misc/data/weightsAfterKMeans.hdf5
diff --git a/xbob/learn/misc/data/weightsAfterMAP.hdf5 b/bob/learn/misc/data/weightsAfterMAP.hdf5
similarity index 100%
rename from xbob/learn/misc/data/weightsAfterMAP.hdf5
rename to bob/learn/misc/data/weightsAfterMAP.hdf5
diff --git a/xbob/learn/misc/data/weightsAfterML.hdf5 b/bob/learn/misc/data/weightsAfterML.hdf5
similarity index 100%
rename from xbob/learn/misc/data/weightsAfterML.hdf5
rename to bob/learn/misc/data/weightsAfterML.hdf5
diff --git a/xbob/learn/misc/data/ztnorm_eval_eval.hdf5 b/bob/learn/misc/data/ztnorm_eval_eval.hdf5
similarity index 100%
rename from xbob/learn/misc/data/ztnorm_eval_eval.hdf5
rename to bob/learn/misc/data/ztnorm_eval_eval.hdf5
diff --git a/xbob/learn/misc/data/ztnorm_eval_tnorm.hdf5 b/bob/learn/misc/data/ztnorm_eval_tnorm.hdf5
similarity index 100%
rename from xbob/learn/misc/data/ztnorm_eval_tnorm.hdf5
rename to bob/learn/misc/data/ztnorm_eval_tnorm.hdf5
diff --git a/xbob/learn/misc/data/ztnorm_result.hdf5 b/bob/learn/misc/data/ztnorm_result.hdf5
similarity index 100%
rename from xbob/learn/misc/data/ztnorm_result.hdf5
rename to bob/learn/misc/data/ztnorm_result.hdf5
diff --git a/xbob/learn/misc/data/ztnorm_znorm_eval.hdf5 b/bob/learn/misc/data/ztnorm_znorm_eval.hdf5
similarity index 100%
rename from xbob/learn/misc/data/ztnorm_znorm_eval.hdf5
rename to bob/learn/misc/data/ztnorm_znorm_eval.hdf5
diff --git a/xbob/learn/misc/data/ztnorm_znorm_tnorm.hdf5 b/bob/learn/misc/data/ztnorm_znorm_tnorm.hdf5
similarity index 100%
rename from xbob/learn/misc/data/ztnorm_znorm_tnorm.hdf5
rename to bob/learn/misc/data/ztnorm_znorm_tnorm.hdf5
diff --git a/xbob/learn/misc/empca_trainer.cpp b/bob/learn/misc/empca_trainer.cpp
similarity index 100%
rename from xbob/learn/misc/empca_trainer.cpp
rename to bob/learn/misc/empca_trainer.cpp
diff --git a/xbob/learn/misc/exception.h b/bob/learn/misc/exception.h
similarity index 100%
rename from xbob/learn/misc/exception.h
rename to bob/learn/misc/exception.h
diff --git a/xbob/learn/misc/gabor.cpp b/bob/learn/misc/gabor.cpp
similarity index 100%
rename from xbob/learn/misc/gabor.cpp
rename to bob/learn/misc/gabor.cpp
diff --git a/xbob/learn/misc/gaussian.cpp b/bob/learn/misc/gaussian.cpp
similarity index 100%
rename from xbob/learn/misc/gaussian.cpp
rename to bob/learn/misc/gaussian.cpp
diff --git a/xbob/learn/misc/gmm.cpp b/bob/learn/misc/gmm.cpp
similarity index 100%
rename from xbob/learn/misc/gmm.cpp
rename to bob/learn/misc/gmm.cpp
diff --git a/xbob/learn/misc/gmm_trainer.cpp b/bob/learn/misc/gmm_trainer.cpp
similarity index 100%
rename from xbob/learn/misc/gmm_trainer.cpp
rename to bob/learn/misc/gmm_trainer.cpp
diff --git a/xbob/learn/misc/hdf5.cpp b/bob/learn/misc/hdf5.cpp
similarity index 100%
rename from xbob/learn/misc/hdf5.cpp
rename to bob/learn/misc/hdf5.cpp
diff --git a/xbob/learn/misc/ivector.cpp b/bob/learn/misc/ivector.cpp
similarity index 100%
rename from xbob/learn/misc/ivector.cpp
rename to bob/learn/misc/ivector.cpp
diff --git a/xbob/learn/misc/ivector_trainer.cpp b/bob/learn/misc/ivector_trainer.cpp
similarity index 100%
rename from xbob/learn/misc/ivector_trainer.cpp
rename to bob/learn/misc/ivector_trainer.cpp
diff --git a/xbob/learn/misc/jfa.cpp b/bob/learn/misc/jfa.cpp
similarity index 100%
rename from xbob/learn/misc/jfa.cpp
rename to bob/learn/misc/jfa.cpp
diff --git a/xbob/learn/misc/jfa_trainer.cpp b/bob/learn/misc/jfa_trainer.cpp
similarity index 100%
rename from xbob/learn/misc/jfa_trainer.cpp
rename to bob/learn/misc/jfa_trainer.cpp
diff --git a/xbob/learn/misc/kmeans.cpp b/bob/learn/misc/kmeans.cpp
similarity index 100%
rename from xbob/learn/misc/kmeans.cpp
rename to bob/learn/misc/kmeans.cpp
diff --git a/xbob/learn/misc/kmeans_trainer.cpp b/bob/learn/misc/kmeans_trainer.cpp
similarity index 100%
rename from xbob/learn/misc/kmeans_trainer.cpp
rename to bob/learn/misc/kmeans_trainer.cpp
diff --git a/xbob/learn/misc/linearscoring.cpp b/bob/learn/misc/linearscoring.cpp
similarity index 100%
rename from xbob/learn/misc/linearscoring.cpp
rename to bob/learn/misc/linearscoring.cpp
diff --git a/xbob/learn/misc/machine.cpp b/bob/learn/misc/machine.cpp
similarity index 100%
rename from xbob/learn/misc/machine.cpp
rename to bob/learn/misc/machine.cpp
diff --git a/xbob/learn/misc/main.cpp b/bob/learn/misc/main.cpp
similarity index 100%
rename from xbob/learn/misc/main.cpp
rename to bob/learn/misc/main.cpp
diff --git a/xbob/learn/misc/ndarray.cpp b/bob/learn/misc/ndarray.cpp
similarity index 100%
rename from xbob/learn/misc/ndarray.cpp
rename to bob/learn/misc/ndarray.cpp
diff --git a/xbob/learn/misc/ndarray.h b/bob/learn/misc/ndarray.h
similarity index 100%
rename from xbob/learn/misc/ndarray.h
rename to bob/learn/misc/ndarray.h
diff --git a/xbob/learn/misc/ndarray_numpy.cpp b/bob/learn/misc/ndarray_numpy.cpp
similarity index 100%
rename from xbob/learn/misc/ndarray_numpy.cpp
rename to bob/learn/misc/ndarray_numpy.cpp
diff --git a/xbob/learn/misc/plda.cpp b/bob/learn/misc/plda.cpp
similarity index 100%
rename from xbob/learn/misc/plda.cpp
rename to bob/learn/misc/plda.cpp
diff --git a/xbob/learn/misc/plda_trainer.cpp b/bob/learn/misc/plda_trainer.cpp
similarity index 100%
rename from xbob/learn/misc/plda_trainer.cpp
rename to bob/learn/misc/plda_trainer.cpp
diff --git a/xbob/learn/misc/random.cpp b/bob/learn/misc/random.cpp
similarity index 100%
rename from xbob/learn/misc/random.cpp
rename to bob/learn/misc/random.cpp
diff --git a/xbob/learn/misc/test_bic.py b/bob/learn/misc/test_bic.py
similarity index 100%
rename from xbob/learn/misc/test_bic.py
rename to bob/learn/misc/test_bic.py
diff --git a/xbob/learn/misc/test_em.py b/bob/learn/misc/test_em.py
similarity index 70%
rename from xbob/learn/misc/test_em.py
rename to bob/learn/misc/test_em.py
index bfc8a16..88a5cde 100644
--- a/xbob/learn/misc/test_em.py
+++ b/bob/learn/misc/test_em.py
@@ -10,8 +10,8 @@
 import unittest
 import numpy
 
-import xbob.io.base
-from xbob.io.base.test_utils import datafile
+import bob.io.base
+from bob.io.base.test_utils import datafile
 
 from . import KMeansMachine, GMMMachine, KMeansTrainer, \
     ML_GMMTrainer, MAP_GMMTrainer
@@ -21,9 +21,9 @@ from . import HDF5File as OldHDF5File
 def loadGMM():
   gmm = GMMMachine(2, 2)
 
-  gmm.weights = xbob.io.base.load(datafile('gmm.init_weights.hdf5', __name__))
-  gmm.means = xbob.io.base.load(datafile('gmm.init_means.hdf5', __name__))
-  gmm.variances = xbob.io.base.load(datafile('gmm.init_variances.hdf5', __name__))
+  gmm.weights = bob.io.base.load(datafile('gmm.init_weights.hdf5', __name__))
+  gmm.means = bob.io.base.load(datafile('gmm.init_means.hdf5', __name__))
+  gmm.variances = bob.io.base.load(datafile('gmm.init_variances.hdf5', __name__))
   gmm.variance_threshold = numpy.array([0.001, 0.001], 'float64')
 
   return gmm
@@ -47,7 +47,7 @@ def test_gmm_ML_1():
 
   # Trains a GMMMachine with ML_GMMTrainer
 
-  ar = xbob.io.base.load(datafile("faithful.torch3_f64.hdf5", __name__))
+  ar = bob.io.base.load(datafile("faithful.torch3_f64.hdf5", __name__))
 
   gmm = loadGMM()
 
@@ -67,13 +67,13 @@ def test_gmm_ML_2():
 
   # Trains a GMMMachine with ML_GMMTrainer; compares to an old reference
 
-  ar = xbob.io.base.load(datafile('dataNormalized.hdf5', __name__))
+  ar = bob.io.base.load(datafile('dataNormalized.hdf5', __name__))
 
   # Initialize GMMMachine
   gmm = GMMMachine(5, 45)
-  gmm.means = xbob.io.base.load(datafile('meansAfterKMeans.hdf5', __name__)).astype('float64')
-  gmm.variances = xbob.io.base.load(datafile('variancesAfterKMeans.hdf5', __name__)).astype('float64')
-  gmm.weights = numpy.exp(xbob.io.base.load(datafile('weightsAfterKMeans.hdf5', __name__)).astype('float64'))
+  gmm.means = bob.io.base.load(datafile('meansAfterKMeans.hdf5', __name__)).astype('float64')
+  gmm.variances = bob.io.base.load(datafile('variancesAfterKMeans.hdf5', __name__)).astype('float64')
+  gmm.weights = numpy.exp(bob.io.base.load(datafile('weightsAfterKMeans.hdf5', __name__)).astype('float64'))
 
   threshold = 0.001
   gmm.set_variance_thresholds(threshold)
@@ -91,9 +91,9 @@ def test_gmm_ML_2():
 
   # Test results
   # Load torch3vision reference
-  meansML_ref = xbob.io.base.load(datafile('meansAfterML.hdf5', __name__))
-  variancesML_ref = xbob.io.base.load(datafile('variancesAfterML.hdf5', __name__))
-  weightsML_ref = xbob.io.base.load(datafile('weightsAfterML.hdf5', __name__))
+  meansML_ref = bob.io.base.load(datafile('meansAfterML.hdf5', __name__))
+  variancesML_ref = bob.io.base.load(datafile('variancesAfterML.hdf5', __name__))
+  weightsML_ref = bob.io.base.load(datafile('weightsAfterML.hdf5', __name__))
 
   # Compare to current results
   assert equals(gmm.means, meansML_ref, 3e-3)
@@ -104,7 +104,7 @@ def test_gmm_MAP_1():
 
   # Train a GMMMachine with MAP_GMMTrainer
 
-  ar = xbob.io.base.load(datafile('faithful.torch3_f64.hdf5', __name__))
+  ar = bob.io.base.load(datafile('faithful.torch3_f64.hdf5', __name__))
 
   gmm = GMMMachine(OldHDF5File(datafile("gmm_ML.hdf5", __name__)))
   gmmprior = GMMMachine(OldHDF5File(datafile("gmm_ML.hdf5", __name__)))
@@ -126,11 +126,11 @@ def test_gmm_MAP_2():
   # Train a GMMMachine with MAP_GMMTrainer and compare with matlab reference
 
   map_adapt = MAP_GMMTrainer(4., True, False, False, 0.)
-  data = xbob.io.base.load(datafile('data.hdf5', __name__))
+  data = bob.io.base.load(datafile('data.hdf5', __name__))
   data = data.reshape((1, data.shape[0])) # make a 2D array out of it
-  means = xbob.io.base.load(datafile('means.hdf5', __name__))
-  variances = xbob.io.base.load(datafile('variances.hdf5', __name__))
-  weights = xbob.io.base.load(datafile('weights.hdf5', __name__))
+  means = bob.io.base.load(datafile('means.hdf5', __name__))
+  variances = bob.io.base.load(datafile('variances.hdf5', __name__))
+  weights = bob.io.base.load(datafile('weights.hdf5', __name__))
 
   gmm = GMMMachine(2,50)
   gmm.means = means
@@ -147,7 +147,7 @@ def test_gmm_MAP_2():
   map_adapt.max_iterations = 1
   map_adapt.train(gmm_adapted, data)
 
-  new_means = xbob.io.base.load(datafile('new_adapted_mean.hdf5', __name__))
+  new_means = bob.io.base.load(datafile('new_adapted_mean.hdf5', __name__))
 
   # Compare to matlab reference
   assert equals(new_means[0,:], gmm_adapted.means[:,0], 1e-4)
@@ -157,15 +157,15 @@ def test_gmm_MAP_3():
 
   # Train a GMMMachine with MAP_GMMTrainer; compares to old reference
 
-  ar = xbob.io.base.load(datafile('dataforMAP.hdf5', __name__))
+  ar = bob.io.base.load(datafile('dataforMAP.hdf5', __name__))
 
   # Initialize GMMMachine
   n_gaussians = 5
   n_inputs = 45
   prior_gmm = GMMMachine(n_gaussians, n_inputs)
-  prior_gmm.means = xbob.io.base.load(datafile('meansAfterML.hdf5', __name__))
-  prior_gmm.variances = xbob.io.base.load(datafile('variancesAfterML.hdf5', __name__))
-  prior_gmm.weights = xbob.io.base.load(datafile('weightsAfterML.hdf5', __name__))
+  prior_gmm.means = bob.io.base.load(datafile('meansAfterML.hdf5', __name__))
+  prior_gmm.variances = bob.io.base.load(datafile('variancesAfterML.hdf5', __name__))
+  prior_gmm.weights = bob.io.base.load(datafile('weightsAfterML.hdf5', __name__))
 
   threshold = 0.001
   prior_gmm.set_variance_thresholds(threshold)
@@ -190,9 +190,9 @@ def test_gmm_MAP_3():
 
   # Test results
   # Load torch3vision reference
-  meansMAP_ref = xbob.io.base.load(datafile('meansAfterMAP.hdf5', __name__))
-  variancesMAP_ref = xbob.io.base.load(datafile('variancesAfterMAP.hdf5', __name__))
-  weightsMAP_ref = xbob.io.base.load(datafile('weightsAfterMAP.hdf5', __name__))
+  meansMAP_ref = bob.io.base.load(datafile('meansAfterMAP.hdf5', __name__))
+  variancesMAP_ref = bob.io.base.load(datafile('variancesAfterMAP.hdf5', __name__))
+  weightsMAP_ref = bob.io.base.load(datafile('weightsAfterMAP.hdf5', __name__))
 
   # Compare to current results
   # Gaps are quite large. This might be explained by the fact that there is no
@@ -207,15 +207,15 @@ def test_gmm_test():
   # Tests a GMMMachine by computing scores against a model and compare to
   # an old reference
 
-  ar = xbob.io.base.load(datafile('dataforMAP.hdf5', __name__))
+  ar = bob.io.base.load(datafile('dataforMAP.hdf5', __name__))
 
   # Initialize GMMMachine
   n_gaussians = 5
   n_inputs = 45
   gmm = GMMMachine(n_gaussians, n_inputs)
-  gmm.means = xbob.io.base.load(datafile('meansAfterML.hdf5', __name__))
-  gmm.variances = xbob.io.base.load(datafile('variancesAfterML.hdf5', __name__))
-  gmm.weights = xbob.io.base.load(datafile('weightsAfterML.hdf5', __name__))
+  gmm.means = bob.io.base.load(datafile('meansAfterML.hdf5', __name__))
+  gmm.variances = bob.io.base.load(datafile('variancesAfterML.hdf5', __name__))
+  gmm.weights = bob.io.base.load(datafile('weightsAfterML.hdf5', __name__))
 
   threshold = 0.001
   gmm.set_variance_thresholds(threshold)
@@ -233,7 +233,7 @@ def test_custom_trainer():
 
   # Custom python trainer
 
-  ar = xbob.io.base.load(datafile("faithful.torch3_f64.hdf5", __name__))
+  ar = bob.io.base.load(datafile("faithful.torch3_f64.hdf5", __name__))
 
   mytrainer = MyTrainer1()
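
Stripped of the HDF5 fixtures, the MAP adaptation pattern these tests exercise
looks roughly like this (a sketch with random stand-in data; ``set_prior_gmm``
is assumed from the elided test body, everything else appears in the hunks
above)::

  import numpy
  from bob.learn.misc import GMMMachine, MAP_GMMTrainer

  data = numpy.random.randn(1, 50)       # stand-in for dataforMAP.hdf5 & co.
  prior = GMMMachine(2, 50)              # 2 Gaussians, 50-dimensional features
  adapted = GMMMachine(2, 50)            # receives the adapted parameters

  trainer = MAP_GMMTrainer(4., True, False, False, 0.)  # relevance factor 4; means only
  trainer.set_prior_gmm(prior)           # assumed API, as in the elided test body
  trainer.max_iterations = 1
  trainer.train(adapted, data)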
 
diff --git a/xbob/learn/misc/test_gaussian.py b/bob/learn/misc/test_gaussian.py
similarity index 99%
rename from xbob/learn/misc/test_gaussian.py
rename to bob/learn/misc/test_gaussian.py
index f9b82e8..3c75159 100644
--- a/xbob/learn/misc/test_gaussian.py
+++ b/bob/learn/misc/test_gaussian.py
@@ -12,7 +12,7 @@ import os
 import numpy
 import tempfile
 
-import xbob.io.base
+import bob.io.base
 
 from . import Gaussian
 
diff --git a/xbob/learn/misc/test_gmm.py b/bob/learn/misc/test_gmm.py
similarity index 94%
rename from xbob/learn/misc/test_gmm.py
rename to bob/learn/misc/test_gmm.py
index 57df3b2..e327646 100644
--- a/xbob/learn/misc/test_gmm.py
+++ b/bob/learn/misc/test_gmm.py
@@ -12,8 +12,8 @@ import os
 import numpy
 import tempfile
 
-import xbob.io.base
-from xbob.io.base.test_utils import datafile
+import bob.io.base
+from bob.io.base.test_utils import datafile
 
 from . import GMMStats, GMMMachine
 
@@ -182,7 +182,7 @@ def test_GMMMachine_1():
 def test_GMMMachine_2():
   # Test a GMMMachine (statistics)
 
-  arrayset = xbob.io.base.load(datafile("faithful.torch3_f64.hdf5", __name__))
+  arrayset = bob.io.base.load(datafile("faithful.torch3_f64.hdf5", __name__))
   gmm = GMMMachine(2, 2)
   gmm.weights   = numpy.array([0.5, 0.5], 'float64')
   gmm.means     = numpy.array([[3, 70], [4, 72]], 'float64')
@@ -204,11 +204,11 @@ def test_GMMMachine_2():
 def test_GMMMachine_3():
   # Test a GMMMachine (log-likelihood computation)
 
-  data = xbob.io.base.load(datafile('data.hdf5', __name__))
+  data = bob.io.base.load(datafile('data.hdf5', __name__))
   gmm = GMMMachine(2, 50)
-  gmm.weights   = xbob.io.base.load(datafile('weights.hdf5', __name__))
-  gmm.means     = xbob.io.base.load(datafile('means.hdf5', __name__))
-  gmm.variances = xbob.io.base.load(datafile('variances.hdf5', __name__))
+  gmm.weights   = bob.io.base.load(datafile('weights.hdf5', __name__))
+  gmm.means     = bob.io.base.load(datafile('means.hdf5', __name__))
+  gmm.variances = bob.io.base.load(datafile('variances.hdf5', __name__))
 
   # Compare the log-likelihood with the one obtained using Chris Matlab
   # implementation
diff --git a/xbob/learn/misc/test_ivector.py b/bob/learn/misc/test_ivector.py
similarity index 100%
rename from xbob/learn/misc/test_ivector.py
rename to bob/learn/misc/test_ivector.py
diff --git a/xbob/learn/misc/test_ivector_trainer.py b/bob/learn/misc/test_ivector_trainer.py
similarity index 100%
rename from xbob/learn/misc/test_ivector_trainer.py
rename to bob/learn/misc/test_ivector_trainer.py
diff --git a/xbob/learn/misc/test_jfa.py b/bob/learn/misc/test_jfa.py
similarity index 99%
rename from xbob/learn/misc/test_jfa.py
rename to bob/learn/misc/test_jfa.py
index 9c1897c..3142809 100644
--- a/xbob/learn/misc/test_jfa.py
+++ b/bob/learn/misc/test_jfa.py
@@ -13,7 +13,7 @@ import numpy
 import numpy.linalg
 import tempfile
 
-import xbob.io.base
+import bob.io.base
 
 from . import GMMMachine, GMMStats, JFABase, ISVBase, ISVMachine, JFAMachine
 
diff --git a/xbob/learn/misc/test_jfa_trainer.py b/bob/learn/misc/test_jfa_trainer.py
similarity index 100%
rename from xbob/learn/misc/test_jfa_trainer.py
rename to bob/learn/misc/test_jfa_trainer.py
diff --git a/xbob/learn/misc/test_kmeans.py b/bob/learn/misc/test_kmeans.py
similarity index 98%
rename from xbob/learn/misc/test_kmeans.py
rename to bob/learn/misc/test_kmeans.py
index 920b516..8915b6b 100644
--- a/xbob/learn/misc/test_kmeans.py
+++ b/bob/learn/misc/test_kmeans.py
@@ -12,7 +12,7 @@ import os
 import numpy
 import tempfile
 
-import xbob.io.base
+import bob.io.base
 from . import KMeansMachine
 
 from . import HDF5File as OldHDF5File
diff --git a/xbob/learn/misc/test_kmeans_trainer.py b/bob/learn/misc/test_kmeans_trainer.py
similarity index 90%
rename from xbob/learn/misc/test_kmeans_trainer.py
rename to bob/learn/misc/test_kmeans_trainer.py
index bcf3f20..8d47cf0 100644
--- a/xbob/learn/misc/test_kmeans_trainer.py
+++ b/bob/learn/misc/test_kmeans_trainer.py
@@ -9,9 +9,9 @@
 """
 import numpy
 
-import xbob.core
-import xbob.io
-from xbob.io.base.test_utils import datafile
+import bob.core
+import bob.io
+from bob.io.base.test_utils import datafile
 
 from . import KMeansMachine, KMeansTrainer
 
@@ -23,8 +23,8 @@ def equals(x, y, epsilon):
 def kmeans_plus_plus(machine, data, seed):
   """Python implementation of K-Means++ (initialization)"""
   n_data = data.shape[0]
-  rng = xbob.core.random.mt19937(seed)
-  u = xbob.core.random.uniform('int32', 0, n_data-1)
+  rng = bob.core.random.mt19937(seed)
+  u = bob.core.random.uniform('int32', 0, n_data-1)
   index = u(rng)
   machine.set_mean(0, data[index,:])
   weights = numpy.zeros(shape=(n_data,), dtype=numpy.float64)
@@ -38,13 +38,13 @@ def kmeans_plus_plus(machine, data, seed):
       weights[s] = w_cur
     weights *= weights
     weights /= numpy.sum(weights)
-    d = xbob.core.random.discrete('int32', weights)
+    d = bob.core.random.discrete('int32', weights)
     index = d(rng)
     machine.set_mean(m, data[index,:])
 
 
 def NormalizeStdArray(path):
-  array = xbob.io.base.load(path).astype('float64')
+  array = bob.io.base.load(path).astype('float64')
   std = array.std(axis=0)
   return (array/std, std)
 
@@ -104,7 +104,7 @@ def test_kmeans_a():
   # This file contains draws from two 1D Gaussian distributions:
   #   * 100 samples from N(-10,1)
   #   * 100 samples from N(10,1)
-  data = xbob.io.base.load(datafile("samplesFrom2G_f64.hdf5", __name__))
+  data = bob.io.base.load(datafile("samplesFrom2G_f64.hdf5", __name__))
 
   machine = KMeansMachine(2, 1)
 
@@ -147,9 +147,9 @@ def test_kmeans_b():
   multiplyVectorsByFactors(means, std)
   multiplyVectorsByFactors(variances, std ** 2)
 
-  gmmWeights = xbob.io.base.load(datafile('gmm.init_weights.hdf5', __name__))
-  gmmMeans = xbob.io.base.load(datafile('gmm.init_means.hdf5', __name__))
-  gmmVariances = xbob.io.base.load(datafile('gmm.init_variances.hdf5', __name__))
+  gmmWeights = bob.io.base.load(datafile('gmm.init_weights.hdf5', __name__))
+  gmmMeans = bob.io.base.load(datafile('gmm.init_means.hdf5', __name__))
+  gmmVariances = bob.io.base.load(datafile('gmm.init_variances.hdf5', __name__))
 
   if (means[0, 0] < means[1, 0]):
     means = flipRows(means)
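
The ``kmeans_plus_plus`` helper above drives ``bob.core.random``; for
comparison, the same squared-distance-weighted seeding can be sketched in
plain NumPy (illustration only, not part of the patch)::

  import numpy

  def kmeans_pp_seed(data, n_means, rng):
      # first mean: drawn uniformly at random from the data
      means = numpy.empty((n_means, data.shape[1]))
      means[0] = data[rng.randint(0, data.shape[0])]
      for m in range(1, n_means):
          # weight every sample by its squared distance to the closest mean so far
          d2 = ((data[:, None, :] - means[None, :m, :]) ** 2).sum(axis=2).min(axis=1)
          means[m] = data[rng.choice(data.shape[0], p=d2 / d2.sum())]
      return means

  means = kmeans_pp_seed(numpy.random.randn(200, 1), 2, numpy.random.RandomState(0))
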
diff --git a/xbob/learn/misc/test_linearscoring.py b/bob/learn/misc/test_linearscoring.py
similarity index 100%
rename from xbob/learn/misc/test_linearscoring.py
rename to bob/learn/misc/test_linearscoring.py
diff --git a/xbob/learn/misc/test_plda.py b/bob/learn/misc/test_plda.py
similarity index 99%
rename from xbob/learn/misc/test_plda.py
rename to bob/learn/misc/test_plda.py
index a97c7dc..4eeb22d 100644
--- a/xbob/learn/misc/test_plda.py
+++ b/bob/learn/misc/test_plda.py
@@ -15,7 +15,7 @@ import numpy
 import numpy.linalg
 import nose.tools
 
-import xbob.io.base
+import bob.io.base
 
 from . import PLDABase, PLDAMachine
 
diff --git a/xbob/learn/misc/test_plda_trainer.py b/bob/learn/misc/test_plda_trainer.py
similarity index 100%
rename from xbob/learn/misc/test_plda_trainer.py
rename to bob/learn/misc/test_plda_trainer.py
diff --git a/xbob/learn/misc/test_wiener.py b/bob/learn/misc/test_wiener.py
similarity index 94%
rename from xbob/learn/misc/test_wiener.py
rename to bob/learn/misc/test_wiener.py
index 331ca3b..6239d38 100644
--- a/xbob/learn/misc/test_wiener.py
+++ b/bob/learn/misc/test_wiener.py
@@ -12,8 +12,8 @@ import numpy
 import tempfile
 import nose.tools
 
-import xbob.sp
-import xbob.io.base
+import bob.sp
+import bob.io.base
 
 from . import WienerMachine
 
@@ -88,10 +88,10 @@ def test_forward():
 
   # Python way
   sample = numpy.random.randn(5,6)
-  sample_fft = xbob.sp.fft(sample.astype(numpy.complex128))
+  sample_fft = bob.sp.fft(sample.astype(numpy.complex128))
   w = m.w
   sample_fft_filtered = sample_fft * m.w
-  sample_filtered_py = numpy.absolute(xbob.sp.ifft(sample_fft_filtered))
+  sample_filtered_py = numpy.absolute(bob.sp.ifft(sample_fft_filtered))
 
   # Bob c++ way
   sample_filtered0 = m.forward(sample)
diff --git a/xbob/learn/misc/test_wiener_trainer.py b/bob/learn/misc/test_wiener_trainer.py
similarity index 95%
rename from xbob/learn/misc/test_wiener_trainer.py
rename to bob/learn/misc/test_wiener_trainer.py
index 676429f..53d619e 100644
--- a/xbob/learn/misc/test_wiener_trainer.py
+++ b/bob/learn/misc/test_wiener_trainer.py
@@ -8,7 +8,7 @@
 """
 
 import numpy
-import xbob.sp
+import bob.sp
 
 from . import WienerMachine, WienerTrainer
 
@@ -22,7 +22,7 @@ def train_wiener_ps(training_set):
 
   for n in range(n_samples):
     sample = (training_set[n,:,:]).astype(numpy.complex128)
-    training_fftabs[n,:,:] = numpy.absolute(xbob.sp.fft(sample))
+    training_fftabs[n,:,:] = numpy.absolute(bob.sp.fft(sample))
 
   mean = numpy.mean(training_fftabs, axis=0)
 
diff --git a/xbob/learn/misc/test_ztnorm.py b/bob/learn/misc/test_ztnorm.py
similarity index 90%
rename from xbob/learn/misc/test_ztnorm.py
rename to bob/learn/misc/test_ztnorm.py
index fe4563c..40d7efa 100644
--- a/xbob/learn/misc/test_ztnorm.py
+++ b/bob/learn/misc/test_ztnorm.py
@@ -11,8 +11,8 @@
 
 import numpy
 
-from xbob.io.base.test_utils import datafile
-import xbob.io.base
+from bob.io.base.test_utils import datafile
+import bob.io.base
 
 from . import znorm, tnorm, ztnorm
 
@@ -68,13 +68,13 @@ def test_ztnorm_simple():
   assert (abs(scores - ref_scores) < 1e-7).all()
 
 def test_ztnorm_big():
-  my_A = xbob.io.base.load(datafile("ztnorm_eval_eval.hdf5", __name__))
-  my_B = xbob.io.base.load(datafile("ztnorm_znorm_eval.hdf5", __name__))
-  my_C = xbob.io.base.load(datafile("ztnorm_eval_tnorm.hdf5", __name__))
-  my_D = xbob.io.base.load(datafile("ztnorm_znorm_tnorm.hdf5", __name__))
+  my_A = bob.io.base.load(datafile("ztnorm_eval_eval.hdf5", __name__))
+  my_B = bob.io.base.load(datafile("ztnorm_znorm_eval.hdf5", __name__))
+  my_C = bob.io.base.load(datafile("ztnorm_eval_tnorm.hdf5", __name__))
+  my_D = bob.io.base.load(datafile("ztnorm_znorm_tnorm.hdf5", __name__))
 
   # ZT-Norm
-  ref_scores = xbob.io.base.load(datafile("ztnorm_result.hdf5", __name__))
+  ref_scores = bob.io.base.load(datafile("ztnorm_result.hdf5", __name__))
   scores = ztnorm(my_A, my_B, my_C, my_D)
   assert (abs(scores - ref_scores) < 1e-7).all()
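
For reference, ``ztnorm`` consumes the four score blocks named by the fixtures
loaded above; a sketch with random stand-ins (shapes illustrative only, not
part of the patch)::

  import numpy
  from bob.learn.misc import ztnorm

  A = numpy.random.randn(10, 10)  # eval probes vs. eval models     (ztnorm_eval_eval)
  B = numpy.random.randn(10, 10)  # z-norm cohort vs. eval models   (ztnorm_znorm_eval)
  C = numpy.random.randn(10, 10)  # eval probes vs. t-norm models   (ztnorm_eval_tnorm)
  D = numpy.random.randn(10, 10)  # z-norm cohort vs. t-norm models (ztnorm_znorm_tnorm)

  normalized = ztnorm(A, B, C, D)  # same call as in test_ztnorm_big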
 
diff --git a/xbob/learn/misc/tinyvector.cpp b/bob/learn/misc/tinyvector.cpp
similarity index 100%
rename from xbob/learn/misc/tinyvector.cpp
rename to bob/learn/misc/tinyvector.cpp
diff --git a/xbob/learn/misc/version.cpp b/bob/learn/misc/version.cpp
similarity index 86%
rename from xbob/learn/misc/version.cpp
rename to bob/learn/misc/version.cpp
index 6befab3..03615dc 100644
--- a/xbob/learn/misc/version.cpp
+++ b/bob/learn/misc/version.cpp
@@ -19,8 +19,8 @@
 #ifdef NO_IMPORT_ARRAY
 #undef NO_IMPORT_ARRAY
 #endif
-#include <xbob.blitz/capi.h>
-#include <xbob.blitz/cleanup.h>
+#include <bob.blitz/capi.h>
+#include <bob.blitz/cleanup.h>
 
 static int dict_set(PyObject* d, const char* key, const char* value) {
   PyObject* v = Py_BuildValue("s", value);
@@ -96,10 +96,10 @@ static PyObject* numpy_version() {
 }
 
 /**
- * xbob.blitz c/c++ api version
+ * bob.blitz c/c++ api version
  */
-static PyObject* xbob_blitz_version() {
-  return Py_BuildValue("{ss}", "api", BOOST_PP_STRINGIZE(XBOB_BLITZ_API_VERSION));
+static PyObject* bob_blitz_version() {
+  return Py_BuildValue("{ss}", "api", BOOST_PP_STRINGIZE(BOB_BLITZ_API_VERSION));
 }
 
 static PyObject* build_version_dictionary() {
@@ -113,7 +113,7 @@ static PyObject* build_version_dictionary() {
   if (!dict_steal(retval, "Compiler", compiler_version())) return 0;
   if (!dict_steal(retval, "Python", python_version())) return 0;
   if (!dict_steal(retval, "NumPy", numpy_version())) return 0;
-  if (!dict_steal(retval, "xbob.blitz", xbob_blitz_version())) return 0;
+  if (!dict_steal(retval, "bob.blitz", bob_blitz_version())) return 0;
   if (!dict_steal(retval, "Bob", bob_version())) return 0;
 
   Py_INCREF(retval);
@@ -131,7 +131,7 @@ PyDoc_STRVAR(module_docstr,
 #if PY_VERSION_HEX >= 0x03000000
 static PyModuleDef module_definition = {
   PyModuleDef_HEAD_INIT,
-  XBOB_EXT_MODULE_NAME,
+  BOB_EXT_MODULE_NAME,
   module_docstr,
   -1,
   module_methods,
@@ -144,13 +144,13 @@ static PyObject* create_module (void) {
 # if PY_VERSION_HEX >= 0x03000000
   PyObject* m = PyModule_Create(&module_definition);
 # else
-  PyObject* m = Py_InitModule3(XBOB_EXT_MODULE_NAME, module_methods, module_docstr);
+  PyObject* m = Py_InitModule3(BOB_EXT_MODULE_NAME, module_methods, module_docstr);
 # endif
   if (!m) return 0;
   auto m_ = make_safe(m); ///< protects against early returns
 
   /* register version numbers and constants */
-  if (PyModule_AddStringConstant(m, "module", XBOB_EXT_MODULE_VERSION) < 0)
+  if (PyModule_AddStringConstant(m, "module", BOB_EXT_MODULE_VERSION) < 0)
     return 0;
 
   PyObject* externals = build_version_dictionary();
@@ -158,9 +158,9 @@ static PyObject* create_module (void) {
   if (PyModule_AddObject(m, "externals", externals) < 0) return 0;
 
   /* imports dependencies */
-  if (import_xbob_blitz() < 0) {
+  if (import_bob_blitz() < 0) {
     PyErr_Print();
-    PyErr_Format(PyExc_ImportError, "cannot import `%s'", XBOB_EXT_MODULE_NAME);
+    PyErr_Format(PyExc_ImportError, "cannot import `%s'", BOB_EXT_MODULE_NAME);
     return 0;
   }
 
@@ -169,7 +169,7 @@ static PyObject* create_module (void) {
 
 }
 
-PyMODINIT_FUNC XBOB_EXT_ENTRY_NAME (void) {
+PyMODINIT_FUNC BOB_EXT_ENTRY_NAME (void) {
 # if PY_VERSION_HEX >= 0x03000000
   return
 # endif
diff --git a/xbob/learn/misc/wiener.cpp b/bob/learn/misc/wiener.cpp
similarity index 100%
rename from xbob/learn/misc/wiener.cpp
rename to bob/learn/misc/wiener.cpp
diff --git a/xbob/learn/misc/wiener_trainer.cpp b/bob/learn/misc/wiener_trainer.cpp
similarity index 100%
rename from xbob/learn/misc/wiener_trainer.cpp
rename to bob/learn/misc/wiener_trainer.cpp
diff --git a/xbob/learn/misc/ztnorm.cpp b/bob/learn/misc/ztnorm.cpp
similarity index 100%
rename from xbob/learn/misc/ztnorm.cpp
rename to bob/learn/misc/ztnorm.cpp
diff --git a/buildout.cfg b/buildout.cfg
index 0579e05..563bcb0 100644
--- a/buildout.cfg
+++ b/buildout.cfg
@@ -4,29 +4,29 @@
 
 [buildout]
 parts = scripts
-eggs = xbob.learn.misc
-extensions = xbob.buildout
+eggs = bob.learn.misc
+extensions = bob.buildout
              mr.developer
 auto-checkout = *
-develop = src/xbob.extension
-          src/xbob.blitz
-          src/xbob.core
-          src/xbob.io.base
-          src/xbob.sp
+develop = src/bob.extension
+          src/bob.blitz
+          src/bob.core
+          src/bob.io.base
+          src/bob.sp
           .
 
-; options for xbob.buildout extension
+; options for bob.buildout extension
 debug = true
 verbose = true
 prefixes = /idiap/group/torch5spro/releases/preview/install/linux-x86_64-release
            /Users/andre/work/bob/b/dbg/
 
 [sources]
-xbob.extension = git https://github.com/bioidiap/xbob.extension branch=prototype
-xbob.blitz = git https://github.com/bioidiap/xbob.blitz
-xbob.core = git https://github.com/bioidiap/xbob.core
-xbob.io.base = git https://github.com/bioidiap/xbob.io.base
-xbob.sp = git https://github.com/bioidiap/xbob.sp
+bob.extension = git https://github.com/bioidiap/bob.extension
+bob.blitz = git https://github.com/bioidiap/bob.blitz
+bob.core = git https://github.com/bioidiap/bob.core
+bob.io.base = git https://github.com/bioidiap/bob.io.base
+bob.sp = git https://github.com/bioidiap/bob.sp
 
 [scripts]
-recipe = xbob.buildout:scripts
+recipe = bob.buildout:scripts
diff --git a/doc/conf.py b/doc/conf.py
index 8ebae82..c112d53 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -58,12 +58,12 @@ source_suffix = '.rst'
 master_doc = 'index'
 
 # General information about the project.
-project = u'xbob.learn.misc'
+project = u'bob.learn.misc'
 import time
 copyright = u'%s, Idiap Research Institute' % time.strftime('%Y')
 
 # Grab the setup entry
-distribution = pkg_resources.require('xbob.learn.misc')[0]
+distribution = pkg_resources.require('bob.learn.misc')[0]
 
 # The version info for the project you're documenting, acts as replacement for
 # |version| and |release|, also used in various other places throughout the
@@ -129,7 +129,7 @@ if sphinx.__version__ >= "1.0":
 #html_title = None
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = 'xbob_learn_misc'
+#html_short_title = 'bob_learn_misc'
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
@@ -187,7 +187,7 @@ html_favicon = 'img/favicon.ico'
 #html_file_suffix = None
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = 'xbob_learn_misc_doc'
+htmlhelp_basename = 'bob_learn_misc_doc'
 
 
 # -- Options for LaTeX output --------------------------------------------------
@@ -201,7 +201,7 @@ latex_font_size = '10pt'
 # Grouping the document tree into LaTeX files. List of tuples
 # (source start file, target name, title, author, documentclass [howto/manual]).
 latex_documents = [
-  ('index', 'xbob_learn_misc.tex', u'Bob Miscellaneous Machine Learning Tools',
+  ('index', 'bob_learn_misc.tex', u'Bob Miscellaneous Machine Learning Tools',
    u'Biometrics Group, Idiap Research Institute', 'manual'),
 ]
 
@@ -241,7 +241,7 @@ rst_epilog = """
 # One entry per manual page. List of tuples
 # (source start file, name, description, authors, manual section).
 man_pages = [
-    ('index', 'xbob_learn_misc', u'Bob Miscellaneous Machine Learning Tools', [u'Idiap Research Institute'], 1)
+    ('index', 'bob_learn_misc', u'Bob Miscellaneous Machine Learning Tools', [u'Idiap Research Institute'], 1)
 ]
 
 # Default processing flags for sphinx
diff --git a/doc/guide.rst b/doc/guide.rst
index 1c67b69..e174fd4 100644
--- a/doc/guide.rst
+++ b/doc/guide.rst
@@ -9,12 +9,12 @@
    import numpy
    numpy.set_printoptions(precision=3, suppress=True)
 
-   import xbob.learn.misc
+   import bob.learn.misc
 
    import os
    import tempfile
    current_directory = os.path.realpath(os.curdir)
-   temp_dir = tempfile.mkdtemp(prefix='xbob_doctest_')
+   temp_dir = tempfile.mkdtemp(prefix='bob_doctest_')
    os.chdir(temp_dir)
 
 ============
@@ -38,12 +38,12 @@ K-means machines
 method which aims to partition a set of observations into :math:`k` clusters.
 The `training` procedure is described further below. Here, we explain only how
 to use the resulting machine. For the sake of example, we create a new
-:py:class:`xbob.learn.misc.KMeansMachine` as follows:
+:py:class:`bob.learn.misc.KMeansMachine` as follows:
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> machine = xbob.learn.misc.KMeansMachine(2,3) # Two clusters with a feature dimensionality of 3
+   >>> machine = bob.learn.misc.KMeansMachine(2,3) # Two clusters with a feature dimensionality of 3
    >>> machine.means = numpy.array([[1,0,0],[0,0,1]], 'float64') # Defines the two clusters
 
 Then, given some input data, it is possible to determine to which cluster the
@@ -60,7 +60,7 @@ data is the closest as well as the min distance.
 Gaussian machines
 =================
 
-The :py:class:`xbob.learn.misc.Gaussian` represents a `multivariate diagonal
+The :py:class:`bob.learn.misc.Gaussian` represents a `multivariate diagonal
 Gaussian (or normal) distribution
 <http://en.wikipedia.org/wiki/Multivariate_normal_distribution>`_. In this
 context, a *diagonal* Gaussian refers to the covariance matrix of the
@@ -68,13 +68,13 @@ distribution being diagonal. When the covariance matrix is diagonal, each
 variable in the distribution is independent of the others.
 
 Objects of this class are normally used as building blocks for more complex
-:py:class:`xbob.learn.misc.GMMMachine` or GMM objects, but can also be used
+:py:class:`bob.learn.misc.GMMMachine` or GMM objects, but can also be used
 individually. Here is how to create one multivariate diagonal Gaussian
 distribution:
 
 .. doctest::
 
-  >>> g = xbob.learn.misc.Gaussian(2) #bi-variate diagonal normal distribution
+  >>> g = bob.learn.misc.Gaussian(2) # bi-variate diagonal normal distribution
   >>> g.mean = numpy.array([0.3, 0.7], 'float64')
   >>> g.mean
   array([ 0.3,  0.7])
@@ -82,7 +82,7 @@ distribution:
   >>> g.variance
   array([ 0.2,  0.1])
 
-Once the :py:class:`xbob.learn.misc.Gaussian` has been set, you can use it to
+Once the :py:class:`bob.learn.misc.Gaussian` has been set, you can use it to
 estimate the log-likelihood of an input feature vector with a matching number
 of dimensions:
 
@@ -91,25 +91,25 @@ of dimensions:
   >>> log_likelihood = g(numpy.array([0.4, 0.4], 'float64'))
 
 As with other machines you can save and re-load machines of this type using
-:py:meth:`xbob.learn.misc.Gaussian.save` and the class constructor
+:py:meth:`bob.learn.misc.Gaussian.save` and the class constructor
 respectively.
 
 Gaussian mixture models
 =======================
 
-The :py:class:`xbob.learn.misc.GMMMachine` represents a Gaussian `mixture model
+The :py:class:`bob.learn.misc.GMMMachine` represents a Gaussian `mixture model
 <http://en.wikipedia.org/wiki/Mixture_model>`_ (GMM), which consists of a
-mixture of weighted :py:class:`xbob.learn.misc.Gaussian`\s.
+mixture of weighted :py:class:`bob.learn.misc.Gaussian`\s.
 
 .. doctest::
 
-  >>> gmm = xbob.learn.misc.GMMMachine(2,3) # Mixture of two diagonal Gaussian of dimension 3
+  >>> gmm = bob.learn.misc.GMMMachine(2,3) # Mixture of two diagonal Gaussians of dimension 3
 
 By default, the diagonal Gaussian distributions of the GMM are initialized with
 zero mean and unit variance, and the weights are identical. This can be updated
-using the :py:attr:`xbob.learn.misc.GMMMachine.means`,
-:py:attr:`xbob.learn.misc.GMMMachine.variances` or
-:py:attr:`xbob.learn.misc.GMMMachine.weights`.
+using the :py:attr:`bob.learn.misc.GMMMachine.means`,
+:py:attr:`bob.learn.misc.GMMMachine.variances` or
+:py:attr:`bob.learn.misc.GMMMachine.weights`.
 
 .. doctest::
   :options: +NORMALIZE_WHITESPACE
@@ -121,7 +121,7 @@ using the :py:attr:`xbob.learn.misc.GMMMachine.means`,
   array([[ 1.,  6.,  2.],
        [ 4.,  3.,  2.]])
 
-Once the :py:class:`xbob.learn.misc.GMMMachine` has been set, you can use it to
+Once the :py:class:`bob.learn.misc.GMMMachine` has been set, you can use it to
 estimate the log-likelihood of an input feature vector with a matching number
 of dimensions:
 
@@ -130,12 +130,12 @@ of dimensions:
   >>> log_likelihood = gmm(numpy.array([5.1, 4.7, -4.9], 'float64'))
 
 As with other machines you can save and re-load machines of this type using
-:py:meth:`xbob.learn.misc.GMMMachine.save` and the class constructor respectively.
+:py:meth:`bob.learn.misc.GMMMachine.save` and the class constructor respectively.
 
 Gaussian mixture model statistics
 ==================================
 
-The :py:class:`xbob.learn.misc.GMMStats` is a container for the sufficient
+The :py:class:`bob.learn.misc.GMMStats` is a container for the sufficient
 statistics of a GMM distribution.
 
 Given a GMM, the sufficient statistics of a sample can be computed as
@@ -144,7 +144,7 @@ follows:
 .. doctest::
   :options: +NORMALIZE_WHITESPACE
 
-  >>> gs = xbob.learn.misc.GMMStats(2,3)
+  >>> gs = bob.learn.misc.GMMStats(2,3)
   >>> sample = numpy.array([0.5, 4.5, 1.5])
   >>> gmm.acc_statistics(sample, gs)
   >>> print(gs) # doctest: +SKIP
@@ -155,7 +155,7 @@ considering the following attributes.
 .. doctest::
   :options: +NORMALIZE_WHITESPACE
 
-  >>> gs = xbob.learn.misc.GMMStats(2,3)
+  >>> gs = bob.learn.misc.GMMStats(2,3)
   >>> log_likelihood = -3. # log-likelihood of the accumulated samples
   >>> T = 1 # Number of samples used to accumulate statistics
   >>> n = numpy.array([0.4, 0.6], 'float64') # zeroth order stats
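
The first and second order statistics follow the same pattern; a short sketch
of filling a ``GMMStats`` by hand (attribute names assumed to carry over
unchanged from the old ``xbob`` bindings)::

  import numpy
  from bob.learn.misc import GMMStats

  gs = GMMStats(2, 3)                        # 2 Gaussians, 3-dimensional features
  gs.log_likelihood = -3.                    # log-likelihood of the accumulated samples
  gs.t = 1                                   # number of accumulated samples
  gs.n = numpy.array([0.4, 0.6], 'float64')  # zeroth order stats
  gs.sum_px = numpy.zeros((2, 3))            # first order stats   (assumed name)
  gs.sum_pxx = numpy.zeros((2, 3))           # second order stats  (assumed name)
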
@@ -176,19 +176,19 @@ a within-class subspace :math:`U`, a between-class subspace :math:`V`, and a
 subspace for the residuals :math:`D` to capture and suppress a significant
 portion of between-class variation.
 
-An instance of :py:class:`xbob.learn.misc.JFABase` carries information about
+An instance of :py:class:`bob.learn.misc.JFABase` carries information about
 the matrices :math:`U`, :math:`V` and :math:`D`, which can be shared between
 several classes.  In contrast, after the enrolment phase, an instance of
-:py:class:`xbob.learn.misc.JFAMachine` carries class-specific information about
+:py:class:`bob.learn.misc.JFAMachine` carries class-specific information about
 the latent variables :math:`y` and :math:`z`.
 
-An instance of :py:class:`xbob.learn.misc.JFABase` can be initialized as
+An instance of :py:class:`bob.learn.misc.JFABase` can be initialized as
 follows, given an existing GMM:
 
 .. doctest::
   :options: +NORMALIZE_WHITESPACE
 
-  >>> jfa_base = xbob.learn.misc.JFABase(gmm,2,2) # dimensions of U and V are both equal to 2
+  >>> jfa_base = bob.learn.misc.JFABase(gmm,2,2) # dimensions of U and V are both equal to 2
   >>> U = numpy.array([[1, 2], [3, 4], [5, 6], [7, 8], [9, 10], [11, 12]], 'float64')
   >>> V = numpy.array([[6, 5], [4, 3], [2, 1], [1, 2], [3, 4], [5, 6]], 'float64')
   >>> d = numpy.array([0, 1, 0, 1, 0, 1], 'float64')
@@ -196,33 +196,33 @@ follows, given an existing GMM:
   >>> jfa_base.v = V
   >>> jfa_base.d = d
 
-Next, this :py:class:`xbob.learn.misc.JFABase` can be shared by several
-instances of :py:class:`xbob.learn.misc.JFAMachine`, the initialization being
+Next, this :py:class:`bob.learn.misc.JFABase` can be shared by several
+instances of :py:class:`bob.learn.misc.JFAMachine`, the initialization being
 as follows:
 
 .. doctest::
   :options: +NORMALIZE_WHITESPACE
 
-  >>> m = xbob.learn.misc.JFAMachine(jfa_base)
+  >>> m = bob.learn.misc.JFAMachine(jfa_base)
   >>> m.y = numpy.array([1,2], 'float64')
   >>> m.z = numpy.array([3,4,1,2,0,1], 'float64')
 
 
-Once the :py:class:`xbob.learn.misc.JFAMachine` has been configured for a
+Once the :py:class:`bob.learn.misc.JFAMachine` has been configured for a
 specific class, the log-likelihood (score) that an input sample belongs to the
 enrolled class can be estimated by first computing the GMM sufficient
 statistics of this input sample, and then calling
-:py:meth:`xbob.learn.misc.JFAMachine:forward` on the sufficient statistics.
+:py:meth:`bob.learn.misc.JFAMachine.forward` on the sufficient statistics.
 
 .. doctest::
   :options: +NORMALIZE_WHITESPACE
 
-  >>> gs = xbob.learn.misc.GMMStats(2,3)
+  >>> gs = bob.learn.misc.GMMStats(2,3)
   >>> gmm.acc_statistics(sample, gs)
   >>> score = m.forward(gs)
 
 As with other machines, you can save and re-load machines of this type using
-:py:meth:`xbob.learn.misc.JFAMachine.save` and the class constructor
+:py:meth:`bob.learn.misc.JFAMachine.save` and the class constructor
 respectively.
 
 
@@ -236,47 +236,47 @@ the Gaussian mixture modelling approach. It utilises a within-class subspace
 significant portion of between-class variation. The main difference compared to
 JFA is the absence of the between-class subspace :math:`V`.
 
-Similarly to JFA, an instance of :py:class:`xbob.learn.misc.JFABase` carries
+Similarly to JFA, an instance of :py:class:`bob.learn.misc.ISVBase` carries
 information about the matrices :math:`U` and :math:`D`, which can be shared
 between several classes, whereas an instance of
-:py:class:`xbob.learn.misc.JFAMachine` carries class-specific information about
+:py:class:`bob.learn.misc.ISVMachine` carries class-specific information about
 the latent variable :math:`z`.
 
-An instance of :py:class:`xbob.learn.misc.ISVBase` can be initialized as
+An instance of :py:class:`bob.learn.misc.ISVBase` can be initialized as
 follows, given an existing GMM:
 
 .. doctest::
   :options: +NORMALIZE_WHITESPACE
 
-  >>> isv_base = xbob.learn.misc.ISVBase(gmm,2) # dimension of U is equal to 2
+  >>> isv_base = bob.learn.misc.ISVBase(gmm,2) # dimension of U is equal to 2
   >>> isv_base.u = U
   >>> isv_base.d = d
 
-Next, this :py:class:`xbob.learn.misc.ISVBase` can be shared by several
-instances of :py:class:`xbob.learn.misc.ISVMachine`, the initialization being
+Next, this :py:class:`bob.learn.misc.ISVBase` can be shared by several
+instances of :py:class:`bob.learn.misc.ISVMachine`, the initialization being
 as follows:
 
 .. doctest::
   :options: +NORMALIZE_WHITESPACE
 
-  >>> m = xbob.learn.misc.ISVMachine(isv_base)
+  >>> m = bob.learn.misc.ISVMachine(isv_base)
   >>> m.z = numpy.array([3,4,1,2,0,1], 'float64')
 
-Once the :py:class:`xbob.learn.misc.ISVMachine` has been configured for a
+Once the :py:class:`bob.learn.misc.ISVMachine` has been configured for a
 specific class, the log-likelihood (score) that an input sample belongs to the
 enrolled class can be estimated by first computing the GMM sufficient
 statistics of this input sample, and then calling
-:py:meth:`xbob.learn.misc.ISVMachine:forward` on the sufficient statistics.
+:py:meth:`bob.learn.misc.ISVMachine.forward` on the sufficient statistics.
 
 .. doctest::
   :options: +NORMALIZE_WHITESPACE
 
-  >>> gs = xbob.learn.misc.GMMStats(2,3)
+  >>> gs = bob.learn.misc.GMMStats(2,3)
   >>> gmm.acc_statistics(sample, gs)
   >>> score = m.forward(gs)
 
 As with other machines, you can save and re-load machines of this type using
-:py:meth:`xbob.learn.misc.ISVMachine.save` and the class constructor
+:py:meth:`bob.learn.misc.ISVMachine.save` and the class constructor
 respectively.
 
 
@@ -289,30 +289,30 @@ dimensionality called ``i-vectors``. The model consists of a subspace :math:`T`
 and a residual diagonal covariance matrix :math:`\Sigma`, which are then used
 to extract i-vectors; the model is built upon the GMM approach.
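 
 In this model, the GMM mean supervector :math:`s` of a given sample is
 commonly written as
 
 .. math::
 
    s = m + Tw,
 
 where :math:`m` is the mean supervector of the prior GMM and :math:`w` is
 the low-dimensional i-vector, with residual covariance :math:`\Sigma`.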
 
-An instance of the class :py:class:`xbob.learn.misc.IVectorMachine` carries
+An instance of the class :py:class:`bob.learn.misc.IVectorMachine` carries
 information about these two matrices. It can be initialized as follows:
 
 .. doctest::
   :options: +NORMALIZE_WHITESPACE
 
-  >>> m = xbob.learn.misc.IVectorMachine(gmm, 2)
+  >>> m = bob.learn.misc.IVectorMachine(gmm, 2)
   >>> m.t = numpy.array([[1.,2],[4,1],[0,3],[5,8],[7,10],[11,1]])
   >>> m.sigma = numpy.array([1.,2.,1.,3.,2.,4.])
 
 
-Once the :py:class:`xbob.learn.misc.IVectorMachine` has been set, the
+Once the :py:class:`bob.learn.misc.IVectorMachine` has been set, the
 extraction of an i-vector :math:`w_{ij}` can be done in two steps: first
 extracting the GMM sufficient statistics, and then estimating the i-vector:
 
 .. doctest::
   :options: +NORMALIZE_WHITESPACE
 
-  >>> gs = xbob.learn.misc.GMMStats(2,3)
+  >>> gs = bob.learn.misc.GMMStats(2,3)
   >>> gmm.acc_statistics(sample, gs)
   >>> w_ij = m.forward(gs)
 
 As with other machines, you can save and re-load machines of this type using
-:py:meth:`xbob.learn.misc.IVectorMachine.save` and the class constructor
+:py:meth:`bob.learn.misc.IVectorMachine.save` and the class constructor
 respectively.
 
 
@@ -332,22 +332,22 @@ diagonal covariance matrix :math:`\Sigma`, the model assumes that a sample
 
 Information about a PLDA model (:math:`\mu`, :math:`F`, :math:`G` and
 :math:`\Sigma`) is carried by an instance of the class
-:py:class:`xbob.learn.misc.PLDABase`.
+:py:class:`bob.learn.misc.PLDABase`.
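 
 In this model, a sample :math:`x_{ij}` of class :math:`i` is commonly
 assumed to be generated as
 
 .. math::
 
    x_{ij} = \mu + F h_{i} + G w_{ij} + \epsilon_{ij},
 
 where :math:`h_{i}` is the latent identity variable shared by all samples
 of class :math:`i`, :math:`w_{ij}` is a sample-specific latent variable,
 and :math:`\epsilon_{ij}` is Gaussian noise with diagonal covariance
 :math:`\Sigma`.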
 
 .. doctest::
 
    >>> ### This creates a PLDABase container for input features of dimensionality 3,
    >>> ### and with subspaces F and G of rank 1 and 2 respectively.
-   >>> pldabase = xbob.learn.misc.PLDABase(3,1,2)
+   >>> pldabase = bob.learn.misc.PLDABase(3,1,2)
 
 Class-specific information (usually from enrolment samples) is contained in
-an instance of :py:class:`xbob.learn.misc.PLDAMachine`, that must be attached
-to a given :py:class:`xbob.learn.misc.PLDABase`. Once done, log-likelihood
+an instance of :py:class:`bob.learn.misc.PLDAMachine`, which must be attached
+to a given :py:class:`bob.learn.misc.PLDABase`. Once done, log-likelihood
 computations can be performed.
 
 .. doctest::
 
-   >>> plda = xbob.learn.misc.PLDAMachine(pldabase)
+   >>> plda = bob.learn.misc.PLDAMachine(pldabase)
    >>> samples = numpy.array([[3.5,-3.4,102], [4.5,-4.3,56]], dtype=numpy.float64)
    >>> loglike = plda.compute_log_likelihood(samples)
 
@@ -373,13 +373,13 @@ container.
    >>> data = numpy.array([[3,-3,100], [4,-4,98], [3.5,-3.5,99], [-7,7,-100], [-5,5,-101]], dtype='float64')
 
 The training procedure will learn the `means` for the
-:py:class:`xbob.learn.misc.KMeansMachine`. The number :math:`k` of `means` is given
+:py:class:`bob.learn.misc.KMeansMachine`. The number :math:`k` of `means` is given
 when creating the `machine`, as well as the dimensionality of the features.
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> kmeans = xbob.learn.misc.KMeansMachine(2, 3) # Create a machine with k=2 clusters with a dimensionality equal to 3
+   >>> kmeans = bob.learn.misc.KMeansMachine(2, 3) # Create a machine with k=2 clusters with a dimensionality equal to 3
 
 The training procedure for `k-means` is an **Expectation-Maximization**-based
 [8]_ algorithm. There are several options that can be set, such as the maximum
@@ -390,7 +390,7 @@ be called.
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> kmeansTrainer = xbob.learn.misc.KMeansTrainer()
+   >>> kmeansTrainer = bob.learn.misc.KMeansTrainer()
    >>> kmeansTrainer.max_iterations = 200
    >>> kmeansTrainer.convergence_threshold = 1e-5
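 
 To illustrate the update that this trainer iterates, here is a minimal
 numpy sketch of a single `k-means` step; the helper name is illustrative,
 and it assumes every cluster keeps at least one sample:
 
 .. code-block:: python
 
    import numpy
 
    def kmeans_step(data, means):
        """One k-means iteration: assign samples, then update the means."""
        # E-step: index of the closest mean for each sample
        distances = ((data[:, None, :] - means[None, :, :]) ** 2).sum(axis=2)
        labels = distances.argmin(axis=1)
        # M-step: each mean becomes the average of its assigned samples
        return numpy.array([data[labels == k].mean(axis=0)
                            for k in range(len(means))])
 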
 
@@ -407,18 +407,18 @@ A Gaussian **mixture model** (GMM) [9]_ is a common probabilistic model. In
 order to train the parameters of such a model it is common to use a
 **maximum-likelihood** (ML) approach [10]_. To do this we use an
 **Expectation-Maximization** (EM) algorithm [8]_. Let's first start by creating
-a :py:class:`xbob.learn.misc.GMMMachine`. By default, all of the Gaussian's have
+a :py:class:`bob.learn.misc.GMMMachine`. By default, all of the Gaussians have
 zero mean and unit variance, and all the weights are equal. As a starting
 point, we could set the means to the ones obtained with **k-means** [7]_.
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> gmm = xbob.learn.misc.GMMMachine(2,3) # Create a machine with 2 Gaussian and feature dimensionality 3
+   >>> gmm = bob.learn.misc.GMMMachine(2,3) # Create a machine with 2 Gaussians and feature dimensionality 3
    >>> gmm.means = kmeans.means # Set the means to the ones obtained with k-means
 
 The |project| class to learn the parameters of a GMM [9]_ using ML [10]_ is
-:py:class:`xbob.learn.misc.ML_GMMTrainer`. It uses an **EM**-based [8]_ algorithm
+:py:class:`bob.learn.misc.ML_GMMTrainer`. It uses an **EM**-based [8]_ algorithm
 and requires the user to specify which parameters of the GMM are updated at
 each iteration (means, variances and/or weights). In addition, and as for
 **k-means** [7]_, it has parameters such as the maximum number of iterations
@@ -427,7 +427,7 @@ and the criterion used to determine if the parameters have converged.
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> trainer = xbob.learn.misc.ML_GMMTrainer(True, True, True) # update means/variances/weights at each iteration
+   >>> trainer = bob.learn.misc.ML_GMMTrainer(True, True, True) # update means/variances/weights at each iteration
    >>> trainer.convergence_threshold = 1e-5
    >>> trainer.max_iterations = 200
    >>> trainer.train(gmm, data)
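 
 At each iteration, the standard ML re-estimation step computes the selected
 parameters from the accumulated sufficient statistics, commonly as
 
 .. math::
 
    w_k = \frac{n_k}{\sum_j n_j}, \qquad
    \mu_k = \frac{F_k}{n_k}, \qquad
    \sigma^2_k = \frac{S_k}{n_k} - \mu_k^2,
 
 in terms of the zeroth, first and second order statistics defined earlier.
 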
@@ -459,7 +459,7 @@ The training data used to compute the MAP estimate [11]_ is again stored in a
    >>> dataMAP = numpy.array([[7,-7,102], [6,-6,103], [-3.5,3.5,-97]], dtype='float64')
 
 The |project| class used to perform MAP adaptation training [11]_ is
-:py:class:`xbob.learn.misc.MAP_GMMTrainer`. As with the ML estimate [10]_, it uses
+:py:class:`bob.learn.misc.MAP_GMMTrainer`. As with the ML estimate [10]_, it uses
 an **EM**-based [8]_ algorithm and requires the user to specify which parts of
 the GMM are adapted at each iteration (means, variances and/or weights). In
 addition, it also has parameters such as the maximum number of iterations and
@@ -472,12 +472,12 @@ set.
    :options: +NORMALIZE_WHITESPACE
 
    >>> relevance_factor = 4.
-   >>> trainer = xbob.learn.misc.MAP_GMMTrainer(relevance_factor, True, False, False) # mean adaptation only
+   >>> trainer = bob.learn.misc.MAP_GMMTrainer(relevance_factor, True, False, False) # mean adaptation only
    >>> trainer.convergence_threshold = 1e-5
    >>> trainer.max_iterations = 200
    >>> trainer.set_prior_gmm(gmm)
    True
-   >>> gmmAdapted = xbob.learn.misc.GMMMachine(2,3) # Create a new machine for the MAP estimate
+   >>> gmmAdapted = bob.learn.misc.GMMMachine(2,3) # Create a new machine for the MAP estimate
    >>> trainer.train(gmmAdapted, dataMAP)
    >>> print(gmmAdapted) # doctest: +SKIP
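 
 For the means, MAP adaptation commonly interpolates between the prior means
 and the posterior-weighted sample means of the adaptation data, with a
 weight controlled by the relevance factor :math:`r`:
 
 .. math::
 
    \mu_k = \alpha_k \frac{F_k}{n_k} + (1 - \alpha_k)\,\mu_k^{prior},
    \qquad \alpha_k = \frac{n_k}{n_k + r},
 
 where :math:`n_k` and :math:`F_k` are the zeroth and first order statistics
 of component :math:`k` on the adaptation data.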
 
@@ -501,40 +501,40 @@ done, we get a training set of GMM statistics:
    >>> N2 = numpy.array([0.1069, 0.9397, 0.6164, 0.3545]).reshape((2,2))
    >>> N=[N1, N2]
 
-   >>> gs11 = xbob.learn.misc.GMMStats(2,3)
+   >>> gs11 = bob.learn.misc.GMMStats(2,3)
    >>> gs11.n = N1[:,0]
    >>> gs11.sum_px = F1[:,0].reshape(2,3)
-   >>> gs12 = xbob.learn.misc.GMMStats(2,3)
+   >>> gs12 = bob.learn.misc.GMMStats(2,3)
    >>> gs12.n = N1[:,1]
    >>> gs12.sum_px = F1[:,1].reshape(2,3)
 
-   >>> gs21 = xbob.learn.misc.GMMStats(2,3)
+   >>> gs21 = bob.learn.misc.GMMStats(2,3)
    >>> gs21.n = N2[:,0]
    >>> gs21.sum_px = F2[:,0].reshape(2,3)
-   >>> gs22 = xbob.learn.misc.GMMStats(2,3)
+   >>> gs22 = bob.learn.misc.GMMStats(2,3)
    >>> gs22.n = N2[:,1]
    >>> gs22.sum_px = F2[:,1].reshape(2,3)
 
    >>> TRAINING_STATS = [[gs11, gs12], [gs21, gs22]]
 
-In the following, we will allocate a :py:class:`xbob.learn.misc.JFABase` machine,
+In the following, we will allocate a :py:class:`bob.learn.misc.JFABase` machine,
 which will then be trained.
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-    >>> jfa_base = xbob.learn.misc.JFABase(gmm, 2, 2) # the dimensions of U and V are both equal to 2
+    >>> jfa_base = bob.learn.misc.JFABase(gmm, 2, 2) # the dimensions of U and V are both equal to 2
 
 Next, we initialize a trainer, which is an instance of
-:py:class:`xbob.learn.misc.JFATrainer`, as follows:
+:py:class:`bob.learn.misc.JFATrainer`, as follows:
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> jfa_trainer = xbob.learn.misc.JFATrainer(10) # 10 is the number of iterations
+   >>> jfa_trainer = bob.learn.misc.JFATrainer(10) # 10 is the number of iterations
 
 The training process is started by calling the
-:py:meth:`xbob.learn.misc.JFATrainer.train`.
+:py:meth:`bob.learn.misc.JFATrainer.train` method.
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
@@ -552,10 +552,10 @@ the following.
 
    >>> Ne = numpy.array([0.1579, 0.9245, 0.1323, 0.2458]).reshape((2,2))
    >>> Fe = numpy.array([0.1579, 0.1925, 0.3242, 0.1234, 0.2354, 0.2734, 0.2514, 0.5874, 0.3345, 0.2463, 0.4789, 0.5236]).reshape((6,2))
-   >>> gse1 = xbob.learn.misc.GMMStats(2,3)
+   >>> gse1 = bob.learn.misc.GMMStats(2,3)
    >>> gse1.n = Ne[:,0]
    >>> gse1.sum_px = Fe[:,0].reshape(2,3)
-   >>> gse2 = xbob.learn.misc.GMMStats(2,3)
+   >>> gse2 = bob.learn.misc.GMMStats(2,3)
    >>> gse2.n = Ne[:,1]
    >>> gse2.sum_px = Fe[:,1].reshape(2,3)
    >>> gse = [gse1, gse2]
@@ -566,7 +566,7 @@ the class-specific latent variables :math:`y` and :math:`z`:
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> m = xbob.learn.misc.JFAMachine(jfa_base)
+   >>> m = bob.learn.misc.JFAMachine(jfa_base)
    >>> jfa_trainer.enrol(m, gse, 5) # where 5 is the number of enrolment iterations
 
 More information about the training process can be found in [12]_ and [13]_.
@@ -579,23 +579,23 @@ The training of the subspace :math:`U` and :math:`D` of an Inter-Session
 Variability model is performed in two steps. As for JFA, GMM sufficient
 statistics of the training samples should be computed against the UBM GMM. Once
 done, we get a training set of GMM statistics.  Next, we will allocate an
-:py:class:`xbob.learn.misc.ISVBase` machine, that will then be trained.
+:py:class:`bob.learn.misc.ISVBase` machine, which will then be trained.
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-    >>> isv_base = xbob.learn.misc.ISVBase(gmm, 2) # the dimensions of U is equal to 2
+    >>> isv_base = bob.learn.misc.ISVBase(gmm, 2) # the dimension of U is equal to 2
 
 Next, we initialize a trainer, which is an instance of
-:py:class:`xbob.learn.misc.ISVTrainer`, as follows:
+:py:class:`bob.learn.misc.ISVTrainer`, as follows:
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> isv_trainer = xbob.learn.misc.ISVTrainer(10, 4.) # 10 is the number of iterations, and 4 is the relevance factor
+   >>> isv_trainer = bob.learn.misc.ISVTrainer(10, 4.) # 10 is the number of iterations, and 4 is the relevance factor
 
 The training process is started by calling the
-:py:meth:`xbob.learn.misc.ISVTrainer.train`.
+:py:meth:`bob.learn.misc.ISVTrainer.train` method.
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
@@ -611,7 +611,7 @@ estimate the class-specific latent variable :math:`z`:
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> m = xbob.learn.misc.ISVMachine(isv_base)
+   >>> m = bob.learn.misc.ISVMachine(isv_base)
    >>> isv_trainer.enrol(m, gse, 5) # where 5 is the number of iterations
 
 More information about the training process can be found in [14]_ and [13]_.
@@ -624,27 +624,27 @@ The training of the subspace :math:`T` and :math:`\Sigma` of a Total
 Variability model is performed in two steps. As for JFA and ISV, GMM
 sufficient statistics of the training samples should be computed against the
 UBM GMM. Once done, we get a training set of GMM statistics.  Next, we will
-allocate an instance of :py:class:`xbob.learn.misc.IVectorMachine`, that will
+allocate an instance of :py:class:`bob.learn.misc.IVectorMachine`, which will
 then be trained.
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-    >>> m = xbob.learn.misc.IVectorMachine(gmm, 2)
+    >>> m = bob.learn.misc.IVectorMachine(gmm, 2)
     >>> m.variance_threshold = 1e-5
 
 
 Next, we initialize a trainer, which is an instance of
-:py:class:`xbob.learn.misc.IVectorTrainer`, as follows:
+:py:class:`bob.learn.misc.IVectorTrainer`, as follows:
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
 
-   >>> ivec_trainer = xbob.learn.misc.IVectorTrainer(update_sigma=True, max_iterations=10)
+   >>> ivec_trainer = bob.learn.misc.IVectorTrainer(update_sigma=True, max_iterations=10)
    >>> TRAINING_STATS_flatten = [gs11, gs12, gs21, gs22]
 
 The training process is started by calling the
-:py:meth:`xbob.learn.misc.IVectorTrainer.train`.
+:py:meth:`bob.learn.misc.IVectorTrainer.train` method.
 
 .. doctest::
    :options: +NORMALIZE_WHITESPACE
@@ -671,7 +671,7 @@ diagonal covariance matrix :math:`\Sigma`, the model assumes that a sample
 An Expectation-Maximization algorithm can be used to learn the parameters of
 this model, :math:`\mu`, :math:`F`, :math:`G` and :math:`\Sigma`. As these
 parameters can be shared between classes, there is a specific container class
-for this purpose, which is :py:class:`xbob.learn.misc.PLDABase`. The process is
+for this purpose, which is :py:class:`bob.learn.misc.PLDABase`. The process is
 described in detail in [17]_.
 
 Let us consider a training set of two classes, each with 3 samples of
@@ -685,43 +685,43 @@ dimensionality 3.
    >>> data = [data1,data2]
 
 Learning a PLDA model can be performed by instantiating the class
-:py:class:`xbob.learn.misc.PLDATrainer`, and calling the
-:py:meth:`xbob.learn.misc.PLDATrainer.train()` method.
+:py:class:`bob.learn.misc.PLDATrainer`, and calling the
+:py:meth:`bob.learn.misc.PLDATrainer.train()` method.
 
 .. doctest::
 
    >>> ### This creates a PLDABase container for input features of dimensionality 3,
    >>> ### and with subspaces F and G of rank 1 and 2 respectively.
-   >>> pldabase = xbob.learn.misc.PLDABase(3,1,2)
+   >>> pldabase = bob.learn.misc.PLDABase(3,1,2)
 
-   >>> trainer = xbob.learn.misc.PLDATrainer()
+   >>> trainer = bob.learn.misc.PLDATrainer()
    >>> trainer.train(pldabase, data)
 
 Once trained, this PLDA model can be used to compute the log-likelihood of a
 set of samples given some hypothesis. For this purpose, a
-:py:class:`xbob.learn.misc.PLDAMachine` should be instantiated. Then, the
+:py:class:`bob.learn.misc.PLDAMachine` should be instantiated. Then, the
 log-likelihood that a set of samples share the same latent identity variable
 :math:`h_{i}` (i.e. the samples come from the same identity/class) is
 obtained by calling the
-:py:meth:`xbob.learn.misc.PLDAMachine.compute_log_likelihood()` method.
+:py:meth:`bob.learn.misc.PLDAMachine.compute_log_likelihood()` method.
 
 .. doctest::
 
-   >>> plda = xbob.learn.misc.PLDAMachine(pldabase)
+   >>> plda = bob.learn.misc.PLDAMachine(pldabase)
    >>> samples = numpy.array([[3.5,-3.4,102], [4.5,-4.3,56]], dtype=numpy.float64)
    >>> loglike = plda.compute_log_likelihood(samples)
 
 If separate models for different classes need to be enrolled, each of them with
 a set of enrolment samples, then several instances of
-:py:class:`xbob.learn.misc.PLDAMachine` need to be created and enroled using
-the :py:meth:`xbob.learn.misc.PLDATrainer.enrol()` method as follows.
+:py:class:`bob.learn.misc.PLDAMachine` need to be created and enrolled using
+the :py:meth:`bob.learn.misc.PLDATrainer.enrol()` method, as follows.
 
 .. doctest::
 
-   >>> plda1 = xbob.learn.misc.PLDAMachine(pldabase)
+   >>> plda1 = bob.learn.misc.PLDAMachine(pldabase)
    >>> samples1 = numpy.array([[3.5,-3.4,102], [4.5,-4.3,56]], dtype=numpy.float64)
    >>> trainer.enrol(plda1, samples1)
-   >>> plda2 = xbob.learn.misc.PLDAMachine(pldabase)
+   >>> plda2 = bob.learn.misc.PLDAMachine(pldabase)
    >>> samples2 = numpy.array([[3.5,7,-49], [4.5,8.9,-99]], dtype=numpy.float64)
    >>> trainer.enrol(plda2, samples2)
 
@@ -738,8 +738,8 @@ separately for each model.
 In a verification scenario, there are two possible hypotheses: 1.
 :math:`x_{test}` and :math:`x_{enrol}` share the same class.  2.
 :math:`x_{test}` and :math:`x_{enrol}` are from different classes.  Using the
-methods :py:meth:`xbob.learn.misc.PLDAMachine:call()` or
-:py:meth:`xbob.learn.misc.PLDAMachine:forward()`, the corresponding
+methods :py:meth:`bob.learn.misc.PLDAMachine.call()` or
+:py:meth:`bob.learn.misc.PLDAMachine.forward()`, the corresponding
 log-likelihood ratio will be computed, which is more formally defined as:
 :math:`s = \ln(P(x_{test},x_{enrol})) - \ln(P(x_{test})P(x_{enrol}))`
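 
 As a sketch, and assuming that :py:meth:`forward` accepts the test samples
 directly (as :py:meth:`compute_log_likelihood` does above), this ratio
 could be obtained for a previously enrolled model as follows; the sample
 values are illustrative only:
 
 .. code-block:: python
 
    import numpy
 
    # hypothetical test samples, matching the feature dimensionality (3)
    x_test = numpy.array([[2.9, -2.8, 101.]], dtype=numpy.float64)
    # log-likelihood ratio against the model enrolled in ``plda1`` above
    score = plda1.forward(x_test)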
 
diff --git a/doc/py_api.rst b/doc/py_api.rst
index 8cd4ba0..c1c9541 100644
--- a/doc/py_api.rst
+++ b/doc/py_api.rst
@@ -7,8 +7,8 @@
 ============
 
 This section includes information for using the pure Python API of
-``xbob.learn.misc``.
+``bob.learn.misc``.
 
 
-.. automodule:: xbob.learn.misc
+.. automodule:: bob.learn.misc
 
diff --git a/setup.py b/setup.py
index 99b1868..b2619e9 100644
--- a/setup.py
+++ b/setup.py
@@ -4,22 +4,22 @@
 # Mon 16 Apr 08:18:08 2012 CEST
 
 from setuptools import setup, find_packages, dist
-dist.Distribution(dict(setup_requires=['xbob.blitz', 'xbob.io.base']))
-from xbob.blitz.extension import Extension
-import xbob.io.base
+dist.Distribution(dict(setup_requires=['bob.blitz', 'bob.io.base']))
+from bob.blitz.extension import Extension
+import bob.io.base
 
 import os
-include_dirs = [xbob.io.base.get_include()]
+include_dirs = [bob.io.base.get_include()]
 
 packages = ['bob-machine >= 2.0.0a2', 'bob-trainer >= 2.0.0a2', 'boost']
 version = '2.0.0a0'
 
 setup(
 
-    name='xbob.learn.misc',
+    name='bob.learn.misc',
     version=version,
     description='Bindings for miscellaneous machines and trainers',
-    url='http://github.com/bioidiap/xbob.learn.misc',
+    url='http://github.com/bioidiap/bob.learn.misc',
     license='BSD',
     author='Andre Anjos',
     author_email='andre.anjos@idiap.ch',
@@ -31,59 +31,59 @@ setup(
 
     install_requires=[
       'setuptools',
-      'xbob.blitz',
-      'xbob.core',
-      'xbob.io.base',
-      'xbob.sp',
+      'bob.blitz',
+      'bob.core',
+      'bob.io.base',
+      'bob.sp',
       ],
 
     namespace_packages=[
-      "xbob",
-      "xbob.learn",
+      "bob",
+      "bob.learn",
       ],
 
     ext_modules = [
-      Extension("xbob.learn.misc.version",
+      Extension("bob.learn.misc.version",
         [
-          "xbob/learn/misc/version.cpp",
+          "bob/learn/misc/version.cpp",
           ],
         packages = packages,
         include_dirs = include_dirs,
         version = version,
         ),
-      Extension("xbob.learn.misc._library",
+      Extension("bob.learn.misc._library",
         [
-          "xbob/learn/misc/bic.cpp",
-          "xbob/learn/misc/bic_trainer.cpp",
-          "xbob/learn/misc/empca_trainer.cpp",
-          "xbob/learn/misc/gabor.cpp",
-          "xbob/learn/misc/gaussian.cpp",
-          "xbob/learn/misc/gmm.cpp",
-          "xbob/learn/misc/gmm_trainer.cpp",
-          "xbob/learn/misc/ivector.cpp",
-          "xbob/learn/misc/ivector_trainer.cpp",
-          "xbob/learn/misc/jfa.cpp",
-          "xbob/learn/misc/jfa_trainer.cpp",
-          "xbob/learn/misc/kmeans.cpp",
-          "xbob/learn/misc/kmeans_trainer.cpp",
-          "xbob/learn/misc/machine.cpp",
-          "xbob/learn/misc/linearscoring.cpp",
-          "xbob/learn/misc/plda.cpp",
-          "xbob/learn/misc/plda_trainer.cpp",
-          "xbob/learn/misc/wiener.cpp",
-          "xbob/learn/misc/wiener_trainer.cpp",
-          "xbob/learn/misc/ztnorm.cpp",
+          "bob/learn/misc/bic.cpp",
+          "bob/learn/misc/bic_trainer.cpp",
+          "bob/learn/misc/empca_trainer.cpp",
+          "bob/learn/misc/gabor.cpp",
+          "bob/learn/misc/gaussian.cpp",
+          "bob/learn/misc/gmm.cpp",
+          "bob/learn/misc/gmm_trainer.cpp",
+          "bob/learn/misc/ivector.cpp",
+          "bob/learn/misc/ivector_trainer.cpp",
+          "bob/learn/misc/jfa.cpp",
+          "bob/learn/misc/jfa_trainer.cpp",
+          "bob/learn/misc/kmeans.cpp",
+          "bob/learn/misc/kmeans_trainer.cpp",
+          "bob/learn/misc/machine.cpp",
+          "bob/learn/misc/linearscoring.cpp",
+          "bob/learn/misc/plda.cpp",
+          "bob/learn/misc/plda_trainer.cpp",
+          "bob/learn/misc/wiener.cpp",
+          "bob/learn/misc/wiener_trainer.cpp",
+          "bob/learn/misc/ztnorm.cpp",
 
           # external requirements as boost::python bindings
-          "xbob/learn/misc/GaborWaveletTransform.cpp",
-          "xbob/learn/misc/blitz_numpy.cpp",
-          "xbob/learn/misc/ndarray.cpp",
-          "xbob/learn/misc/ndarray_numpy.cpp",
-          "xbob/learn/misc/tinyvector.cpp",
-          "xbob/learn/misc/hdf5.cpp",
-          "xbob/learn/misc/random.cpp",
+          "bob/learn/misc/GaborWaveletTransform.cpp",
+          "bob/learn/misc/blitz_numpy.cpp",
+          "bob/learn/misc/ndarray.cpp",
+          "bob/learn/misc/ndarray_numpy.cpp",
+          "bob/learn/misc/tinyvector.cpp",
+          "bob/learn/misc/hdf5.cpp",
+          "bob/learn/misc/random.cpp",
 
-          "xbob/learn/misc/main.cpp",
+          "bob/learn/misc/main.cpp",
         ],
         packages = packages,
         boost_modules = ['python'],
-- 
GitLab