diff --git a/buildout.cfg b/buildout.cfg
index d86bf8ca3f842c0ba52bf085254c0dca0f010a4c..aa737a64ad2cd4019426d2b70ed8fbcedb00cbc8 100644
--- a/buildout.cfg
+++ b/buildout.cfg
@@ -13,13 +13,18 @@ eggs = sphinx
        bob
        bob.example.faceverify
 
+; This points to the Bob database packages that are installed locally at Idiap
+; TODO: remove this as soon as the databases are published on PyPI
+find-links = http://www.idiap.ch/software/bob/packages/xbob/nightlies/last
+
 ; This defines the installation directory of Bob.
 ; The current setup should work fine for use at Idiap.
 ; If you are not at Idiap, and bob is not installed in the default location,
 ; please modify the egg-directories accordingly.
+; TODO: Change this directory back to the stable version of Bob when appropriate
 [external]
 recipe = xbob.buildout:external
-egg-directories = /idiap/group/torch5spro/releases/bob-1.0.5/install/linux-x86_64-release/lib
+egg-directories = /idiap/group/torch5spro/nightlies/last/install/linux-x86_64-release/lib
 
 
 [python]
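
Note on the buildout changes above: the two modified options cooperate. find-links gives buildout an extra location to download eggs that are not yet on PyPI, while egg-directories points the xbob.buildout:external recipe at a Bob build that is already installed on disk. A minimal sketch of the same setup for a user outside Idiap; both paths below are placeholders, not shipped defaults:

    [buildout]
    ; extra download location for packages not yet on PyPI
    find-links = http://www.idiap.ch/software/bob/packages/xbob/nightlies/last

    [external]
    recipe = xbob.buildout:external
    ; point this at the lib/ directory of your local Bob installation
    egg-directories = /path/to/your/bob/install/lib
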
diff --git a/faceverify/dct_ubm.py b/faceverify/dct_ubm.py
index a2c1a38e205509db649b3eff10eb4e15875cd052..20c5210da6e98b5cbd994330c9fd1845ad33e3f5 100644
--- a/faceverify/dct_ubm.py
+++ b/faceverify/dct_ubm.py
@@ -1,4 +1,5 @@
 import bob
+import xbob.db.atnt
 import os, sys
 import numpy
 from matplotlib import pyplot
@@ -38,15 +39,15 @@ def extract_feature(image):
   # compute shape of the image blocks
   block_shape = bob.ip.get_block_shape(image, DCT_BLOCK_SIZE, DCT_BLOCK_SIZE, DCT_BLOCK_OVERLAP, DCT_BLOCK_OVERLAP)
   image_blocks = numpy.ndarray(block_shape, 'float64')
-  
+
   # fill image blocks
   bob.ip.block(image, image_blocks, DCT_BLOCK_SIZE, DCT_BLOCK_SIZE, DCT_BLOCK_OVERLAP, DCT_BLOCK_OVERLAP)
 
   # perform DCT on image blocks
   dct_blocks = dct_extractor(image_blocks)
-  
+
   return dct_blocks
-  
+
 
 
 # Parameters of the UBM/GMM module training
@@ -72,7 +73,7 @@ def train(training_features):
 
   # train using the KMeansTrainer
   kmeans_trainer.train(kmeans, training_set)
-  
+
   [variances, weights] = kmeans.get_variances_and_weights_for_each_cluster(training_set)
   means = kmeans.means
 
@@ -100,7 +101,7 @@ def enrol(model_features, ubm, gmm_trainer):
   # train the GMM
   gmm_trainer.train(gmm, enrol_set)
 
-  # return the resulting gmm    
+  # return the resulting gmm
   return gmm
 
 
@@ -108,21 +109,21 @@ def stats(probe_feature, ubm):
   """Computes the UBM Statistics for the given feature vector"""
   # compute the UBM stats for the given probe feature
   probe_feature = bob.io.Arrayset(probe_feature)
-  
+
   # Accumulate statistics
   gmm_stats = bob.machine.GMMStats(ubm.dim_c, ubm.dim_d)
   gmm_stats.init()
   ubm.acc_statistics(probe_feature, gmm_stats)
-  
+
   return gmm_stats
-  
+
 
 def main():
   """This function will perform an a DCT block extraction and a UBM/GMM modeling test on the AT&T database"""
-  
+
   # use the database interface to retrieve information about the database
-  atnt_db = bob.db.atnt.Database()
-  
+  atnt_db = xbob.db.atnt.Database()
+
   # check if the AT&T database directory is overridden on the command line
   global ATNT_IMAGE_DIRECTORY
   if len(sys.argv) > 1:
@@ -132,12 +133,12 @@ def main():
   if not os.path.isdir(ATNT_IMAGE_DIRECTORY):
     print "The database directory '" + ATNT_IMAGE_DIRECTORY + "' does not exists!"
     return
-  
+
   #####################################################################
-  ### UBM Training 
+  ### UBM Training
   # load all training images
-  training_images = load_images(atnt_db, group = 'train')
-  
+  training_images = load_images(atnt_db, group = 'world')
+
   print "Extracting training features"
   training_features = {}
   for key, image in training_images.iteritems():
@@ -145,20 +146,20 @@ def main():
 
   print "Training UBM model"
   ubm = train(training_features)
-  
+
   #####################################################################
   ### GMM model enrollment
   print "Enrolling GMM models"
   gmm_trainer = bob.trainer.MAP_GMMTrainer()
   gmm_trainer.max_iterations = 1
   gmm_trainer.set_prior_gmm(ubm)
-  
+
   # create a GMM model for each model identity
-  model_ids = atnt_db.client_ids(groups = 'test')
+  model_ids = atnt_db.client_ids(groups = 'dev')
   models = {}
   for model_id in model_ids:
     # load images for the current model id
-    model_images = load_images(atnt_db, group = 'test', purpose = 'enrol', client_id = model_id)
+    model_images = load_images(atnt_db, group = 'dev', purpose = 'enrol', client_id = model_id)
     models_for_current_id = {}
     # extract model features
     for key, image in model_images.iteritems():
@@ -166,12 +167,12 @@ def main():
     # enroll model for the current identity from these features
     model = enrol(models_for_current_id, ubm, gmm_trainer)
     models[model_id] = model
-    
+
   #####################################################################
   ### probe stats
-  
+
   print "Computing probe statistics"
-  probe_images = load_images(atnt_db, group = 'test', purpose = 'probe')
+  probe_images = load_images(atnt_db, group = 'dev', purpose = 'probe')
   probes = {}
   for key, image in probe_images.iteritems():
     # extract probe features
@@ -183,7 +184,7 @@ def main():
   ### compute scores; here we use the UBM/GMM linear scoring function
   positive_scores = []
   negative_scores = []
-  
+
   print "Computing scores"
   distance_function = bob.machine.linear_scoring
 
@@ -192,24 +193,24 @@ def main():
     for probe_key, probe_stats in probes.iteritems():
       # compute score
       score = distance_function([model_gmm], ubm, [probe_stats])[0,0]
-      
+
       # check if this is a positive score
       if model_id == atnt_db.get_client_id_from_file_id(probe_key):
         positive_scores.append(score)
       else:
         negative_scores.append(score)
-        
+
   print "Evaluation"
   # convert list of scores to numpy arrays
   positives = numpy.array(positive_scores)
   negatives = numpy.array(negative_scores)
-  
+
   # compute equal error rate
   threshold = bob.measure.eer_threshold(negatives, positives)
   FAR, FRR = bob.measure.farfrr(negatives, positives, threshold)
-  
+
   print "Result: FAR", FAR, "and FRR", FRR, "at threshold", threshold
-  
+
   # plot ROC curve
   bob.measure.plot.roc(negatives, positives)
   pyplot.xlabel("False Rejection Rate (%)")
@@ -217,11 +218,11 @@ def main():
   pyplot.title("ROC Curve for UBM/GMM based AT&T Verification Experiment")
   pyplot.grid()
   pyplot.axis([0, 100, 0, 100]) #xmin, xmax, ymin, ymax
-  
+
   # save plot to file
   pyplot.savefig("dct_ubm.png")
 
   # show ROC curve.
   # enable it if you like. This will open a window and display the ROC curve
-#  pyplot.show()  
- 
+#  pyplot.show()
+
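
Note: the changes to dct_ubm.py above are one instance of the migration applied to all three scripts: the AT&T database interface moved out of the core library into the separately packaged xbob.db.atnt, and its protocol group names changed from 'train'/'test' to 'world'/'dev'. A minimal sketch of the calls the scripts rely on, using only the methods visible in this patch (probe_file_id below is a hypothetical placeholder):

    import xbob.db.atnt

    atnt_db = xbob.db.atnt.Database()

    # model identities of the development group (was: groups = 'test')
    model_ids = atnt_db.client_ids(groups = 'dev')

    # map a probe file id back to its client id, to decide whether a score
    # is positive (same identity) or negative (different identity)
    client_id = atnt_db.get_client_id_from_file_id(probe_file_id)
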
diff --git a/faceverify/eigenface.py b/faceverify/eigenface.py
index 0a08784743650c18ec641b9e53b4f808ee5c54da..18491b1f245745e5a28af26d334877152ae4fb81 100644
--- a/faceverify/eigenface.py
+++ b/faceverify/eigenface.py
@@ -1,4 +1,5 @@
 import bob
+import xbob.db.atnt
 import os, sys
 import numpy
 from matplotlib import pyplot
@@ -22,7 +23,7 @@ def load_images(db, group = None, purpose = None):
     # load image and linearize it into a vector
     images[key] = bob.io.load(image_name).astype(numpy.float64)
   return images
-  
+
 
 # The number of eigenfaces that should be kept
 KEPT_EIGENFACES = 5
@@ -31,41 +32,41 @@ def train(training_images):
   """Trains the PCA module with the given list of training images"""
   # perform training using a SVD PCA trainer
   pca_trainer = bob.trainer.SVDPCATrainer()
-  
+
   # create array set used for training
   training_set = bob.io.Arrayset()
-  
+
   # iterate through the training examples and linearize the images
   for image in training_images.values():
     training_set.append(image.flatten())
 
   # training the SVD PCA returns a machine that can be used for projection
   pca_machine, eigen_values = pca_trainer.train(training_set)
-  
+
   # limit the number of kept eigenfaces
   pca_machine.resize(pca_machine.shape[0], KEPT_EIGENFACES)
-  
+
   return pca_machine
-  
+
 
 def extract_feature(image, pca_machine):
   """Projects the given list of images to the PCA subspace and returns the results"""
   # create projection result in the desired size
   projected_feature = numpy.ndarray((KEPT_EIGENFACES,), dtype = numpy.float64)
-  
+
   # project the data after linearizing them
   pca_machine(image.flatten(), projected_feature)
-  
+
   # return the projected data
   return projected_feature
 
 
 def main():
   """This function will perform an eigenface test on the AT&T database"""
-  
+
   # use the database interface to retrieve information about the database
-  atnt_db = bob.db.atnt.Database()
-  
+  atnt_db = xbob.db.atnt.Database()
+
   # check if the AT&T database directory is overridden on the command line
   global ATNT_IMAGE_DIRECTORY
   if len(sys.argv) > 1:
@@ -75,13 +76,13 @@ def main():
   if not os.path.isdir(ATNT_IMAGE_DIRECTORY):
     print "The database directory '" + ATNT_IMAGE_DIRECTORY + "' does not exists!"
     return
-  
+
   #####################################################################
   ### Training
-  
+
   # load all training images
-  training_images = load_images(atnt_db, group = 'train')
-  
+  training_images = load_images(atnt_db, group = 'world')
+
   print "Training PCA machine"
   pca_machine = train(training_images)
 
@@ -89,9 +90,9 @@ def main():
   ### extract eigenface features of model and probe images
 
   # load model and probe images
-  model_images = load_images(atnt_db, group = 'test', purpose = 'enrol')
-  probe_images = load_images(atnt_db, group = 'test', purpose = 'probe')
-  
+  model_images = load_images(atnt_db, group = 'dev', purpose = 'enrol')
+  probe_images = load_images(atnt_db, group = 'dev', purpose = 'probe')
+
   print "Extracting models"
   model_features = {}
   for key, image in model_images.iteritems():
@@ -100,13 +101,13 @@ def main():
   probe_features = {}
   for key, image in probe_images.iteritems():
     probe_features[key] = extract_feature(image, pca_machine)
-  
+
 
   #####################################################################
   ### compute scores; here we choose a simple Euclidean distance measure
   positive_scores = []
   negative_scores = []
-  
+
   print "Computing scores"
   distance_function = bob.math.euclidean_distance
 
@@ -115,24 +116,24 @@ def main():
     for probe_key, probe_feature in probe_features.iteritems():
       # compute score
       score = distance_function(model_feature, probe_feature)
-      
+
       # check if this is a positive score
       if atnt_db.get_client_id_from_file_id(model_key) == atnt_db.get_client_id_from_file_id(probe_key):
         positive_scores.append(score)
       else:
         negative_scores.append(score)
-        
+
   print "Evaluation"
   # convert list of scores to numpy arrays
   positives = numpy.array(positive_scores)
   negatives = numpy.array(negative_scores)
-  
+
   # compute equal error rate
   threshold = bob.measure.eer_threshold(negatives, positives)
   FAR, FRR = bob.measure.farfrr(negatives, positives, threshold)
-  
+
   print "Result: FAR", FAR, "and FRR", FRR, "at threshold", threshold
-  
+
   # plot ROC curve
   bob.measure.plot.roc(negatives, positives)
   pyplot.xlabel("False Rejection Rate (%)")
@@ -141,10 +142,10 @@ def main():
   pyplot.grid()
   pyplot.axis([0, 100, 0, 100]) #xmin, xmax, ymin, ymax
 
-  # save plot to file     
+  # save plot to file
   pyplot.savefig("eigenface.png")
 
   # show ROC curve.
   # enable it if you like. This will open a window and display the ROC curve
-#  pyplot.show()  
- 
+#  pyplot.show()
+
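
Note: the eigenface score is a plain Euclidean distance between two projected feature vectors. As a self-contained reference, an equivalent of bob.math.euclidean_distance written in numpy alone (a sketch, not the library call). Keep in mind that a distance is a dissimilarity: if the evaluation step assumes higher scores for genuine pairs, the value would need to be negated first.

    import numpy

    def euclidean_distance(model_feature, probe_feature):
        # L2 norm of the difference vector -- the quantity the script compares
        return numpy.linalg.norm(model_feature - probe_feature)
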
diff --git a/faceverify/gabor_phase.py b/faceverify/gabor_phase.py
index f82d2b7abf7aaeaf40bb34d564331311a9e0cca0..3cc94519fd3c9dac181e0e6ce838068b1bc94a7b 100644
--- a/faceverify/gabor_phase.py
+++ b/faceverify/gabor_phase.py
@@ -1,4 +1,5 @@
 import bob
+import xbob.db.atnt
 import os, sys
 import numpy
 from matplotlib import pyplot
@@ -22,7 +23,7 @@ def load_images(db, group = None, purpose = None):
     # load image and linearize it into a vector
     images[key] = bob.io.load(image_name).astype(numpy.float64)
   return images
-  
+
 
 # define Gabor wavelet transform class globally since it is reused for all images
 gabor_wavelet_transform = bob.ip.GaborWaveletTransform()
@@ -37,22 +38,22 @@ def extract_feature(image, graph_machine):
   # add the shape of one Gabor jet
   shape.extend(jet_image[0,0].shape)
   gabor_graph = numpy.ndarray(shape, dtype = numpy.float64)
-  
+
   # perform Gabor wavelet transform on the image
   gabor_wavelet_transform.compute_jets(image, jet_image)
-  
+
   # extract the Gabor graphs from the feature image
   graph_machine(jet_image, gabor_graph)
-  
+
   # return the extracted graph
   return gabor_graph
 
 
 def main():
   """This function will perform Gabor graph comparison test on the AT&T database"""
-  
+
   # use the database interface to retrieve information about the database
-  atnt_db = bob.db.atnt.Database()
+  atnt_db = xbob.db.atnt.Database()
 
   # check if the AT&T database directory is overridden on the command line
   global ATNT_IMAGE_DIRECTORY
@@ -63,22 +64,22 @@ def main():
   if not os.path.isdir(ATNT_IMAGE_DIRECTORY):
     print "The database directory '" + ATNT_IMAGE_DIRECTORY + "' does not exists!"
     return
-  
+
   #####################################################################
   ### Training
-  
-  # for Gabor graphs, no training is required. 
-  
+
+  # for Gabor graphs, no training is required.
+
   print "Creating Gabor graph machine"
   # create a machine that will produce tight Gabor graphs with inter-node distance (1,1)
   graph_machine = bob.machine.GaborGraphMachine((0,0), (111,91), (1,1))
-    
+
   #####################################################################
   ### extract Gabor graph features for all model and probe images
   # load all model and probe images
-  model_images = load_images(atnt_db, group = 'test', purpose = 'enrol')
-  probe_images = load_images(atnt_db, group = 'test', purpose = 'probe')
-  
+  model_images = load_images(atnt_db, group = 'dev', purpose = 'enrol')
+  probe_images = load_images(atnt_db, group = 'dev', purpose = 'probe')
+
   print "Extracting models"
   model_features = {}
   for key, image in model_images.iteritems():
@@ -87,17 +88,17 @@ def main():
   probe_features = {}
   for key, image in probe_images.iteritems():
     probe_features[key] = extract_feature(image, graph_machine)
-  
+
 
   #####################################################################
   ### compute scores; here we choose a Gabor jet similarity measure
   positive_scores = []
   negative_scores = []
-  
+
   print "Computing scores"
   # choose the Gabor jet similarity function to be used
   similarity_function = bob.machine.DisparityCorrectedPhaseDifference()
-  
+
   # iterate through models and probes and compute scores
   for model_key, model_feature in model_features.iteritems():
     for probe_key, probe_feature in probe_features.iteritems():
@@ -109,16 +110,16 @@ def main():
         positive_scores.append(score)
       else:
         negative_scores.append(score)
-        
+
   print "Evaluation"
   # convert list of scores to numpy arrays
   positives = numpy.array(positive_scores)
   negatives = numpy.array(negative_scores)
-  
+
   # compute equal error rate
   threshold = bob.measure.eer_threshold(negatives, positives)
   FAR, FRR = bob.measure.farfrr(negatives, positives, threshold)
-  
+
   print "Result: FAR", FAR, "and FRR", FRR, "at threshold", threshold
 
   # plot ROC curve
@@ -129,10 +130,10 @@ def main():
   pyplot.grid()
   pyplot.axis([0, 100, 0, 100]) #xmin, xmax, ymin, ymax
 
-  # save plot to file     
+  # save plot to file
   pyplot.savefig("gabor_phase.png")
 
   # show ROC curve.
   # enable it if you like. This will open a window and display the ROC curve
-#  pyplot.show()  
+#  pyplot.show()
 
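
Note: all three scripts share the same evaluation tail: bob.measure.eer_threshold searches for the threshold at which the two error rates are equal, and bob.measure.farfrr reports both rates at a given threshold. For intuition, a self-contained numpy sketch of the FAR/FRR computation for similarity scores, where higher means more likely genuine (the library's exact tie handling is an assumption):

    import numpy

    def far_frr(negatives, positives, threshold):
        # false acceptance rate: impostor scores at or above the threshold
        far = numpy.mean(negatives >= threshold)
        # false rejection rate: genuine scores below the threshold
        frr = numpy.mean(positives < threshold)
        return far, frr
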
diff --git a/setup.py b/setup.py
index ed51d15944225b2fd3ec3d245e2b72fb3fb04687..1b1932533c9c2bf35c73e775fb6a9923e9fb5712 100644
--- a/setup.py
+++ b/setup.py
@@ -45,9 +45,12 @@ setup(
     # on the current system will be installed locally and only visible to the
     # scripts of this package. Don't worry - You won't need administrative
     # privileges when using buildout.
+
+    # TODO: Add a version number requirement to bob, when ready
     install_requires=[
         "sphinx",                     # to generate the documentation
-        "bob >= 1.0.0, < 1.1.0",      # base signal proc./machine learning library
+        "bob",                        # base signal proc./machine learning library
+        "xbob.db.atnt",               # the AT&T (ORL) database of images
     ],
 
     # This entry defines which scripts you will have inside the 'bin' directory
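
Note: once the stable Bob release and the database packages reach PyPI, the TODO above resolves to restoring a version pin in install_requires. A hypothetical example; the version numbers are placeholders, not a released version:

    install_requires=[
        "sphinx",                 # to generate the documentation
        "bob >= 1.1.0, < 1.2.0",  # hypothetical pin once the stable release is out
        "xbob.db.atnt",           # the AT&T (ORL) database of images
    ],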