diff --git a/bob/bio/face/config/baseline/helpers.py b/bob/bio/face/config/baseline/helpers.py
index eda47399581d7cd723bf5e98abe311bfccb11dfa..7bb2b74b414555e43ac312a30888d1bd5cc3fca2 100644
--- a/bob/bio/face/config/baseline/helpers.py
+++ b/bob/bio/face/config/baseline/helpers.py
@@ -5,6 +5,7 @@ from bob.pipelines import wrap
 from bob.bio.face.helpers import face_crop_solver
 import numpy as np
 
+
 def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
     """
     Computes the default cropped positions for the FaceCropper used with Facenet-like 
@@ -32,32 +33,51 @@ def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
 
         TOP_LEFT_POS = (0, 0)
         BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
-        cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS}
+        cropped_positions = {"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS}
 
     elif annotation_type == "eyes-center":
 
-        RIGHT_EYE_POS = (round(2/7*CROPPED_IMAGE_HEIGHT), round(1/3*CROPPED_IMAGE_WIDTH))
-        LEFT_EYE_POS = (round(2/7*CROPPED_IMAGE_HEIGHT), round(2/3*CROPPED_IMAGE_WIDTH))
-        cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS}
+        RIGHT_EYE_POS = (
+            round(2 / 7 * CROPPED_IMAGE_HEIGHT),
+            round(1 / 3 * CROPPED_IMAGE_WIDTH),
+        )
+        LEFT_EYE_POS = (
+            round(2 / 7 * CROPPED_IMAGE_HEIGHT),
+            round(2 / 3 * CROPPED_IMAGE_WIDTH),
+        )
+        cropped_positions = {"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS}
 
     elif annotation_type == "left-profile":
 
-        EYE_POS = (round(2/7*CROPPED_IMAGE_HEIGHT), round(3/8*CROPPED_IMAGE_WIDTH))
-        MOUTH_POS = (round(5/7*CROPPED_IMAGE_HEIGHT), round(3/8*CROPPED_IMAGE_WIDTH))
-        cropped_positions={'leye': EYE_POS, 'mouth': MOUTH_POS}
+        EYE_POS = (
+            round(2 / 7 * CROPPED_IMAGE_HEIGHT),
+            round(3 / 8 * CROPPED_IMAGE_WIDTH),
+        )
+        MOUTH_POS = (
+            round(5 / 7 * CROPPED_IMAGE_HEIGHT),
+            round(3 / 8 * CROPPED_IMAGE_WIDTH),
+        )
+        cropped_positions = {"leye": EYE_POS, "mouth": MOUTH_POS}
 
     elif annotation_type == "right-profile":
 
-        EYE_POS = (round(2/7*CROPPED_IMAGE_HEIGHT), round(5/8*CROPPED_IMAGE_WIDTH))
-        MOUTH_POS = (round(5/7*CROPPED_IMAGE_HEIGHT), round(5/8*CROPPED_IMAGE_WIDTH))
-        cropped_positions={'reye': EYE_POS, 'mouth': MOUTH_POS}
-    
+        EYE_POS = (
+            round(2 / 7 * CROPPED_IMAGE_HEIGHT),
+            round(5 / 8 * CROPPED_IMAGE_WIDTH),
+        )
+        MOUTH_POS = (
+            round(5 / 7 * CROPPED_IMAGE_HEIGHT),
+            round(5 / 8 * CROPPED_IMAGE_WIDTH),
+        )
+        cropped_positions = {"reye": EYE_POS, "mouth": MOUTH_POS}
+
     else:
 
         cropped_positions = None
 
     return cropped_positions
 
+
 def legacy_default_cropping(cropped_image_size, annotation_type):
     """
     Computes the default cropped positions for the FaceCropper used with legacy extractors, 
@@ -85,33 +105,41 @@ def legacy_default_cropping(cropped_image_size, annotation_type):
 
         TOP_LEFT_POS = (0, 0)
         BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
-        cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS}
+        cropped_positions = {"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS}
 
     elif annotation_type == "eyes-center":
 
         RIGHT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 - 1)
         LEFT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 * 3)
-        cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS}
+        cropped_positions = {"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS}
 
     elif annotation_type == "left-profile":
         # Main reference https://gitlab.idiap.ch/bob/bob.chapter.FRICE/-/blob/master/bob/chapter/FRICE/script/pose.py
-        EYE_POS = (CROPPED_IMAGE_HEIGHT//5, CROPPED_IMAGE_WIDTH // 7 * 3 - 2)
-        MOUTH_POS = (CROPPED_IMAGE_HEIGHT//3 * 2, CROPPED_IMAGE_WIDTH // 7 * 3 - 2)
-        cropped_positions={'leye': EYE_POS, 'mouth': MOUTH_POS}
+        EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 7 * 3 - 2)
+        MOUTH_POS = (CROPPED_IMAGE_HEIGHT // 3 * 2, CROPPED_IMAGE_WIDTH // 7 * 3 - 2)
+        cropped_positions = {"leye": EYE_POS, "mouth": MOUTH_POS}
 
     elif annotation_type == "right-profile":
         # Main reference https://gitlab.idiap.ch/bob/bob.chapter.FRICE/-/blob/master/bob/chapter/FRICE/script/pose.py
-        EYE_POS = (CROPPED_IMAGE_HEIGHT//5, CROPPED_IMAGE_WIDTH // 7 * 4 + 2)
-        MOUTH_POS = (CROPPED_IMAGE_HEIGHT//3 * 2, CROPPED_IMAGE_WIDTH // 7 * 4 + 2)
-        cropped_positions={'reye': EYE_POS, 'mouth': MOUTH_POS}
-    
+        EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 7 * 4 + 2)
+        MOUTH_POS = (CROPPED_IMAGE_HEIGHT // 3 * 2, CROPPED_IMAGE_WIDTH // 7 * 4 + 2)
+        cropped_positions = {"reye": EYE_POS, "mouth": MOUTH_POS}
+
     else:
 
         cropped_positions = None
 
     return cropped_positions
 
-def embedding_transformer(cropped_image_size, embedding, annotation_type, cropped_positions, fixed_positions=None, color_channel = "rgb"):
+
+def embedding_transformer(
+    cropped_image_size,
+    embedding,
+    annotation_type,
+    cropped_positions,
+    fixed_positions=None,
+    color_channel="rgb",
+):
     """
     Creates a pipeline composed by and FaceCropper and an Embedding extractor.
     This transformer is suited for Facenet based architectures
@@ -121,13 +149,15 @@ def embedding_transformer(cropped_image_size, embedding, annotation_type, croppe
     
     """
     face_cropper = face_crop_solver(
-            cropped_image_size,
-            color_channel=color_channel,
-            cropped_positions=cropped_positions,
-            fixed_positions=fixed_positions,
-        )
+        cropped_image_size,
+        color_channel=color_channel,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+    )
 
-    transform_extra_arguments = None if cropped_positions is None else (("annotations", "annotations"),)
+    transform_extra_arguments = (
+        None if cropped_positions is None else (("annotations", "annotations"),)
+    )
 
     transformer = make_pipeline(
         wrap(
@@ -140,7 +170,10 @@ def embedding_transformer(cropped_image_size, embedding, annotation_type, croppe
 
     return transformer
 
-def embedding_transformer_160x160(embedding, annotation_type, fixed_positions, color_channel="rgb"):
+
+def embedding_transformer_160x160(
+    embedding, annotation_type, fixed_positions, color_channel="rgb"
+):
     """
     Creates a pipeline composed by and FaceCropper and an Embedding extractor.
     This transformer is suited for Facenet based architectures
@@ -149,12 +182,23 @@ def embedding_transformer_160x160(embedding, annotation_type, fixed_positions, c
        This will resize images to :math:`160 \times 160`
     
     """
-    cropped_positions = embedding_transformer_default_cropping((160, 160), annotation_type)
+    cropped_positions = embedding_transformer_default_cropping(
+        (160, 160), annotation_type
+    )
 
-    return embedding_transformer((160, 160), embedding, annotation_type, cropped_positions, fixed_positions, color_channel=color_channel)
+    return embedding_transformer(
+        (160, 160),
+        embedding,
+        annotation_type,
+        cropped_positions,
+        fixed_positions,
+        color_channel=color_channel,
+    )
 
 
-def embedding_transformer_112x112(embedding, annotation_type, fixed_positions, color_channel="rgb"):
+def embedding_transformer_112x112(
+    embedding, annotation_type, fixed_positions, color_channel="rgb"
+):
     """
     Creates a pipeline composed by and FaceCropper and an Embedding extractor.
     This transformer is suited for Facenet based architectures
@@ -166,12 +210,52 @@ def embedding_transformer_112x112(embedding, annotation_type, fixed_positions, c
     cropped_image_size = (112, 112)
     if annotation_type == "eyes-center":
         # Hard coding eye positions for backward consistency
-        cropped_positions = {'leye': (32, 77), 'reye': (32, 34)}
+        cropped_positions = {"leye": (32, 77), "reye": (32, 34)}
     else:
-        # Will use default 
-        cropped_positions = embedding_transformer_default_cropping(cropped_image_size, annotation_type)
+        # Will use default
+        cropped_positions = embedding_transformer_default_cropping(
+            cropped_image_size, annotation_type
+        )
 
-    return embedding_transformer(cropped_image_size, embedding, annotation_type, cropped_positions, fixed_positions, color_channel=color_channel)
+    return embedding_transformer(
+        cropped_image_size,
+        embedding,
+        annotation_type,
+        cropped_positions,
+        fixed_positions,
+        color_channel=color_channel,
+    )
+
+
+def embedding_transformer_224x224(
+    embedding, annotation_type, fixed_positions, color_channel="rgb"
+):
+    """
+    Creates a pipeline composed of a FaceCropper and an Embedding extractor.
+    This transformer is suited for Facenet based architectures
+    
+    .. warning::
+       This will resize images to :math:`224 \times 224`
+    
+    """
+    cropped_image_size = (224, 224)
+    if annotation_type == "eyes-center":
+        # Hard coding eye positions for backward consistency
+        cropped_positions = {"leye": (65, 150), "reye": (65, 77)}
+    else:
+        # Will use default
+        cropped_positions = embedding_transformer_default_cropping(
+            cropped_image_size, annotation_type
+        )
+
+    return embedding_transformer(
+        cropped_image_size,
+        embedding,
+        annotation_type,
+        cropped_positions,
+        fixed_positions,
+        color_channel=color_channel,
+    )
 
 
 def crop_80x64(annotation_type, fixed_positions=None, color_channel="gray"):
@@ -212,15 +296,16 @@ def crop_80x64(annotation_type, fixed_positions=None, color_channel="gray"):
 
     cropped_positions = legacy_default_cropping(cropped_image_size, annotation_type)
 
-
     face_cropper = face_crop_solver(
-            cropped_image_size,
-            color_channel=color_channel,
-            cropped_positions=cropped_positions,
-            fixed_positions=fixed_positions,
-            dtype=dtype
-        )
-    
-    transform_extra_arguments = None if cropped_positions is None else (("annotations", "annotations"),)
+        cropped_image_size,
+        color_channel=color_channel,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        dtype=dtype,
+    )
+
+    transform_extra_arguments = (
+        None if cropped_positions is None else (("annotations", "annotations"),)
+    )
 
     return face_cropper, transform_extra_arguments
diff --git a/bob/bio/face/preprocessor/FaceCrop.py b/bob/bio/face/preprocessor/FaceCrop.py
index 9b04b9decc79e3099b987b1e67c2843e34a096e0..ca9a77c46ac73242f615819a8dbba68e5805983a 100644
--- a/bob/bio/face/preprocessor/FaceCrop.py
+++ b/bob/bio/face/preprocessor/FaceCrop.py
@@ -93,7 +93,7 @@ class FaceCrop(Base):
       your database easily. If you are sure about your input, you can set this flag to
       ``True``.
 
-    annotator : :any:`bob.bio.base.annotator.Annotator`
+    annotator : `bob.bio.base.annotator.Annotator`
       If provided, the annotator will be used if the required annotations are
       missing.
 
diff --git a/bob/bio/face/preprocessor/HistogramEqualization.py b/bob/bio/face/preprocessor/HistogramEqualization.py
index 63f7a594bd06b99cf4eb51be8bf396310aebbefa..95523e97bc39f29bfc584756bc7325c3d6e83686 100644
--- a/bob/bio/face/preprocessor/HistogramEqualization.py
+++ b/bob/bio/face/preprocessor/HistogramEqualization.py
@@ -28,8 +28,8 @@ from bob.pipelines.sample import SampleBatch
 class HistogramEqualization(Base):
     """Crops the face (if desired) and performs histogram equalization to photometrically enhance the image.
 
-      Parameters:
-      -----------
+      Parameters
+      ----------
 
       face_cropper : str or :py:class:`bob.bio.face.preprocessor.FaceCrop` or :py:class:`bob.bio.face.preprocessor.FaceDetect` or ``None``
         The face image cropper that should be applied to the image.
diff --git a/doc/annotators.rst b/doc/annotators.rst
index b7cb0abf8051202fed2ee56924bb6df2863e5050..2277b00fa989e4591d4087dfa6d556b1ab159ed7 100644
--- a/doc/annotators.rst
+++ b/doc/annotators.rst
@@ -7,7 +7,7 @@
 =================
 
 This packages provides several face annotators (using RGB images) that you can
-use to annotate biometric databases. See :ref:`bob.bio.base.annotations` for
+use to annotate biometric databases. See :any:`bob.bio.base.annotator` for
 a guide on the general usage of this feature.
 
 .. warning::
diff --git a/doc/baselines.rst b/doc/baselines.rst
index 8ce69a0c0f8d976465660972895bb518360b61a2..2217e5af8bf96bb90dc7b6964dee14f5fbdca275 100644
--- a/doc/baselines.rst
+++ b/doc/baselines.rst
@@ -8,60 +8,51 @@
 Executing Baseline Algorithms
 =============================
 
-.. todo::
-   Here we should:   
-     - Brief how to run an experiment
-     - Point to bob.bio.base for further explanation
-     - Show the baselines available
-     - Show the databases available
 
+In this section we introduce the baselines available in this package.
+To execute one of them on the available databases, just run the following command::
 
-The baselines
--------------
+$ bob bio pipelines vanilla-biometrics [DATABASE_NAME] [BASELINE]
 
-The algorithms present an (incomplete) set of state-of-the-art face recognition algorithms. Here is the list of short-cuts:
+.. note::
+  Both `[DATABASE_NAME]` and `[BASELINE]` can be either python resources or
+  python files.
 
-* ``eigenface``: The eigenface algorithm as proposed by [TP91]_. It uses the pixels as raw data, and applies a *Principal Component Analysis* (PCA) on it:
+  Please, refer to :ref:`bob.bio.base <bob.bio.base>` for more information.  
 
-  - preprocessor : :py:class:`bob.bio.face.preprocessor.FaceCrop`
-  - feature : :py:class:`bob.bio.base.extractor.Linearize`
-  - algorithm : :py:class:`bob.bio.base.algorithm.PCA`
 
-* ``lda``: The LDA algorithm applies a *Linear Discriminant Analysis* (LDA), here we use the combined PCA+LDA approach [ZKC98]_:
 
-  - preprocessor : :py:class:`bob.bio.face.preprocessor.FaceCrop`
-  - feature : :py:class:`bob.bio.face.extractor.Eigenface`
-  - algorithm : :py:class:`bob.bio.base.algorithm.LDA`
+Baselines available
+-------------------
 
-* ``gaborgraph``: This method extract grid graphs of Gabor jets from the images, and computes a Gabor phase based similarity [GHW12]_.
+The algorithms below contain all the face recognition baselines available.
+They are split into two groups: before and after the deep learning era.
 
-  - preprocessor : :py:class:`bob.bio.face.preprocessor.INormLBP`
-  - feature : :py:class:`bob.bio.face.extractor.GridGraph`
-  - algorithm : :py:class:`bob.bio.face.algorithm.GaborJet`
 
+Before Deep learning era
+========================
 
-Further algorithms are available, when the :ref:`bob.bio.gmm <bob.bio.gmm>` package is installed:
 
-* ``gmm``: *Gaussian Mixture Models* (GMM) [MM09]_ are extracted from *Discrete Cosine Transform* (DCT) block features.
+* ``eigenface``: The eigenface algorithm as proposed by [TP91]_. It uses the pixels as raw data, and applies a *Principal Component Analysis* (PCA) on it.
 
-  - preprocessor : :py:class:`bob.bio.face.preprocessor.TanTriggs`
-  - feature : :py:class:`bob.bio.face.extractor.DCTBlocks`
-  - algorithm : :py:class:`bob.bio.gmm.algorithm.GMM`
+* ``lda``: The LDA algorithm applies a *Linear Discriminant Analysis* (LDA), here we use the combined PCA+LDA approach [ZKC98]_
 
-* ``isv``: As an extension of the GMM algorithm, *Inter-Session Variability* (ISV) modeling [WMM11]_ is used to learn what variations in images are introduced by identity changes and which not.
+* ``gabor_graph``: This method extract grid graphs of Gabor jets from the images, and computes a Gabor phase based similarity [GHW12]_.
 
-  - preprocessor : :py:class:`bob.bio.face.preprocessor.TanTriggs`
-  - feature : :py:class:`bob.bio.face.extractor.DCTBlocks`
-  - algorithm : :py:class:`bob.bio.gmm.algorithm.ISV`
+* ``lgbphs``: Local Gabor binary pattern histogram sequence (LGBPHS) implemented in [ZSG05]_
 
-* ``ivector``: Another extension of the GMM algorithm is *Total Variability* (TV) modeling [WM12]_ (aka. I-Vector), which tries to learn a subspace in the GMM super-vector space.
 
-  - preprocessor : :py:class:`bob.bio.face.preprocessor.TanTriggs`
-  - feature : :py:class:`bob.bio.face.extractor.DCTBlocks`
-  - algorithm : :py:class:`bob.bio.gmm.algorithm.IVector`
+Deep learning baselines
+=======================
 
-.. note::
-  The ``ivector`` algorithm needs a lot of training data and fails on small databases such as the `AT&T database`_.
+* ``facenet-sanderberg``: FaceNet trained by `David Sandberg <https://github.com/davidsandberg/facenet>`_
+
+* ``inception-resnetv2-msceleb``: Inception Resnet v2 model trained using the MSCeleb dataset in the context of the work published by [TFP18]_
+
+* ``inception-resnetv1-msceleb``: Inception Resnet v1 model trained using the MSCeleb dataset in the context of the work published by [TFP18]_
+
+* ``inception-resnetv2-casiawebface``: Inception Resnet v2 model trained using the Casia Web dataset in the context of the work published by [TFP18]_
 
-.. _bob.bio.base.baseline_results:
+* ``inception-resnetv1-casiawebface``: Inception Resnet v1 model trained using the Casia Web dataset in the context of the work published by [TFP18]_
 
+* ``arcface-insightface``: Arcface model from `Insightface <https://github.com/deepinsight/insightface>`_
diff --git a/doc/implemented.rst b/doc/implemented.rst
index 19ab5aaa11b52d36d506f9de79b9af0cdc393ccc..ee0af21d26b1c13033a9d5d395a9a3dca6fc6f80 100644
--- a/doc/implemented.rst
+++ b/doc/implemented.rst
@@ -14,15 +14,11 @@ Databases
    bob.bio.face.database.ARFaceBioDatabase
    bob.bio.face.database.AtntBioDatabase
    bob.bio.face.database.MobioBioDatabase
-   bob.bio.face.database.CaspealBioDatabase
    bob.bio.face.database.ReplayBioDatabase
    bob.bio.face.database.ReplayMobileBioDatabase
    bob.bio.face.database.GBUBioDatabase
    bob.bio.face.database.LFWBioDatabase
    bob.bio.face.database.MultipieBioDatabase
-   bob.bio.face.database.XM2VTSBioDatabase
-   bob.bio.face.database.FRGCBioDatabase
-   bob.bio.face.database.SCFaceBioDatabase
    bob.bio.face.database.FargoBioDatabase
 
 
diff --git a/doc/index.rst b/doc/index.rst
index 23bae2667d36221480784e589799554622cb7168..e0fbecd27ea700dc708cb89bb58339ec9312e274 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -4,9 +4,9 @@
 
 .. _bob.bio.face:
 
-=============================
- Open Source Face Recognition
-=============================
+=====================================
+ Open Source Face Recognition Library
+=====================================
 
 
 This package provide open source tools to run comparable and reproducible face recognition experiments.
@@ -15,32 +15,30 @@ This includes:
 * Preprocessors to detect, align and photometrically enhance face images
 * Feature extractors that extract features from facial images
 * Facial image databases including their protocols.
-* Scripts that trains CNNs
-
-For more detailed information about how this package is structured, please refer to the documentation of :ref:`bob.bio.base <bob.bio.base>`.
+* Scripts that trains CNNs for face recognition.
 
 
 Get Started
-============
+===========
 
 The easiest way to get started is by simply comparing two faces::
 
-$ bob bio compare-samples -p gabor_graph me.png not_me.png
+$ bob bio compare-samples -p facenet-sanderberg me.png not_me.png
 
 .. warning::
    No face detection is carried out with this command.
 
 Check out all the face recognition algorithms available by doing::
 
-$ resources.py --type p
+$ resources.py --types p
 
 
 Get Started, serious 
 ====================
 
-.. todo::
-
-   Briefing about baselines
+For detailed information on how this package is structured and how
+to run experiments with it, please refer to the documentation of :ref:`bob.bio.base <bob.bio.base>`
+and get to know the vanilla biometrics and how to integrate both, algorithm and database protocols with it.
  
 
 Users Guide
@@ -50,7 +48,7 @@ Users Guide
    :maxdepth: 2
 
    baselines
-   leaderboad
+   leaderboard/leaderboard
    references
    annotators
 
diff --git a/doc/leaderboard.rst b/doc/leaderboard.rst
deleted file mode 100644
index 08c1257eff684a8c215099a5b9f39c3cbf734d9b..0000000000000000000000000000000000000000
--- a/doc/leaderboard.rst
+++ /dev/null
@@ -1,14 +0,0 @@
-.. vim: set fileencoding=utf-8 :
-
-.. _bob.bio.face.learderboard:
-
-=============================
-Leaderboad
-=============================
-
-.. todo::
-   Here we should:   
-     - Present a custom Leaderboad per database
-     - Present a script that runs at least one experiment of this leader board
-
-
diff --git a/doc/leaderboard/arface.rst b/doc/leaderboard/arface.rst
new file mode 100644
index 0000000000000000000000000000000000000000..fadb1e0ba313fe2f5c3fcd95a25a9b66f427f61f
--- /dev/null
+++ b/doc/leaderboard/arface.rst
@@ -0,0 +1,13 @@
+.. vim: set fileencoding=utf-8 :
+
+.. _bob.bio.face.learderboard.arface:
+
+==============
+ARFACE Dataset
+==============
+
+
+.. todo::
+   Present benchmarks
+
+   Probably for Manuel's students
\ No newline at end of file
diff --git a/doc/leaderboard/banca.rst b/doc/leaderboard/banca.rst
new file mode 100644
index 0000000000000000000000000000000000000000..3a467606d1d0df1b20b72b0576a704461461426d
--- /dev/null
+++ b/doc/leaderboard/banca.rst
@@ -0,0 +1,13 @@
+.. vim: set fileencoding=utf-8 :
+
+.. _bob.bio.face.learderboard.banca:
+
+=============
+Banca Dataset
+=============
+
+
+.. todo::
+   Present benchmarks
+
+   Probably for Manuel's students
\ No newline at end of file
diff --git a/doc/leaderboard/gbu.rst b/doc/leaderboard/gbu.rst
new file mode 100644
index 0000000000000000000000000000000000000000..70fbe8fb8f215c12808ee72306a667b10c96a1f6
--- /dev/null
+++ b/doc/leaderboard/gbu.rst
@@ -0,0 +1,13 @@
+.. vim: set fileencoding=utf-8 :
+
+.. _bob.bio.face.learderboard.gbu:
+
+===========
+GBU Dataset
+===========
+
+
+.. todo::
+   Present benchmarks
+
+   Probably for Manuel's students
\ No newline at end of file
diff --git a/doc/leaderboard/ijbc.rst b/doc/leaderboard/ijbc.rst
new file mode 100644
index 0000000000000000000000000000000000000000..67d409ec417bc3fb62fdddaab1ad974b82ed7d70
--- /dev/null
+++ b/doc/leaderboard/ijbc.rst
@@ -0,0 +1,13 @@
+.. vim: set fileencoding=utf-8 :
+
+.. _bob.bio.face.learderboard.ijbc:
+
+=============
+IJB-C Dataset
+=============
+
+
+.. todo::
+   Present benchmarks
+
+   Probably for Manuel's students
\ No newline at end of file
diff --git a/doc/leaderboard/leaderboard.rst b/doc/leaderboard/leaderboard.rst
new file mode 100644
index 0000000000000000000000000000000000000000..e86d2c0d5ea9d58e89799f94bf024b8a89639d54
--- /dev/null
+++ b/doc/leaderboard/leaderboard.rst
@@ -0,0 +1,28 @@
+.. vim: set fileencoding=utf-8 :
+
+.. _bob.bio.face.learderboard:
+
+===========
+Leaderboard
+===========
+
+In the following pages we present a face recognition leaderboard with some popular datasets.
+
+Datasets
+--------
+
+.. toctree::
+   :maxdepth: 2
+
+   mobio
+   lfw
+   meds
+   morph
+   ijbc
+   uccs
+   multipie
+   arface
+   xm2vts
+   gbu
+   banca
+   
diff --git a/doc/leaderboard/lfw.rst b/doc/leaderboard/lfw.rst
new file mode 100644
index 0000000000000000000000000000000000000000..4fda5d6f07dc004d9b783da7a916f24ee007b633
--- /dev/null
+++ b/doc/leaderboard/lfw.rst
@@ -0,0 +1,13 @@
+.. vim: set fileencoding=utf-8 :
+
+.. _bob.bio.face.learderboard.lfw:
+
+===========
+LFW Dataset
+===========
+
+
+.. todo::
+   Present benchmarks
+
+   Probably for Manuel's students
\ No newline at end of file
diff --git a/doc/leaderboard/meds.rst b/doc/leaderboard/meds.rst
new file mode 100644
index 0000000000000000000000000000000000000000..934a13b1fe957d64c2ffaf513a8b8a8ed0ae9695
--- /dev/null
+++ b/doc/leaderboard/meds.rst
@@ -0,0 +1,13 @@
+.. vim: set fileencoding=utf-8 :
+
+.. _bob.bio.face.learderboard.meds:
+
+============
+MEDS Dataset
+============
+
+
+.. todo::
+   Present benchmarks
+
+   Probably for Manuel's students
\ No newline at end of file
diff --git a/doc/leaderboard/mobio.rst b/doc/leaderboard/mobio.rst
new file mode 100644
index 0000000000000000000000000000000000000000..708566505b5b666b590f415f0b2f2545b25048b4
--- /dev/null
+++ b/doc/leaderboard/mobio.rst
@@ -0,0 +1,13 @@
+.. vim: set fileencoding=utf-8 :
+
+.. _bob.bio.face.learderboard.mobio:
+
+=============
+Mobio Dataset
+=============
+
+
+.. todo::
+   Benchmarks on Mobio Database
+
+   Probably for Manuel's students
\ No newline at end of file
diff --git a/doc/leaderboard/morph.rst b/doc/leaderboard/morph.rst
new file mode 100644
index 0000000000000000000000000000000000000000..1bfc35311f19ec4f77bcf5a085abe3fd36480fac
--- /dev/null
+++ b/doc/leaderboard/morph.rst
@@ -0,0 +1,13 @@
+.. vim: set fileencoding=utf-8 :
+
+.. _bob.bio.face.learderboard.morph:
+
+=============
+Morph Dataset
+=============
+
+
+.. todo::
+   Present benchmarks
+
+   Probably for Manuel's students
\ No newline at end of file
diff --git a/doc/leaderboard/multipie.rst b/doc/leaderboard/multipie.rst
new file mode 100644
index 0000000000000000000000000000000000000000..035a819009221259de2e76bc1744a4998d047ce8
--- /dev/null
+++ b/doc/leaderboard/multipie.rst
@@ -0,0 +1,13 @@
+.. vim: set fileencoding=utf-8 :
+
+.. _bob.bio.face.learderboard.multipie:
+
+================
+Multipie Dataset
+================
+
+
+.. todo::
+   Benchmarks on Multipie Database
+
+   Probably for Manuel's students
\ No newline at end of file
diff --git a/doc/leaderboard/uccs.rst b/doc/leaderboard/uccs.rst
new file mode 100644
index 0000000000000000000000000000000000000000..003d37380986284247405b8cb197c64b2578aa88
--- /dev/null
+++ b/doc/leaderboard/uccs.rst
@@ -0,0 +1,13 @@
+.. vim: set fileencoding=utf-8 :
+
+.. _bob.bio.face.learderboard.uccs:
+
+============
+UCCS Dataset
+============
+
+
+.. todo::
+   Present benchmarks
+
+   Probably for Manuel's students
\ No newline at end of file
diff --git a/doc/leaderboard/xm2vts.rst b/doc/leaderboard/xm2vts.rst
new file mode 100644
index 0000000000000000000000000000000000000000..faf38692442f086ad0a728c411172ba3e6e1493f
--- /dev/null
+++ b/doc/leaderboard/xm2vts.rst
@@ -0,0 +1,13 @@
+.. vim: set fileencoding=utf-8 :
+
+.. _bob.bio.face.learderboard.xm2vts:
+
+==============
+XM2VTS Dataset
+==============
+
+
+.. todo::
+   Present benchmarks
+
+   Probably for Manuel's students
\ No newline at end of file
diff --git a/doc/references.rst b/doc/references.rst
index 22fb8ffc61e6e09e7706923be6d9501f3eb254d5..ea60e1ad5e565618d66c19fe260dabb1b4b4df45 100644
--- a/doc/references.rst
+++ b/doc/references.rst
@@ -8,19 +8,12 @@ References
 
 .. [TP91]    *M. Turk and A. Pentland*. **Eigenfaces for recognition**. Journal of Cognitive Neuroscience, 3(1):71-86, 1991.
 .. [ZKC98]  *W. Zhao, A. Krishnaswamy, R. Chellappa, D. Swets and J. Weng*. **Discriminant analysis of principal components for face recognition**, pages 73-85. Springer Verlag Berlin, 1998.
-.. [MWP98]   *B. Moghaddam, W. Wahid and A. Pentland*. **Beyond eigenfaces: probabilistic matching for face recognition**. IEEE International Conference on Automatic Face and Gesture Recognition, pages 30-35. 1998.
 .. [GHW12]   *M. Günther, D. Haufe and R.P. Würtz*. **Face recognition with disparity corrected Gabor phase differences**. In Artificial neural networks and machine learning, volume 7552 of Lecture Notes in Computer Science, pages 411-418. 9/2012.
 .. [ZSG05]  *W. Zhang, S. Shan, W. Gao, X. Chen and H. Zhang*. **Local Gabor binary pattern histogram sequence (LGBPHS): a novel non-statistical model for face representation and recognition**. Computer Vision, IEEE International Conference on, 1:786-791, 2005.
-.. [MM09]    *C. McCool, S. Marcel*. **Parts-based face verification using local frequency bands**. In Advances in biometrics, volume 5558 of Lecture Notes in Computer Science. 2009.
-.. .. [WMM12]  *R. Wallace, M. McLaren, C. McCool and S. Marcel*. **Cross-pollination of normalisation techniques from speaker to face authentication using Gaussian mixture models**. IEEE Transactions on Information Forensics and Security, 2012.
 .. [WMM11]  *R. Wallace, M. McLaren, C. McCool and S. Marcel*. **Inter-session variability modelling and joint factor analysis for face authentication**. International Joint Conference on Biometrics. 2011.
-.. [Pri07]   *S. J. D. Prince*. **Probabilistic linear discriminant analysis for inferences about identity**. Proceedings of the International Conference on Computer Vision. 2007.
-.. [ESM13]  *L. El Shafey, Chris McCool, Roy Wallace and Sébastien Marcel*. **A scalable formulation of probabilistic linear discriminant analysis: applied to face recognition**. IEEE Transactions on Pattern Analysis and Machine Intelligence, 35(7):1788-1794, 7/2013.
-.. [WM12]    *R. Wallace and M. McLaren*. **Total variability modelling for face verification**. IET Biometrics, vol.1, no.4, 188-199, 12/2012
 .. [TT10]    *X. Tan and B. Triggs*. **Enhanced local texture feature sets for face recognition under difficult lighting conditions**. IEEE Transactions on Image Processing, 19(6):1635-1650, 2010.
 .. [WLW04]   *H. Wang, S.Z. Li and Y. Wang*. **Face recognition under varying lighting conditions using self quotient image**. In IEEE International Conference on Automatic Face and Gesture Recognition (AFGR), pages 819-824. 2004.
-.. .. [HRM06]   *G. Heusch, Y. Rodriguez, and S. Marcel*. **Local Binary Patterns as an Image Preprocessing for Face Authentication**. In IEEE International Conference on Automatic Face and Gesture Recognition (AFGR), 2006.
 .. [WFK97]   *L. Wiskott, J.-M. Fellous, N. Krüger and C.v.d. Malsburg*. **Face recognition by elastic bunch graph matching**. IEEE Transactions on Pattern Analysis and Machine Intelligence, 19:775-779, 1997.
 .. [ZSQ09]  *W. Zhang, S. Shan, L. Qing, X. Chen and W. Gao*. **Are Gabor phases really useless for face recognition?** Pattern Analysis & Applications, 12:301-307, 2009.
-.. [GW09]    *M. Günther and R.P. Würtz*. **Face detection and recognition using maximum likelihood classifiers on Gabor graphs**. International Journal of Pattern Recognition and Artificial Intelligence, 23(3):433-461, 2009.
-.. .. [GWM12]   *M. Günther, R. Wallace and S. Marcel*. **An Open Source Framework for Standardized Comparisons of Face Recognition Algorithms**. Computer Vision - ECCV 2012. Workshops and Demonstrations, LNCS, 7585, 547-556, 2012.
+.. [TFP18] de Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
+.. [HRM06]   *G. Heusch, Y. Rodriguez, and S. Marcel*. **Local Binary Patterns as an Image Preprocessing for Face Authentication**. In IEEE International Conference on Automatic Face and Gesture Recognition (AFGR), 2006.