diff --git a/bob/bio/base/__init__.py b/bob/bio/base/__init__.py
index 82ce2a09e27b49c5572bda388b25b6cdb1934df5..f55d534a0d2717a10b2fd14f8a6788097737b350 100644
--- a/bob/bio/base/__init__.py
+++ b/bob/bio/base/__init__.py
@@ -4,16 +4,19 @@ from . import preprocessor
 from . import extractor
 from . import algorithm
 from . import annotator
+from . import pipelines
 
 from . import script
 from . import test
 
+
 def get_config():
-  """Returns a string containing the configuration information.
+    """Returns a string containing the configuration information.
   """
-  import bob.extension
-  return bob.extension.get_config(__name__)
+    import bob.extension
+
+    return bob.extension.get_config(__name__)
 
 
 # gets sphinx autodoc done right - don't remove it
-__all__ = [_ for _ in dir() if not _.startswith('_')]
+__all__ = [_ for _ in dir() if not _.startswith("_")]
diff --git a/bob/bio/base/database/__init__.py b/bob/bio/base/database/__init__.py
index 8e071c9f60a94bae2b09a8e4b2709e7835752425..f893a1a77c0a60954a628798448632866879e3bc 100644
--- a/bob/bio/base/database/__init__.py
+++ b/bob/bio/base/database/__init__.py
@@ -1,4 +1,9 @@
-from .csv_dataset import CSVDatasetDevEval, CSVToSampleLoader, CSVDatasetCrossValidation
+from .csv_dataset import (
+    CSVDatasetDevEval,
+    CSVToSampleLoader,
+    CSVDatasetCrossValidation,
+    CSVBaseSampleLoader,
+)
 from .file import BioFile
 from .file import BioFileSet
 from .database import BioDatabase
@@ -9,7 +14,7 @@ from . import filelist
 
 # gets sphinx autodoc done right - don't remove it
 def __appropriate__(*args):
-  """Says object was actually declared here, and not in the import module.
+    """Says object was actually declared here, and not in the import module.
   Fixing sphinx warnings of not being able to find classes, when path is shortened.
   Parameters:
 
@@ -19,17 +24,18 @@ def __appropriate__(*args):
   <https://github.com/sphinx-doc/sphinx/issues/3048>`
   """
 
-  for obj in args:
-    obj.__module__ = __name__
+    for obj in args:
+        obj.__module__ = __name__
 
 
 __appropriate__(
     BioFile,
     BioFileSet,
     BioDatabase,
-    ZTBioDatabase,	
-	  CSVDatasetDevEval,
+    ZTBioDatabase,
+    CSVBaseSampleLoader,
+    CSVDatasetDevEval,
     CSVToSampleLoader,
-    CSVDatasetCrossValidation
+    CSVDatasetCrossValidation,
 )
-__all__ = [_ for _ in dir() if not _.startswith('_')]
+__all__ = [_ for _ in dir() if not _.startswith("_")]
diff --git a/bob/bio/base/database/csv_dataset.py b/bob/bio/base/database/csv_dataset.py
index bafb21a73aa7333aaa2ee7790ab4d7c29cab7e60..23dfbae7bef89ab238a71c8b91e6d21b0900eddf 100644
--- a/bob/bio/base/database/csv_dataset.py
+++ b/bob/bio/base/database/csv_dataset.py
@@ -67,25 +67,24 @@ class CSVToSampleLoader(CSVBaseSampleLoader):
     :any:`bob.pipelines.DelayedSample` or :any:`bob.pipelines.SampleSet`
     """
 
-    def __call__(self, filename):
-        def check_header(header):
-            """
-            A header should have at least "SUBJECT" AND "PATH"
-            """
-            header = [h.lower() for h in header]
-            if not "subject" in header:
-                raise ValueError(
-                    "The field `subject` is not available in your dataset."
-                )
+    def check_header(self, header):
+        """
+        A header should have at least "SUBJECT" AND "PATH"
+        """
+        header = [h.lower() for h in header]
+        if not "subject" in header:
+            raise ValueError("The field `subject` is not available in your dataset.")
 
-            if not "path" in header:
-                raise ValueError("The field `path` is not available in your dataset.")
+        if not "path" in header:
+            raise ValueError("The field `path` is not available in your dataset.")
+
+    def __call__(self, filename):
 
         with open(filename) as cf:
             reader = csv.reader(cf)
             header = next(reader)
 
-            check_header(header)
+            self.check_header(header)
             return [self.convert_row_to_sample(row, header) for row in reader]
 
     def convert_row_to_sample(self, row, header):
@@ -136,8 +135,8 @@ class CSVToSampleLoader(CSVBaseSampleLoader):
 
 class CSVDatasetDevEval:
     """
-    Generic filelist dataset for :any:`bob.bio.base.pipelines.VanillaBiometrics` pipeline.
-    Check :ref:`vanilla_biometrics_features` for more details about the Vanilla Biometrics Dataset
+    Generic filelist dataset for the :any:`bob.bio.base.pipelines.vanilla_biometrics.VanillaBiometricsPipeline`.
+    Check :any:`vanilla_biometrics_features` for more details about the Vanilla Biometrics Dataset
     interface.
 
     To create a new dataset, you need to provide a directory structure similar to the one below:
@@ -163,8 +162,8 @@ class CSVDatasetDevEval:
 
 
     Those csv files should contain in each row i-) the path to raw data and ii-) the subject label
-    for enrollment (:ref:`bob.bio.base.pipelines.vanilla_biometrics.abstract_classes.Database.references`) and
-    probing (:ref:`bob.bio.base.pipelines.vanilla_biometrics.abstract_classes.Database.probes`).
+    for enrollment (:any:`bob.bio.base.pipelines.vanilla_biometrics.Database.references`) and
+    probing (:any:`bob.bio.base.pipelines.vanilla_biometrics.Database.probes`).
     The structure of each CSV file should be as below:
 
     .. code-block:: text
@@ -192,7 +191,7 @@ class CSVDatasetDevEval:
     are optional and it is used in case a protocol contains data for evaluation.
     
     Finally, the content of the file `my_dataset/my_protocol/train.csv` is used in the case a protocol
-    contains data for training (:ref:`bob.bio.base.pipelines.vanilla_biometrics.abstract_classes.Database.background_model_samples`)
+    contains data for training (:any:`bob.bio.base.pipelines.vanilla_biometrics.Database.background_model_samples`).
 
     Parameters
     ----------
@@ -203,8 +202,8 @@ class CSVDatasetDevEval:
         protocol_na,e: str
           The name of the protocol
 
-        csv_to_sample_loader: :any:`CSVBaseSampleLoader`
-            Base class that whose objective is to generate :any:`bob.pipelines.Samples`
+        csv_to_sample_loader: :any:`bob.bio.base.database.CSVBaseSampleLoader`
+            Base class whose objective is to generate :any:`bob.pipelines.Sample`
             and/or :any:`bob.pipelines.SampleSet` from csv rows
 
     """
@@ -330,10 +329,10 @@ class CSVDatasetDevEval:
 
 class CSVDatasetCrossValidation:
     """
-    Generic filelist dataset for :any:`bob.bio.base.pipelines.VanillaBiometrics` pipeline that 
+    Generic filelist dataset for the :any:`bob.bio.base.pipelines.vanilla_biometrics.VanillaBiometricsPipeline` that
     handles **CROSS VALIDATION**.
 
-    Check :ref:`vanilla_biometrics_features` for more details about the Vanilla Biometrics Dataset
+    Check :any:`vanilla_biometrics_features` for more details about the Vanilla Biometrics Dataset
     interface.
 
 
@@ -365,8 +364,8 @@ class CSVDatasetCrossValidation:
     samples_for_enrollment: float
       Number of samples used for enrollment
 
-    csv_to_sample_loader: :any:`CSVBaseSampleLoader`
-        Base class that whose objective is to generate :any:`bob.pipelines.Samples`
+    csv_to_sample_loader: :any:`bob.bio.base.database.CSVBaseSampleLoader`
+        Base class whose objective is to generate :any:`bob.pipelines.Sample`
         and/or :any:`bob.pipelines.SampleSet` from csv rows
 
     """
diff --git a/bob/bio/base/database/file.py b/bob/bio/base/database/file.py
index 675373625d6a39219346e8786750606a4199627b..8f23b2f428d315b5c67be10922fbc457677ba653 100644
--- a/bob/bio/base/database/file.py
+++ b/bob/bio/base/database/file.py
@@ -2,6 +2,7 @@ import bob.db.base
 from bob.db.base.annotations import read_annotation_file
 from bob.pipelines.sample import _ReprMixin
 
+
 class BioFile(bob.db.base.File, _ReprMixin):
     """
     A simple base class that defines basic properties of File object for the use
@@ -29,7 +30,7 @@ class BioFile(bob.db.base.File, _ReprMixin):
         The extension of annotation files. Default is ``.json``
     annotation_type : str or None
         The type of the annotation file, see
-        :any:`bob.db.base.annotations.read_annotation_file`. Default is
+        `bob.db.base.annotations.read_annotation_file`. Default is
         ``json``.
     """
 
@@ -82,14 +83,14 @@ class BioFile(bob.db.base.File, _ReprMixin):
         if original_extension is None:
             original_extension = self.original_extension
         # get the path
-        path = self.make_path(
-            original_directory or "", original_extension or ""
-        )
+        path = self.make_path(original_directory or "", original_extension or "")
         return bob.io.base.load(path)
 
     @property
     def annotations(self):
-        path = self.make_path(self.annotation_directory or "", self.annotation_extension or "")
+        path = self.make_path(
+            self.annotation_directory or "", self.annotation_extension or ""
+        )
         return read_annotation_file(path, annotation_type=self.annotation_type)
 
 
diff --git a/bob/bio/base/pipelines/__init__.py b/bob/bio/base/pipelines/__init__.py
index edbb4090fca046b19d22d3982711084621bff3be..4b3de5c27ff1de35709b7e3943e0d6422092a079 100644
--- a/bob/bio/base/pipelines/__init__.py
+++ b/bob/bio/base/pipelines/__init__.py
@@ -1,4 +1,7 @@
 # see https://docs.python.org/3/library/pkgutil.html
 from pkgutil import extend_path
 
+
+from . import vanilla_biometrics
+
 __path__ = extend_path(__path__, __name__)
diff --git a/bob/bio/base/pipelines/vanilla_biometrics/__init__.py b/bob/bio/base/pipelines/vanilla_biometrics/__init__.py
index 1554539476490058f2057022e5cacc98dda6aa57..96e784a441054acf58578bd35ce9ec75c94bf2f2 100644
--- a/bob/bio/base/pipelines/vanilla_biometrics/__init__.py
+++ b/bob/bio/base/pipelines/vanilla_biometrics/__init__.py
@@ -8,11 +8,59 @@ from .wrappers import (
     dask_vanilla_biometrics,
     checkpoint_vanilla_biometrics,
     dask_get_partition_size,
-    is_checkpointed
+    is_checkpointed,
 )
 
-from .zt_norm import ZTNormPipeline, ZTNormDaskWrapper, ZTNormCheckpointWrapper
+from .abstract_classes import BioAlgorithm, Database, ScoreWriter
+
+from .zt_norm import ZTNormPipeline, ZTNormDaskWrapper, ZTNormCheckpointWrapper, ZTNorm
 
 from .legacy import BioAlgorithmLegacy, DatabaseConnector
 
 from .vanilla_biometrics import execute_vanilla_biometrics
+
+
+# gets sphinx autodoc done right - don't remove it
+def __appropriate__(*args):
+    """Says object was actually declared here, and not in the import module.
+    Fixing sphinx warnings of not being able to find classes, when path is
+    shortened.
+
+    Parameters
+    ----------
+    *args
+        An iterable of objects to modify
+
+    Resolves `Sphinx referencing issues
+    <https://github.com/sphinx-doc/sphinx/issues/3048>`
+    """
+
+    for obj in args:
+        obj.__module__ = __name__
+
+
+__appropriate__(
+    VanillaBiometricsPipeline,
+    Distance,
+    FourColumnsScoreWriter,
+    CSVScoreWriter,
+    BioAlgorithmCheckpointWrapper,
+    BioAlgorithmDaskWrapper,
+    dask_vanilla_biometrics,
+    checkpoint_vanilla_biometrics,
+    dask_get_partition_size,
+    is_checkpointed,
+    ZTNormPipeline,
+    ZTNormDaskWrapper,
+    ZTNormCheckpointWrapper,
+    BioAlgorithmLegacy,
+    DatabaseConnector,
+    execute_vanilla_biometrics,
+    BioAlgorithm,
+    Database,
+    ScoreWriter,
+    ZTNorm,
+)
+
+__all__ = [_ for _ in dir() if not _.startswith("_")]
+
diff --git a/bob/bio/base/pipelines/vanilla_biometrics/biometric_algorithms.py b/bob/bio/base/pipelines/vanilla_biometrics/biometric_algorithms.py
index 233c9863d7b07c98216f068922cfc45be3acf58a..16eeee6f17b75633ced2eea079f6560c24e340d2 100644
--- a/bob/bio/base/pipelines/vanilla_biometrics/biometric_algorithms.py
+++ b/bob/bio/base/pipelines/vanilla_biometrics/biometric_algorithms.py
@@ -21,14 +21,14 @@ class Distance(BioAlgorithm):
 
         Enrolls the model by storing all given input vectors.
 
-        Parameters:
-        -----------
+        Parameters
+        ----------
 
         ``enroll_features`` : [:py:class:`numpy.ndarray`]
           The list of projected features to enroll the model from.
 
-        Returns:
-        --------
+        Returns
+        -------
 
         ``model`` : 2D :py:class:`numpy.ndarray`
           The enrolled model.
@@ -43,8 +43,8 @@ class Distance(BioAlgorithm):
 
         Computes the distance of the model to the probe using the distance function specified in the constructor.
 
-        Parameters:
-        -----------
+        Parameters
+        ----------
 
         ``model`` : 2D :py:class:`numpy.ndarray`
           The model storing all enrollment features
@@ -52,8 +52,8 @@ class Distance(BioAlgorithm):
         ``probe`` : :py:class:`numpy.ndarray`
           The probe feature vector
 
-        Returns:
-        --------
+        Returns
+        -------
 
         ``score`` : float
           A similarity value between ``model`` and ``probe``
diff --git a/bob/bio/base/pipelines/vanilla_biometrics/legacy.py b/bob/bio/base/pipelines/vanilla_biometrics/legacy.py
index aac7afc3e260e976079c45e65edf76ca784e28bb..d0f49582cbff6a6a3d2b946bdcd5dcf928f25ae6 100644
--- a/bob/bio/base/pipelines/vanilla_biometrics/legacy.py
+++ b/bob/bio/base/pipelines/vanilla_biometrics/legacy.py
@@ -52,7 +52,7 @@ class DatabaseConnector(Database):
 
     protocol : str
         The name of the protocol to generate samples from.
-        To be plugged at :py:method:`bob.db.base.Database.objects`.
+        To be plugged into `bob.db.base.Database.objects`.
 
     allow_scoring_with_all_biometric_references: bool
         If True will allow the scoring function to be performed in one shot with multiple probes.
@@ -294,7 +294,7 @@ class BioAlgorithmLegacy(BioAlgorithm):
 
     def write_scores(self, samples, path):
         os.makedirs(os.path.dirname(path), exist_ok=True)
-        #open(path, "wb").write(pickle.dumps(samples))
+        # open(path, "wb").write(pickle.dumps(samples))
         joblib.dump(samples, path, compress=4)
 
     def _score_sample_set(
@@ -304,7 +304,7 @@ class BioAlgorithmLegacy(BioAlgorithm):
         allow_scoring_with_all_biometric_references=False,
     ):
         def _load(path):
-            #return pickle.loads(open(path, "rb").read())
+            # return pickle.loads(open(path, "rb").read())
             return joblib.load(path)
 
         def _make_name(sampleset, biometric_references):
@@ -315,7 +315,8 @@ class BioAlgorithmLegacy(BioAlgorithm):
             return name + suffix
 
         path = os.path.join(
-            self.score_dir, _make_name(sampleset, biometric_references) + self._score_extension
+            self.score_dir,
+            _make_name(sampleset, biometric_references) + self._score_extension,
         )
 
         if self.force or not os.path.exists(path):
@@ -329,8 +330,9 @@ class BioAlgorithmLegacy(BioAlgorithm):
 
             self.write_scores(scored_sample_set.samples, path)
 
-            scored_sample_set = DelayedSampleSet(functools.partial(_load, path),
-                                                 parent=scored_sample_set)
+            scored_sample_set = DelayedSampleSet(
+                functools.partial(_load, path), parent=scored_sample_set
+            )
 
         else:
             scored_sample_set = SampleSet(_load(path), parent=sampleset)
diff --git a/bob/bio/base/pipelines/vanilla_biometrics/pipelines.py b/bob/bio/base/pipelines/vanilla_biometrics/pipelines.py
index b0ec77bfc6b8052b90f35bfef6ce727eeeae824c..a26353ffd47e476a269ddd1bf854910841c66b12 100644
--- a/bob/bio/base/pipelines/vanilla_biometrics/pipelines.py
+++ b/bob/bio/base/pipelines/vanilla_biometrics/pipelines.py
@@ -60,8 +60,8 @@ class VanillaBiometricsPipeline(object):
       >>> pipeline(samples_for_training_back_ground_model, samplesets_for_enroll, samplesets_for_scoring).compute()
 
 
-    Parameters:
-    -----------
+    Parameters
+    ----------
 
       transformer: :py:class`sklearn.pipeline.Pipeline` or a `sklearn.base.BaseEstimator`
         Transformer that will preprocess your data
@@ -69,8 +69,8 @@ class VanillaBiometricsPipeline(object):
       biometric_algorithm: :py:class:`bob.bio.base.pipelines.vanilla_biometrics.abstract_classes.BioAlgorithm`
         Biometrics algorithm object that implements the methods `enroll` and `score` methods
 
-      score_writer: :any:`bob.bio.base.pipelines.vanilla_biometrics.abstract_classe.ScoreWriter`
-          Format to write scores. Default to :any:`FourColumnsScoreWriter`
+      score_writer: :any:`bob.bio.base.pipelines.vanilla_biometrics.ScoreWriter`
+          Format to write scores. Defaults to :any:`bob.bio.base.pipelines.vanilla_biometrics.FourColumnsScoreWriter`
 
     """
 
diff --git a/bob/bio/base/pipelines/vanilla_biometrics/vanilla_biometrics.py b/bob/bio/base/pipelines/vanilla_biometrics/vanilla_biometrics.py
index ffcc645310ecf60728a5ddf7c5e9573018d2c0c3..29f07d57e84abbe95316426f4a2f29daf672e04f 100644
--- a/bob/bio/base/pipelines/vanilla_biometrics/vanilla_biometrics.py
+++ b/bob/bio/base/pipelines/vanilla_biometrics/vanilla_biometrics.py
@@ -17,7 +17,6 @@ from bob.pipelines.utils import isinstance_nested
 logger = logging.getLogger(__name__)
 
 
-
 def compute_scores(result, dask_client):
     if isinstance(result, Delayed) or isinstance(result, dask.bag.Bag):
         if dask_client is not None:
@@ -33,7 +32,6 @@ def post_process_scores(pipeline, scores, path):
     return pipeline.post_process(written_scores, path)
 
 
-
 def execute_vanilla_biometrics(
     pipeline,
     database,
diff --git a/bob/bio/base/pipelines/vanilla_biometrics/wrappers.py b/bob/bio/base/pipelines/vanilla_biometrics/wrappers.py
index 31d0b7b5160f9c9b5081106c45e7ec73ce2c99b3..c9e6da050bdaa0fb37be7bb252ac40b10a6b6442 100644
--- a/bob/bio/base/pipelines/vanilla_biometrics/wrappers.py
+++ b/bob/bio/base/pipelines/vanilla_biometrics/wrappers.py
@@ -31,8 +31,8 @@ class BioAlgorithmCheckpointWrapper(BioAlgorithm):
 
     Parameters
     ----------
-        biometric_algorithm: :any:`BioAlgorithm`
-           An implemented :any:`BioAlgorithm`
+        biometric_algorithm: :any:`bob.bio.base.pipelines.vanilla_biometrics.BioAlgorithm`
+           An implemented :any:`bob.bio.base.pipelines.vanilla_biometrics.BioAlgorithm`
     
         base_dir: str
            Path to store biometric references and scores
@@ -182,7 +182,7 @@ class BioAlgorithmCheckpointWrapper(BioAlgorithm):
 
 class BioAlgorithmDaskWrapper(BioAlgorithm):
     """
-    Wrap :any:`BioAlgorithm` to work with DASK
+    Wrap :any:`bob.bio.base.pipelines.vanilla_biometrics.BioAlgorithm` to work with DASK
     """
 
     def __init__(self, biometric_algorithm, **kwargs):
@@ -235,20 +235,20 @@ class BioAlgorithmDaskWrapper(BioAlgorithm):
 
 def dask_vanilla_biometrics(pipeline, npartitions=None, partition_size=None):
     """
-    Given a :any:`VanillaBiometrics`, wraps :any:`VanillaBiometrics.transformer` and
-    :any:`VanillaBiometrics.biometric_algorithm` to be executed with dask
+    Given a :any:`bob.bio.base.pipelines.vanilla_biometrics.VanillaBiometricsPipeline`, wraps its transformer and its
+    :any:`bob.bio.base.pipelines.vanilla_biometrics.BioAlgorithm` to be executed with dask.
 
     Parameters
     ----------
 
-    pipeline: :any:`VanillaBiometrics`
+    pipeline: :any:`bob.bio.base.pipelines.vanilla_biometrics.VanillaBiometricsPipeline`
        Vanilla Biometrics based pipeline to be dasked
 
     npartitions: int
-       Number of partitions for the initial :any:`dask.bag`
+       Number of partitions for the initial `dask.bag`
 
     partition_size: int
-       Size of the partition for the initial :any:`dask.bag`
+       Size of the partition for the initial `dask.bag`
     """
 
     if isinstance(pipeline, ZTNormPipeline):
@@ -294,11 +294,11 @@ def dask_get_partition_size(cluster, n_objects):
     The heuristics is pretty simple, given the max number of possible workers to be run
     in a queue (not the number of current workers running) and a total number objects to be processed do n_objects/n_max_workers:
 
-    Parameters:
-    -----------
+    Parameters
+    ----------
 
-        cluster:  :any:`bob.pipelines.distributed.SGEMultipleQueuesCluster`
-            Cluster of the type :any:`bob.pipelines.distributed.SGEMultipleQueuesCluster`
+        cluster:  :any:`bob.pipelines.distributed.sge.SGEMultipleQueuesCluster`
+            Cluster of the type :any:`bob.pipelines.distributed.sge.SGEMultipleQueuesCluster`
 
         n_objects: int
             Number of objects to be processed
@@ -313,13 +313,13 @@ def dask_get_partition_size(cluster, n_objects):
 
 def checkpoint_vanilla_biometrics(pipeline, base_dir, biometric_algorithm_dir=None):
     """
-    Given a :any:`VanillaBiometrics`, wraps :any:`VanillaBiometrics.transformer` and
-    :any:`VanillaBiometrics.biometric_algorithm` to be checkpointed
+    Given a :any:`bob.bio.base.pipelines.vanilla_biometrics.VanillaBiometricsPipeline`, wraps its transformer and its
+    :any:`bob.bio.base.pipelines.vanilla_biometrics.BioAlgorithm` to be checkpointed.
 
     Parameters
     ----------
 
-    pipeline: :any:`VanillaBiometrics`
+    pipeline: :any:`bob.bio.base.pipelines.vanilla_biometrics.VanillaBiometricsPipeline`
        Vanilla Biometrics based pipeline to be checkpointed
 
     base_dir: str
@@ -362,7 +362,9 @@ def checkpoint_vanilla_biometrics(pipeline, base_dir, biometric_algorithm_dir=No
         ):
             save_func = estimator.estimator.instance.write_feature
             load_func = estimator.estimator.instance.read_feature
-            estimator.estimator.projector_file = os.path.join(bio_ref_scores_dir,"Projector.hdf5")
+            estimator.estimator.projector_file = os.path.join(
+                bio_ref_scores_dir, "Projector.hdf5"
+            )
 
         wraped_estimator = bob.pipelines.wrap(
             ["checkpoint"],
@@ -374,7 +376,6 @@ def checkpoint_vanilla_biometrics(pipeline, base_dir, biometric_algorithm_dir=No
 
         sk_pipeline.steps[i] = (name, wraped_estimator)
 
-
     if isinstance(pipeline.biometric_algorithm, BioAlgorithmLegacy):
         pipeline.biometric_algorithm.base_dir = bio_ref_scores_dir
     else:
@@ -387,13 +388,13 @@ def checkpoint_vanilla_biometrics(pipeline, base_dir, biometric_algorithm_dir=No
 
 def is_checkpointed(pipeline):
     """
-    Check if :any:`VanillaBiometrics` is checkpointed
+    Check if :any:`bob.bio.base.pipelines.vanilla_biometrics.VanillaBiometricsPipeline` is checkpointed
 
 
     Parameters
     ----------
 
-    pipeline: :any:`VanillaBiometrics`
+    pipeline: :any:`bob.bio.base.pipelines.vanilla_biometrics.VanillaBiometricsPipeline`
        Vanilla Biometrics based pipeline to be checkpointed
 
     """
@@ -402,7 +403,7 @@ def is_checkpointed(pipeline):
     # If it BioAlgorithmLegacy and the transformer of BioAlgorithmLegacy is also checkpointable
     return isinstance_nested(
         pipeline, "biometric_algorithm", BioAlgorithmCheckpointWrapper
-    ) or \
-    (   isinstance_nested(pipeline, "biometric_algorithm", BioAlgorithmLegacy) and \
-        isinstance_nested(pipeline, "transformer", CheckpointWrapper)
+    ) or (
+        isinstance_nested(pipeline, "biometric_algorithm", BioAlgorithmLegacy)
+        and isinstance_nested(pipeline, "transformer", CheckpointWrapper)
     )
diff --git a/bob/bio/base/pipelines/vanilla_biometrics/zt_norm.py b/bob/bio/base/pipelines/vanilla_biometrics/zt_norm.py
index fbdfaba935b1f326b354c5e87946e30d833f3a6a..10c049d62aca3f51804ea928ac490d693a546793 100644
--- a/bob/bio/base/pipelines/vanilla_biometrics/zt_norm.py
+++ b/bob/bio/base/pipelines/vanilla_biometrics/zt_norm.py
@@ -3,7 +3,7 @@
 # vim: set fileencoding=utf-8 :
 
 Implementation of a pipeline and an algorithm that 
-computes Z, T and ZT Score Normalization of a :any:`BioAlgorithm`
+computes Z, T and ZT Score Normalization of a :any:`bob.bio.base.pipelines.vanilla_biometrics.BioAlgorithm`
 """
 
 from bob.pipelines import DelayedSample, Sample, SampleSet, DelayedSampleSet
@@ -17,6 +17,7 @@ import copy
 import joblib
 import logging
 from .pipelines import check_valid_pipeline
+
 logger = logging.getLogger(__name__)
 
 
@@ -265,7 +266,7 @@ class ZTNormPipeline(object):
 
 class ZTNorm(object):
     """
-    Computes Z, T and ZT Score Normalization of a :any:`BioAlgorithm`
+    Computes Z, T and ZT Score Normalization of a :any:`bob.bio.base.pipelines.vanilla_biometrics.BioAlgorithm`
 
     Reference bibliography from: A Generative Model for Score Normalization in Speaker Recognition
     https://arxiv.org/pdf/1709.09868.pdf
@@ -496,12 +497,12 @@ class ZTNorm(object):
 
 class ZTNormDaskWrapper(object):
     """
-    Wrap :any:`ZTNorm` to work with DASK
+    Wrap :any:`bob.bio.base.pipelines.vanilla_biometrics.ZTNorm` to work with DASK
 
     Parameters
     ----------
 
-        ztnorm: :any:`ZTNorm`
+        ztnorm: :any:`bob.bio.base.pipelines.vanilla_biometrics.ZTNorm`
             ZTNorm Pipeline
     """
 
@@ -558,12 +559,12 @@ class ZTNormDaskWrapper(object):
 
 class ZTNormCheckpointWrapper(object):
     """
-    Wrap :any:`ZTNorm` to work with DASK
+    Wrap :any:`bob.bio.base.pipelines.vanilla_biometrics.ZTNorm` to be checkpointed
 
     Parameters
     ----------
 
-        ztnorm: :any:`ZTNorm`
+        ztnorm: :any:`bob.bio.base.pipelines.vanilla_biometrics.ZTNorm`
             ZTNorm Pipeline
     """
 
@@ -584,11 +585,11 @@ class ZTNormCheckpointWrapper(object):
 
     def write_scores(self, samples, path):
         os.makedirs(os.path.dirname(path), exist_ok=True)
-        #open(path, "wb").write(cloudpickle.dumps(samples))
+        # open(path, "wb").write(cloudpickle.dumps(samples))
         joblib.dump(samples, path, compress=4)
 
     def _load(self, path):
-        #return cloudpickle.loads(open(path, "rb").read())
+        # return cloudpickle.loads(open(path, "rb").read())
         return joblib.load(path)
 
     def _make_name(self, sampleset, biometric_references, for_zt=False):
@@ -613,8 +614,8 @@ class ZTNormCheckpointWrapper(object):
             self.write_scores(z_normed_score.samples, path)
 
         z_normed_score = DelayedSampleSet(
-                functools.partial(self._load, path),
-                parent=probe_score)
+            functools.partial(self._load, path), parent=probe_score
+        )
 
         return z_normed_score
 
@@ -630,8 +631,9 @@ class ZTNormCheckpointWrapper(object):
 
             self.write_scores(t_normed_score.samples, path)
 
-        t_normed_score = DelayedSampleSet(functools.partial(self._load, path),
-                                          parent=probe_score)
+        t_normed_score = DelayedSampleSet(
+            functools.partial(self._load, path), parent=probe_score
+        )
         return t_normed_score
 
     def compute_znorm_scores(
diff --git a/bob/bio/base/test/test_filelist.py b/bob/bio/base/test/test_filelist.py
index dc6ded053d175e3dd6fd7b4d518c72ea1f3efdc2..f91f9da2adfe9f0ce2a7fb1a8bc1eb372141c4a9 100644
--- a/bob/bio/base/test/test_filelist.py
+++ b/bob/bio/base/test/test_filelist.py
@@ -188,6 +188,7 @@ def test_atnt_experiment_cross_validation():
 # Testing the Legacy file list
 ####
 
+
 def test_query():
     db = FileListBioDatabase(
         legacy_example_dir, "test", use_dense_probe_file_list=False
diff --git a/doc/extra-intersphinx.txt b/doc/extra-intersphinx.txt
index 06dcf5f4e801ce70b13529c61503a191a1c1a713..afa9ff94c2127b4d753171100464d4840b5ce6b2 100644
--- a/doc/extra-intersphinx.txt
+++ b/doc/extra-intersphinx.txt
@@ -9,3 +9,4 @@ bob.learn.linear
 gridtk
 bob.db.youtube
 bob.pipelines
+dask
\ No newline at end of file
diff --git a/doc/index.rst b/doc/index.rst
index 420cb1e3ef077cf83e3fad645452d9382c60f594..40d49998c3aa71b86ccbc3e56b0ab474842f26ad 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -97,6 +97,16 @@ Reference Manual
    py_api
 
 
+References
+==========
+
+.. [TP91]    *M. Turk and A. Pentland*. **Eigenfaces for recognition**. Journal of Cognitive Neuroscience, 3(1):71-86, 1991.
+.. [ZKC+98]  *W. Zhao, A. Krishnaswamy, R. Chellappa, D. Swets and J. Weng*. **Discriminant analysis of principal components for face recognition**, pages 73-85. Springer Verlag Berlin, 1998.
+.. [MWP98]   *B. Moghaddam, W. Wahid and A. Pentland*. **Beyond eigenfaces: probabilistic matching for face recognition**. IEEE International Conference on Automatic Face and Gesture Recognition, pages 30-35. 1998.
+.. [GW09]    *M. Günther and R.P. Würtz*. **Face detection and recognition using maximum likelihood classifiers on Gabor graphs**. International Journal of Pattern Recognition and Artificial Intelligence, 23(3):433-461, 2009.
+
+
+
 
 Indices and tables
 ==================
diff --git a/doc/py_api.rst b/doc/py_api.rst
index b0fb15d08173ffbec91bd1be26b4cbbd5e7e25b8..1b1b4f65ad53c8c5a9d245f3b24fb81a2f951b3b 100644
--- a/doc/py_api.rst
+++ b/doc/py_api.rst
@@ -181,6 +181,14 @@ Details
 
 .. automodule:: bob.bio.base
 
+.. automodule:: bob.bio.base.annotator
+.. automodule:: bob.bio.base.pipelines
+.. automodule:: bob.bio.base.pipelines.vanilla_biometrics
+.. automodule:: bob.bio.base.database
+.. automodule:: bob.bio.base.preprocessor
+.. automodule:: bob.bio.base.extractor
+.. automodule:: bob.bio.base.transformers
+.. automodule:: bob.bio.base.algorithm
 .. automodule:: bob.bio.base.score.load
 .. automodule:: bob.bio.base.script.figure
 .. automodule:: bob.bio.base.script.commands