diff --git a/bob/pad/face/config/deep_pix_bis.py b/bob/pad/face/config/deep_pix_bis.py
index 19eda97c778acd4fae862f320f12c183c6ab5a00..721679d6a56b731e797f0bdab07350b59d01fe64 100644
--- a/bob/pad/face/config/deep_pix_bis.py
+++ b/bob/pad/face/config/deep_pix_bis.py
@@ -13,7 +13,7 @@ if database is not None:
     annotation_type = database.annotation_type
     fixed_positions = database.fixed_positions
 else:
-    annotation_type = None
+    annotation_type = "eyes-center"
     fixed_positions = None
 
 
@@ -41,6 +41,8 @@ preprocessor = mario.wrap(
 # Classifier #
 classifier = DeepPixBisClassifier(model_file="oulunpu-p1")
 classifier = mario.wrap(["sample"], classifier)
+# Use predict_proba (the bonafide probability) as the pipeline's score function.
+decision_function = "predict_proba"
 
 
 pipeline = Pipeline(
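Note: ``decision_function`` here names the pipeline method that produces
scores. A minimal sketch of what the ``bob pad run-pipeline`` runner is
assumed to do with this setting (``samples`` stands for a hypothetical list
of input samples)::

   # Sketch (assumed runner behavior): resolve the configured scoring
   # method by name, so this pipeline is scored with predict_proba.
   scorer = getattr(pipeline, decision_function)
   scores = scorer(samples)  # `samples` is hypothetical here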
diff --git a/bob/pad/face/config/replay_attack.py b/bob/pad/face/config/replay_attack.py
index 706afa92e9c4e6a9e618d310c12178b7563f7103..13212ed74604ed36402b620ecc40eb3f0cb95ce4 100644
--- a/bob/pad/face/config/replay_attack.py
+++ b/bob/pad/face/config/replay_attack.py
@@ -1,11 +1,15 @@
-"""`Replayattack`_ is a database for face PAD experiments.
+"""`Replay-Attack`_ is a database for face PAD experiments.
 
-The Replay-Attack Database for face spoofing consists of 1300 video clips of photo and video attack attempts to 50 clients,
-under different lighting conditions. This Database was produced at the Idiap Research Institute, in Switzerland.
-The reference citation is [CAM12]_.
+The Replay-Attack Database for face spoofing consists of 1300 video clips of
+photo and video attack attempts to 50 clients, under different lighting
+conditions. This Database was produced at the Idiap Research Institute, in
+Switzerland. The reference citation is [CAM12]_.
 
-You can download the raw data of the `Replayattack`_ database by following
-the link.
+You can download the raw data of the `Replay-Attack`_ database by following the
+link. After downloading, you can tell the bob library where the files are
+located using::
+
+    $ bob config set bob.db.replayattack.directory /path/to/replayattack/directory
 
 .. include:: links.rst
 """
diff --git a/bob/pad/face/config/svm_frames.py b/bob/pad/face/config/svm_frames.py
index 68a9ea2d15120a914fd7bec1d88ef9616444e23c..73ece6023b3dc9dcc23882e6fe949c4f92148a1d 100644
--- a/bob/pad/face/config/svm_frames.py
+++ b/bob/pad/face/config/svm_frames.py
@@ -27,18 +27,12 @@ classifier = mario.wrap(
     fit_extra_arguments=[("y", "is_bonafide")],
 )
 
-
-# we put video_to_frames and classifier together in a pipeline
-# so that the output of video_to_frames is not checkpointed!
-frames_classifier = Pipeline(
-    [("video_to_frames", VideoToFrames()), ("classifier", classifier)]
-)
-
 # Pipeline #
 pipeline = Pipeline(
     [
         ("preprocessor", preprocessor),
         ("extractor", extractor),
-        ("svm", frames_classifier),
+        ("video_to_frames", VideoToFrames()),
+        ("svm", classifier),
     ]
 )
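Note: the nested ``frames_classifier`` pipeline existed only to keep the
output of ``VideoToFrames`` from being checkpointed. With the steps
flattened, the same effect can be achieved by checkpointing estimators
individually. A sketch, assuming ``bob.pipelines``' checkpoint wrapper and
hypothetical paths::

   import bob.pipelines as mario

   # Checkpoint only the classifier; VideoToFrames output stays in memory.
   checkpointed_classifier = mario.wrap(
       ["checkpoint"],
       classifier,
       model_path="tmp/svm.pkl",        # hypothetical path
       features_dir="tmp/predictions",  # hypothetical path
   )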
diff --git a/bob/pad/face/deep_pix_bis.py b/bob/pad/face/deep_pix_bis.py
index 23abe6eefebdffcd8b6d4601dd9bfb7c8c4699a1..c6e976c82e0e6c9013fcc84082943069a83e4c7e 100644
--- a/bob/pad/face/deep_pix_bis.py
+++ b/bob/pad/face/deep_pix_bis.py
@@ -232,8 +232,6 @@ class DeepPixBisClassifier(BaseEstimator, ClassifierMixin):
           The output score is close to 1 for bonafide and 0 for PAs.
         """
         self.load_model()
-        if self.device != torch.self.device("cuda"):
-            raise ValueError("Device {} is not supported.".format(self.device))
 
         tensor_images = []
         for img in images:
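Note: the removed guard was broken: ``torch.self.device`` is not valid
PyTorch, and the check would have rejected any non-CUDA device. If a device
fallback were still desired, a correct sketch would be::

   import torch

   # Prefer CUDA when available, otherwise fall back to the CPU.
   device = torch.device("cuda" if torch.cuda.is_available() else "cpu")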
diff --git a/bob/pad/face/test/test_deep_pix_bis.py b/bob/pad/face/test/test_deep_pix_bis.py
new file mode 100644
index 0000000000000000000000000000000000000000..7b9588c77500f0aaa12be7618f113ddba399ae15
--- /dev/null
+++ b/bob/pad/face/test/test_deep_pix_bis.py
@@ -0,0 +1,25 @@
+import pkg_resources
+
+import bob.io.base as io
+import bob.pipelines as mario
+
+from bob.bio.video import VideoLikeContainer
+
+
+def _sample_video():
+    path = pkg_resources.resource_filename(
+        "bob.pad.face", "test/data/test_image.png"
+    )
+    img = io.load(path)
+    video = VideoLikeContainer(img, [0])
+    sample = mario.Sample(video, key="sample", annotations=None)
+    return sample
+
+
+def test_pipeline():
+    from bob.pad.face.config.deep_pix_bis import pipeline
+
+    sample = _sample_video()
+    prediction = pipeline.predict_proba([sample])[0]
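+    # scores are close to 1 for bonafide and 0 for attacks (see DeepPixBisClassifier)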
+    assert prediction.data < 0.04
diff --git a/bob/pad/face/test/test_transformers.py b/bob/pad/face/test/test_transformers.py
index 082301ffede60e0f08a0f948a383dba352c2b8e8..90d3209cf79f205a334f19b6bb21f93b4988492c 100644
--- a/bob/pad/face/test/test_transformers.py
+++ b/bob/pad/face/test/test_transformers.py
@@ -14,6 +14,6 @@ def test_video_to_frames():
     samples = [mario.Sample(v, key=i) for i, v in enumerate(video_container)]
     frame_samples = VideoToFrames().transform(samples)
     assert len(frame_samples) == 4
-    assert all("0_" in s.key for s in frame_samples)
+    assert all(s.key == 0 for s in frame_samples)
     assert [s.data for s in frame_samples] == [0, 1, 2, 3]
     assert [s.frame_id for s in frame_samples] == [0, 1, 2, 4]
diff --git a/bob/pad/face/transformer/VideoToFrames.py b/bob/pad/face/transformer/VideoToFrames.py
index b9982e90be9a78c4e34009d01cb5a3d34272d9e6..f04318bb220d832d0e89a095dde8057a1851c230 100644
--- a/bob/pad/face/transformer/VideoToFrames.py
+++ b/bob/pad/face/transformer/VideoToFrames.py
@@ -23,17 +23,11 @@ class VideoToFrames(TransformerMixin, BaseEstimator):
             for frame, frame_id in zip(video, video.indices):
                 if frame is None:
                     continue
-                kw = (
-                    {"key": f"{sample.key}_{frame_id}"}
-                    if hasattr(sample, "key")
-                    else {}
-                )
                 new_sample = mario.Sample(
                     frame,
                     frame_id=frame_id,
                     annotations=annotations.get(str(frame_id)),
                     parent=sample,
-                    **kw,
                 )
                 output.append(new_sample)
 
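Note: with the per-frame ``key`` suffix removed, each frame sample now
inherits ``key`` from its parent video sample and stays distinguishable
through ``frame_id``. A usage sketch (the import path of ``VideoToFrames``
is assumed)::

   import bob.pipelines as mario
   from bob.bio.video import VideoLikeContainer
   from bob.pad.face.transformer import VideoToFrames  # assumed import path

   video = VideoLikeContainer([10, 20], [0, 1])
   frames = VideoToFrames().transform(
       [mario.Sample(video, key="vid1", annotations=None)]
   )
   assert [f.key for f in frames] == ["vid1", "vid1"]
   assert [f.frame_id for f in frames] == [0, 1]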
diff --git a/doc/baselines.rst b/doc/baselines.rst
index dfd774713082442cffd071340878d5c1cd85fc7a..5c0ae8be533b92fcf83736c505760fb83f6b17e3 100644
--- a/doc/baselines.rst
+++ b/doc/baselines.rst
@@ -1,5 +1,3 @@
-
-
 .. _bob.pad.face.baselines:
 
 
@@ -7,9 +5,7 @@
  Executing Baseline Algorithms
 ===============================
 
-This section explains how to execute face presentation attack detection (PAD)
-algorithms implemented in ``bob.pad.face``.
-
+This section explains how to run face presentation attack detection (PAD) experiments with ``bob.pad.face``.
 
 Running Baseline Experiments
 ----------------------------
@@ -21,7 +17,7 @@ To see the description of the command, you can type in the console:
 
    $ bob pad run-pipeline --help
 
-This command is explained in more detail in :ref:`bob.pad.base <bob.pad.base.features>`.
+This command is explained in more detail in :ref:`bob.pad.base <bob.pad.base.pipeline_intro>`.
 
 Usually, it is a good idea to have at least verbose level 2 (i.e., calling
 ``bob pad run-pipeline --verbose --verbose``, or the short version
@@ -35,14 +31,15 @@ Usually, it is a good idea to have at least verbose level 2 (i.e., calling
    line option. To run experiments in parallel on the local machine, add the
    ``--dask-client local-parallel`` option.
 
-   See :any:`this <pipeline_simple_features>` for more
-   details on dask configurations.
+.. note::
 
+   If you run out of memory, try reducing the dask partition size with the
+   ``--dask-partition-size`` option.
 
-Database setups and baselines are encoded using
-``configuration-files``, all stored inside the package structure, in
-the directory ``bob/pad/face/config``. Documentation for each resource
-is available on the section :ref:`bob.pad.face.resources`.
+Database setups and baselines are encoded using ``configuration-files``, all
+stored inside the package structure, in the directory ``bob/pad/face/config``.
+Documentation for each resource is available in the section
+:ref:`bob.pad.face.resources`.
 
 .. warning::
 
@@ -63,8 +60,7 @@ is available on the section :ref:`bob.pad.face.resources`.
 
    .. code-block:: sh
 
-      $ bob config set bob.db.replaymobile.directory /path/to/replayattack-database/
-      $ bob config set bob.db.replaymobile.extension .mov
+      $ bob config set bob.db.replaymobile.directory /path/to/replaymobile-database/
 
    Notice it is rather important to correctly configure the database as
    described above, otherwise ``bob.pad.base`` will not be able to correctly
@@ -81,14 +77,49 @@ Baselines on REPLAY-ATTACK database
 --------------------------------------
 
 This section summarizes the results of baseline face PAD experiments on the
-REPLAY-ATTACK (`replayattack`_) database.
-The description of the database-related settings, which are used to run face PAD
-baselines on the Replay-Attack is given here
-:ref:`bob.pad.face.resources.databases.replay`. To understand the settings in
-more detail you can check the corresponding configuration file:
-``bob/pad/face/config/replay_attack.py``.
+REPLAY-ATTACK (`Replay-Attack`_) database. The database-related settings used
+to run face PAD baselines on Replay-Attack are described in
+:ref:`bob.pad.face.resources.databases.replay`. To understand the settings in
+more detail, you can check the corresponding configuration file:
+``bob/pad/face/config/replay_attack.py``.
+
+Deep-Pix-BiS Baseline
+~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: sh
+
+   $ bob pad run-pipeline -vv replay-attack deep-pix-bis --output <OUTPUT> --dask-client <CLIENT>
+
+This baseline reports scores per frame. To obtain scores per video, you can run::
 
+   $ bob pad finalize-scores -vv <OUTPUT>/scores-{dev,eval}.csv
 
+Finally, you can evaluate this baseline using::
+
+   $ bob pad metrics -vv --eval <OUTPUT>/scores-{dev,eval}.csv
+
+which should give you::
+
+   [Min. criterion: EER ] Threshold on Development set `<OUTPUT>/scores-dev.csv`: 1.919391e-01
+   ==============  ==============  ===============
+   ..              Development     Evaluation
+   ==============  ==============  ===============
+   APCER (attack)  32.3%           34.0%
+   APCER_AP        32.3%           34.0%
+   BPCER           31.7%           27.5%
+   ACER            32.0%           30.8%
+   FTA             0.0%            0.0%
+   FPR             32.3% (97/300)  34.0% (136/400)
+   FNR             31.7% (19/60)   27.5% (22/80)
+   HTER            32.0%           30.8%
+   FAR             32.3%           34.0%
+   FRR             31.7%           27.5%
+   PRECISION       0.3             0.3
+   RECALL          0.7             0.7
+   F1_SCORE        0.4             0.4
+   AUC             0.7             0.7
+   AUC-LOG-SCALE   1.5             1.4
+   ==============  ==============  ===============
 
 
 .. _bob.pad.face.baselines.replay_mobile:
@@ -96,9 +127,82 @@
 Baselines on REPLAY-MOBILE database
 --------------------------------------
 
-This section summarizes the results of baseline face PAD experiments on the `Replay-Mobile`_ database.
-The description of the database-related settings, which are used to run face PAD baselines on the Replay-Mobile is given here :ref:`bob.pad.face.resources.databases.replay_mobile`. To understand the settings in more detail you can check the corresponding configuration file : ``bob/pad/face/config/replay_mobile.py``.
+This section summarizes the results of baseline face PAD experiments on the
+`Replay-Mobile`_ database. The database-related settings used to run face PAD
+baselines on Replay-Mobile are described in
+:ref:`bob.pad.face.resources.databases.replay_mobile`. To understand the
+settings in more detail, you can check the corresponding configuration file:
+``bob/pad/face/config/replay_mobile.py``.
+
+
+Deep-Pix-BiS Baseline
+~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: sh
+
+   $ bob pad run-pipeline -vv replay-mobile deep-pix-bis --output <OUTPUT> --dask-client <CLIENT>
+
+This baseline reports scores per frame. To obtain scores per video, you can run::
+
+   $ bob pad finalize-scores -vv <OUTPUT>/scores-{dev,eval}.csv
+
+Finally, you can evaluate this baseline using::
+
+   $ bob pad metrics -vv --eval <OUTPUT>/scores-{dev,eval}.csv
+
+which should give you::
+
+   [Min. criterion: EER ] Threshold on Development set `<OUTPUT>/scores-dev.csv`: 4.051177e-01
+   ===================  ==============  ==============
+   ..                   Development     Evaluation
+   ===================  ==============  ==============
+   APCER (mattescreen)  4.7%            8.3%
+   APCER (print)        15.6%           18.8%
+   APCER_AP             15.6%           18.8%
+   BPCER                10.0%           10.9%
+   ACER                 12.8%           14.8%
+   FTA                  0.0%            0.0%
+   FPR                  10.2% (26/256)  13.5% (26/192)
+   FNR                  10.0% (16/160)  10.9% (12/110)
+   HTER                 10.1%           12.2%
+   FAR                  10.2%           13.5%
+   FRR                  10.0%           10.9%
+   PRECISION            0.8             0.8
+   RECALL               0.9             0.9
+   F1_SCORE             0.9             0.8
+   AUC                  1.0             1.0
+   AUC-LOG-SCALE        2.0             1.8
+   ===================  ==============  ==============
+
+
+.. _bob.pad.face.baselines.oulunpu:
+
+Baselines on OULU-NPU database
+--------------------------------------
+
+This section summarizes the results of baseline face PAD experiments on the
+`OULU-NPU`_ database. The database-related settings used to run face PAD
+baselines on OULU-NPU are described in
+:ref:`bob.pad.face.resources.databases.oulunpu`. To understand the settings
+in more detail, you can check the corresponding configuration file:
+``bob/pad/face/config/oulunpu.py``.
+
 
+Deep-Pix-BiS Baseline
+~~~~~~~~~~~~~~~~~~~~~
+
+.. code-block:: sh
 
+   $ bob pad run-pipeline -vv oulunpu deep-pix-bis --output <OUTPUT> --dask-client <CLIENT>
+
+This baseline reports scores per frame. To obtain scores per video, you can run::
+
+   $ bob pad finalize-scores -vv <OUTPUT>/scores-{dev,eval}.csv
+
+Finally, you can evaluate this baseline using::
+
+   $ bob pad metrics -vv --eval <OUTPUT>/scores-{dev,eval}.csv
+
+which will print a table of metrics similar to the ones shown above.
 
 .. include:: links.rst
diff --git a/doc/conf.py b/doc/conf.py
index d3170f750d53faac2a1feeb781221cedd9425292..1e948f6adc3a5a689a153e9411c845332de034f7 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -27,7 +27,7 @@ extensions = [
 ]
 
 # Be picky about warnings
-nitpicky = False
+nitpicky = True
 
 # Ignores stuff we can't easily resolve on other project's sphinx manuals
 nitpick_ignore = []
diff --git a/doc/index.rst b/doc/index.rst
index 5c4200195e802638bdefa466eee700210cff16a2..d07d2606da99b32b76eef6ae5258ba55660fd330 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -7,8 +7,8 @@ Library for Facial Presentation Attack Detection (PAD)
 ========================================================
 
 The Facial Presentation Attack Detection Library is an open-source tool consisting of a
-series of plugins for bob.pad.base_, our open-source presentation attack detection
-platform. As a result, it is fully extensible using bob.pad.base_ documented
+series of plugins for :ref:`bob.pad.base`, our open-source presentation attack detection
+platform. As a result, it is fully extensible using :ref:`bob.pad.base` documented
 types and techniques. Please refer to the manual of that package for a thorough
 introduction. In this guide, we focus on details concerning
 face PAD experiments using our plugins.
diff --git a/doc/installation.rst b/doc/installation.rst
index 6c58ee2b689d36b628499440c4b701849899ed8e..5758a5109b26b87c9ae557b784f434ac0232463f 100644
--- a/doc/installation.rst
+++ b/doc/installation.rst
@@ -23,19 +23,16 @@ the baselines.
 
 The current system readily supports the following freely available datasets:
 
-* `REPLAYATTACK`_
+* `REPLAY-ATTACK`_
 * `REPLAY-MOBILE`_
+* `SWAN`_
+* `OULU-NPU`_
+* `MASK-ATTACK`_
 
 After downloading the databases, annotate the base directories in which they
 are installed. Then, follow the instructions in
 :ref:`bob.pad.base.installation` to let this framework know where databases are
 located on your system.
 
-.. note::
-
-    Some databases may need to be configured using a newer method explained in
-    :ref:`bob.extension.rc`. Refer to the documentation of the database for
-    further information.
-
 
 .. include:: links.rst
diff --git a/doc/links.rst b/doc/links.rst
index 5c51ec6cc7d0bb7cb2a4e4034d8220206c431ef6..3270fb22dd659f4c051da62bc579be65d1f797e1 100644
--- a/doc/links.rst
+++ b/doc/links.rst
@@ -8,11 +8,13 @@
 .. _buildout: http://www.buildout.org
 .. _pypi: http://pypi.python.org
 .. _installation: https://www.idiap.ch/software/bob/install
-.. _bob.pad.base: https://pypi.python.org/pypi/bob.pad.base
-.. _replayattack: https://www.idiap.ch/dataset/replayattack
+.. _replay-attack: https://www.idiap.ch/dataset/replayattack
 .. _replay-mobile: https://www.idiap.ch/dataset/replay-mobile
 .. _dependencies: https://gitlab.idiap.ch/bob/bob/wikis/Dependencies
 .. _MIFS: http://www.antitza.com/makeup-datasets.html
 .. _CELEBA: http://mmlab.ie.cuhk.edu.hk/projects/CelebA.html
 .. _Swan: https://www.idiap.ch/dataset/swan
 .. _oulu-npu: https://sites.google.com/site/oulunpudatabase/
+.. _casia-fasd: http://www.cbsr.ia.ac.cn/english/FaceAntiSpoofDatabases.asp
+.. _casia-surf: https://sites.google.com/qq.com/face-anti-spoofing/welcome/challengecvpr2019
+.. _mask-attack: https://www.idiap.ch/en/dataset/3dmad/index_html
diff --git a/setup.py b/setup.py
index a9296ae6020248aa2f31556173defa9716fce51f..9e8487416a6d4628eec4511825035fd3f9ba5ad2 100644
--- a/setup.py
+++ b/setup.py
@@ -64,6 +64,11 @@ setup(
             "swan = bob.pad.face.config.swan:database",
             "oulunpu = bob.pad.face.config.oulunpu:database",
         ],
+        # registered pipelines:
+        "bob.pad.pipeline": [
+            "svm-frames = bob.pad.face.config.svm_frames:pipeline",
+            "deep-pix-bis = bob.pad.face.config.deep_pix_bis:pipeline",
+        ],
         # registered configurations:
         "bob.pad.config": [
             # databases
@@ -75,8 +80,9 @@ setup(
             "casiasurf = bob.pad.face.config.casiasurf",
             "swan = bob.pad.face.config.swan",
             "oulunpu = bob.pad.face.config.oulunpu",
-            # classifiers
+            # pipelines
             "svm-frames = bob.pad.face.config.svm_frames",
+            "deep-pix-bis = bob.pad.face.config.deep_pix_bis",
         ],
         # registered ``bob pad ...`` commands
         "bob.pad.cli": [