diff --git a/bob/pad/face/config/lbp_svm.py b/bob/pad/face/config/lbp_svm.py
new file mode 100644
index 0000000000000000000000000000000000000000..1649e702ad52b70c49cd51a359833c3eb92aaad0
--- /dev/null
+++ b/bob/pad/face/config/lbp_svm.py
@@ -0,0 +1,81 @@
+import dask_ml.model_selection as dcv
+
+from sklearn.model_selection import StratifiedGroupKFold
+from sklearn.pipeline import Pipeline
+from sklearn.svm import SVC
+
+from bob.bio.face.annotator import MTCNN
+from bob.bio.face.preprocessor import INormLBP
+from bob.bio.face.utils import make_cropper, pad_default_cropping
+from bob.pad.face.transformer import VideoToFrames
+from bob.pad.face.transformer.histogram import SpatialHistogram
+from bob.pipelines.wrappers import SampleWrapper
+
+
+def _init_pipeline(database, crop_size=(112, 112), grid_size=(3, 3)):
+    # Face Crop
+    # --------------------------
+    annotator = MTCNN(thresholds=(0.1, 0.2, 0.2))
+    cropped_pos = pad_default_cropping(crop_size, database.annotation_type)
+    cropper = make_cropper(
+        cropped_image_size=crop_size,
+        cropped_positions=cropped_pos,
+        fixed_positions=database.fixed_positions,
+        color_channel="rgb",
+        annotator=annotator,
+    )
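+    # ``make_cropper`` returns a (cropper, transform_extra_arguments) pair,
+    # which is unpacked below when wrapping the cropper as a sample transformer.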
+    face_cropper = SampleWrapper(
+        cropper[0], transform_extra_arguments=cropper[1], delayed_output=False
+    )
+
+    # Extract LBP
+    # --------------------------
+    lbp_extractor = INormLBP(face_cropper=None, color_channel="gray")
+    lbp_extractor = SampleWrapper(lbp_extractor, delayed_output=False)
+
+    # Histogram
+    # --------------------------
+    histo = SpatialHistogram(grid_size=grid_size, nbins=256)
+    # histo = VideoWrapper(histo)
+    histo = SampleWrapper(histo, delayed_output=False)
+
+    # Classifier
+    # --------------------------
+    sk_classifier = SVC()
+    param_grid = [
+        {
+            "C": [2**p for p in range(-3, 14, 2)],
+            "gamma": [2**p for p in range(-15, 0, 2)],
+            "kernel": ["rbf"],
+        }
+    ]
+    cv = StratifiedGroupKFold(n_splits=3)
+    sk_classifier = dcv.GridSearchCV(
+        sk_classifier, param_grid=param_grid, cv=cv
+    )
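+    # Keep frames of the same video within a single fold: ``groups`` is filled
+    # from each frame's ``video_key`` (set by VideoToFrames) through the
+    # ``fit_extra_arguments`` mapping below.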
+    fit_extra_arguments = [("y", "is_bonafide"), ("groups", "video_key")]
+    classifier = SampleWrapper(
+        sk_classifier,
+        delayed_output=False,
+        fit_extra_arguments=fit_extra_arguments,
+    )
+
+    # Full Pipeline
+    # --------------------------
+    return Pipeline(
+        [
+            ("video2frames", VideoToFrames()),
+            ("cropper", face_cropper),
+            ("lbp", lbp_extractor),
+            ("spatial_histogram", histo),
+            ("classifier", classifier),
+        ]
+    )
+
+
+# Get database information, needed for face cropper
+db = globals()["database"]
+if db is None:
+    raise ValueError("Missing database!")
+# Pipeline #
+pipeline = _init_pipeline(database=db)
diff --git a/bob/pad/face/config/svm_frames.py b/bob/pad/face/config/svm_frames.py
deleted file mode 100644
index ebe9f1063db1185d6ae236e3e32662b6b7446371..0000000000000000000000000000000000000000
--- a/bob/pad/face/config/svm_frames.py
+++ /dev/null
@@ -1,43 +0,0 @@
-from sklearn.model_selection import GridSearchCV
-from sklearn.pipeline import Pipeline
-from sklearn.svm import SVC
-
-import bob.pipelines as mario
-
-from bob.pad.face.transformer import VideoToFrames
-
-preprocessor = globals()["preprocessor"]
-extractor = globals()["extractor"]
-
-# Classifier #
-param_grid = [
-    {
-        "C": [2**P for P in range(-3, 14, 2)],
-        "gamma": [2**P for P in range(-15, 0, 2)],
-        "kernel": ["rbf"],
-    },
-]
-
-
-# TODO: The grid search below does not take into account splitting frames of
-# each video into a separate group. You might have frames of the same video in
-# both groups of training and validation.
-
-# TODO: This gridsearch can also be part of dask graph using dask-ml and the
-# ``bob_fit_supports_dask_array`` tag from bob.pipelines.
-classifier = GridSearchCV(SVC(), param_grid=param_grid, cv=3)
-classifier = mario.wrap(
-    ["sample"],
-    classifier,
-    fit_extra_arguments=[("y", "is_bonafide")],
-)
-
-# Pipeline #
-pipeline = Pipeline(
-    [
-        ("preprocessor", preprocessor),
-        ("extractor", extractor),
-        ("video_to_frames", VideoToFrames()),
-        ("svm", classifier),
-    ]
-)
diff --git a/bob/pad/face/database/database.py b/bob/pad/face/database/database.py
index d779906ff0432acd411e67085294c087cb2206c7..0c5d53d774801f8b7826c35cd16958a6503fb7c5 100644
--- a/bob/pad/face/database/database.py
+++ b/bob/pad/face/database/database.py
@@ -38,7 +38,7 @@ def delayed_video_load(
             step_size=step_size,
             transform=get_transform(sample),
         )
-        delayed_attributes = None
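+        # Provide a default delayed ``annotations`` attribute (resolving to
+        # None) for samples without an annotation directory.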
+        delayed_attributes = {"annotations": lambda: None}
         if annotation_directory:
             path = sample.filename
             if not keep_extension_for_annotation:
diff --git a/bob/pad/face/test/test_transformers.py b/bob/pad/face/test/test_transformers.py
index 90d3209cf79f205a334f19b6bb21f93b4988492c..641c3deb5d5124e58a256becac303b115abbf3fc 100644
--- a/bob/pad/face/test/test_transformers.py
+++ b/bob/pad/face/test/test_transformers.py
@@ -14,6 +14,9 @@ def test_video_to_frames():
     samples = [mario.Sample(v, key=i) for i, v in enumerate(video_container)]
     frame_samples = VideoToFrames().transform(samples)
     assert len(frame_samples) == 4
-    assert all(s.key == 0 for s in frame_samples)
+    assert all(
+        s.key == "0_{}".format(k) for s, k in zip(frame_samples, [0, 1, 2, 4])
+    )
+    assert all(s.video_key == 0 for s in frame_samples)
     assert [s.data for s in frame_samples] == [0, 1, 2, 3]
     assert [s.frame_id for s in frame_samples] == [0, 1, 2, 4]
diff --git a/bob/pad/face/transformer/VideoToFrames.py b/bob/pad/face/transformer/VideoToFrames.py
index 8a1bdc5eb31131b9c70a25cc1e9ec865e8371ab9..8db36bdf66886928edcbbacdf955c990f12ec513 100644
--- a/bob/pad/face/transformer/VideoToFrames.py
+++ b/bob/pad/face/transformer/VideoToFrames.py
@@ -4,8 +4,7 @@ from functools import partial
 
 from sklearn.base import BaseEstimator, TransformerMixin
 
-import bob.pipelines as mario
-
+from bob.pipelines.sample import DelayedSample, Sample
 from bob.pipelines.wrappers import _frmt
 
 logger = logging.getLogger(__name__)
@@ -18,33 +17,70 @@ def _get(sth):
 class VideoToFrames(TransformerMixin, BaseEstimator):
     """Expands video samples to frame-based samples only when transform is called."""
 
+    def __init__(self, delayed_output=True):
+        self.delayed_output = delayed_output
+
     def transform(self, video_samples):
         logger.debug(f"{_frmt(self)}.transform")
-        output = []
-        for sample in video_samples:
-            annotations = getattr(sample, "annotations", {}) or {}
-
+        outputs = []
+        for vid_sample in video_samples:
+            annotations = getattr(vid_sample, "annotations", None)
+            # Use ``vid_sample.key`` as group identifier: each video needs a
+            # unique ID, and ``groups`` is later used for cross-validation.
+            if not hasattr(vid_sample, "key"):
+                raise ValueError(
+                    "Video sample must have a unique `key` "
+                    "attribute to be used with {}".format(
+                        self.__class__.__name__
+                    )
+                )
+            groups = vid_sample.key
             # video is an instance of VideoAsArray or VideoLikeContainer
-            video = sample.data
-
+            video = vid_sample.data
             for frame, frame_id in zip(video, video.indices):
                 if frame is None:
                     continue
-                # create a load method so that we can create DelayedSamples because
-                # the input samples could be DelayedSamples with delayed attributes
-                # as well and we don't want to load those delayed attributes.
-                new_sample = mario.DelayedSample(
-                    partial(_get, frame),
-                    frame_id=frame_id,
-                    annotations=annotations.get(str(frame_id)),
-                    parent=sample,
-                )
-                output.append(new_sample)
-
-        return output
+                # Do we have frame annotations?
+                frame_annotations = None
+                if annotations is not None:
+                    # Global annotations are present -> query them for this frame
+                    frame_annotations = annotations.get(str(frame_id))
+                # Update the key; otherwise it is inherited from the parent and
+                # every frame gets the same one, breaking the checkpointing
+                # mechanism for steps later in the pipeline.
+                key = "{}_{}".format(vid_sample.key, frame_id)
+                if self.delayed_output:
+                    # create a load method so that we can create DelayedSamples
+                    # because the input samples could be DelayedSamples with
+                    # delayed attributes as well and we don't want to load
+                    # those delayed attributes.
+                    sample = DelayedSample(
+                        partial(_get, frame),
+                        frame_id=frame_id,
+                        video_key=groups,
+                        parent=vid_sample,
+                        # Override parent's attributes
+                        annotations=frame_annotations,
+                        key=key,
+                    )
+                else:
+                    sample = Sample(
+                        frame,
+                        frame_id=frame_id,
+                        video_key=groups,
+                        parent=vid_sample,
+                        # Override parent's attributes
+                        annotations=frame_annotations,
+                        key=key,
+                    )
+                outputs.append(sample)
+        return outputs
 
     def fit(self, X, y=None):
         return self
 
     def _more_tags(self):
-        return {"requires_fit": False, "bob_checkpoint_features": False}
+        return {
+            "requires_fit": False,
+            "bob_checkpoint_features": False,
+        }
diff --git a/bob/pad/face/transformer/histogram.py b/bob/pad/face/transformer/histogram.py
new file mode 100644
index 0000000000000000000000000000000000000000..a3bc4ff7d1724839d3d70900c935bcfed37ce48b
--- /dev/null
+++ b/bob/pad/face/transformer/histogram.py
@@ -0,0 +1,75 @@
+import numpy as np
+
+from sklearn.base import BaseEstimator, TransformerMixin
+from sklearn.utils import check_array
+
+
+def _get_cropping_size(image_size, patch_size):
+    # Number of leftover pixels that do not fit into whole patches
+    r = image_size % patch_size
+    # Split the leftover evenly between the two sides (center crop)
+    before = r // 2
+    after = image_size - (r - before)
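+    # e.g. image_size=112, patch_size=37: r=1, before=0, after=111, so the
+    # caller keeps image[0:111], an exact multiple of 37.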
+    return before, after
+
+
+def _extract_patches(image, patch_size):
+    # https://stackoverflow.com/a/16858283
+    h, w = image.shape
+    nrows, ncols = patch_size
+    if h % nrows != 0 or w % ncols != 0:
+        w_left, w_right = _get_cropping_size(w, ncols)
+        h_top, h_bottom = _get_cropping_size(h, nrows)
+        # Perform center crop
+        image = image[h_top:h_bottom, w_left:w_right]
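+    # Reshape to (n_patches, nrows, ncols): split rows into blocks of ``nrows``
+    # and columns into blocks of ``ncols``, then flatten the grid of blocks
+    # into a single leading axis.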
+    return (
+        image.reshape(h // nrows, nrows, -1, ncols)
+        .swapaxes(1, 2)
+        .reshape(-1, nrows, ncols)
+    )
+
+
+class SpatialHistogram(TransformerMixin, BaseEstimator):
+    """
+    Split an image into a grid of patches, compute a histogram on each patch
+    and concatenate them to obtain the final descriptor.
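+
+    A minimal usage sketch (shapes shown are illustrative assumptions)::
+
+        histo = SpatialHistogram(grid_size=(3, 3), nbins=256)
+        # images: gray-scale frames stacked as an array of shape (N, H, W)
+        features = histo.transform(images)
+        # -> one (grid_y * grid_x * nbins)-dimensional descriptor per image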
+    """
+
+    def __init__(self, grid_size=(4, 4), range=(0, 256), nbins=256):
+        """
+        Constructor
+        :param grid_size: Tuple `(grid_y, grid_x)` indicating the number of
+            patches to extract in each direction
+        :param range: Tuple `(h_min, h_max)` indicating the histogram range,
+            cf. numpy.histogram
+        :param nbins: Number of bins in the histogram, cf. numpy.histogram
+        """
+        self.grid_size = grid_size
+        self.range = range
+        self.nbins = nbins
+
+    def fit(self, X, y=None):
+        return self
+
+    def transform(self, X):
+        X = check_array(X, allow_nd=True)  # X.shape == (N, H, W)
+        histo = []
+        for sample in X:
+            h = self._spatial_histogram(sample)  # [grid_x * grid_y * nbins]
+            histo.append(h)
+        return np.asarray(histo)
+
+    def _spatial_histogram(self, image):
+        """Compute spatial histogram for a given images"""
+        patch_size = [s // g for s, g in zip(image.shape, self.grid_size)]
+        patches = _extract_patches(image=image, patch_size=patch_size)
+        hist = []
+        for patch in patches:
+            h, _ = np.histogram(
+                patch, bins=self.nbins, range=self.range, density=True
+            )
+            hist.append(h)
+        return np.asarray(hist).reshape(-1)
+
+    def _more_tags(self):
+        return {"stateless": True, "requires_fit": False}
diff --git a/doc/baselines.rst b/doc/baselines.rst
index b5a6b19ff6c24134f221656c6a40b4312db24bb5..578ff1bf15d5dbbdf18cda5e025dc81476388b12 100644
--- a/doc/baselines.rst
+++ b/doc/baselines.rst
@@ -83,14 +83,16 @@ Replay-Attack is given here :ref:`bob.pad.face.resources.databases.replay_attack
 understand the settings in more detail you can check the corresponding
 configuration file: ``bob/pad/face/config/replay_attack.py``.
 
-Deep-Pix-BiS Baseline
+Running a Baseline
 ~~~~~~~~~~~~~~~~~~~~~
-(see :ref:`bob.pad.face.resources.deep_pix_bis_pad`)
+
+A baseline is run by invoking the command below:
 
 .. code-block:: sh
 
-   $ bob pad run-pipeline -vvv replay-attack deep-pix-bis --output <OUTPUT> --dask-client <CLIENT>
+   $ bob pad run-pipeline -vvv replay-attack <BASELINE> --output <OUTPUT> --dask-client <CLIENT>
 
+where ``<BASELINE>`` can be any of the following: ``lbp-svm``, ``deep-pix-bis``.
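+For example, to run the LBP-SVM baseline with a local Dask client, you could
+use::
+
+   $ bob pad run-pipeline -vvv replay-attack lbp-svm --output <OUTPUT> --dask-client local-parallel
+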
 This baseline reports scores per frame. To obtain scores per video, you can run::
 
    $ bob pad finalize-scores -vvv <OUTPUT>/scores-{dev,eval}.csv
@@ -99,7 +101,38 @@ Finally, you can evaluate this baseline using::
 
    $ bob pad metrics -vvv --eval <OUTPUT>/scores-{dev,eval}.csv
 
-which should give you::
+LBP-SVM Baseline
+~~~~~~~~~~~~~~~~~~~~~
+
+The metrics for this baseline should give you::
+
+   [Min. criterion: EER ] Threshold on Development set `<OUTPUT>/scores-dev.csv`: -1.042440e+00
+   ==============  ==============  ==============
+   ..              Development     Evaluation
+   ==============  ==============  ==============
+   APCER (attack)  20.7%           19.5%
+   APCER_AP        20.7%           19.5%
+   BPCER           20.0%           13.8%
+   ACER            20.3%           16.6%
+   FTA             0.0%            0.0%
+   FPR             20.7% (62/300)  19.5% (78/400)
+   FNR             20.0% (12/60)   13.8% (11/80)
+   HTER            20.3%           16.6%
+   FAR             20.7%           19.5%
+   FRR             20.0%           13.8%
+   PRECISION       0.4             0.5
+   RECALL          0.8             0.9
+   F1_SCORE        0.6             0.6
+   AUC             0.9             0.9
+   AUC-LOG-SCALE   1.4             1.5
+   ==============  ==============  ==============
+
+
+Deep-Pix-BiS Baseline
+~~~~~~~~~~~~~~~~~~~~~
+(see :ref:`bob.pad.face.resources.deep_pix_bis_pad`)
+
+The metrics for this baseline should give you::
 
    [Min. criterion: EER ] Threshold on Development set `<OUTPUT>/scores-dev.csv`: 1.919391e-01
    ==============  ==============  ===============
@@ -136,22 +169,57 @@ settings in more detail you can check the corresponding configuration file :
 ``bob/pad/face/config/replay_mobile.py``.
 
 
-Deep-Pix-BiS Baseline
+Running a Baseline
 ~~~~~~~~~~~~~~~~~~~~~
 
+A baseline is run by invoking the command below:
+
 .. code-block:: sh
 
-   $ bob pad run-pipeline -vv replay-mobile deep-pix-bis --output <OUTPUT> --dask-client <CLIENT>
+   $ bob pad run-pipeline -vvv replay-mobile <BASELINE> --output <OUTPUT> --dask-client <CLIENT>
 
+where ``<BASELINE>`` can be any of the following: ``lbp-svm``, ``deep-pix-bis``.
 This baseline reports scores per frame. To obtain scores per video, you can run::
 
-   $ bob pad finalize-scores -vv <OUTPUT>/scores-{dev,eval}.csv
+   $ bob pad finalize-scores -vvv <OUTPUT>/scores-{dev,eval}.csv
 
 Finally, you can evaluate this baseline using::
 
-   $ bob pad metrics -vv --eval <OUTPUT>/scores-{dev,eval}.csv
+   $ bob pad metrics -vvv --eval <OUTPUT>/scores-{dev,eval}.csv
 
-which should give you::
+
+LBP-SVM Baseline
+~~~~~~~~~~~~~~~~~~~~~
+
+The metrics for this baseline should give you::
+
+   [Min. criterion: EER ] Threshold on Development set `<OUTPUT>/scores-dev.csv`: -5.045229e-01
+   ===================  ==============  ==============
+   ..                   Development     Evaluation
+   ===================  ==============  ==============
+   APCER (mattescreen)  7.0%            7.3%
+   APCER (print)        9.4%            3.1%
+   APCER_AP             9.4%            7.3%
+   BPCER                8.1%            0.9%
+   ACER                 8.8%            4.1%
+   FTA                  0.0%            0.0%
+   FPR                  8.2% (21/256)   5.2% (10/192)
+   FNR                  8.1% (13/160)   0.9% (1/110)
+   HTER                 8.2%            3.1%
+   FAR                  8.2%            5.2%
+   FRR                  8.1%            0.9%
+   PRECISION            0.9             0.9
+   RECALL               0.9             1.0
+   F1_SCORE             0.9             1.0
+   AUC                  1.0             1.0
+   AUC-LOG-SCALE        2.2             2.2
+   ===================  ==============  ==============
+
+
+Deep-Pix-BiS Baseline
+~~~~~~~~~~~~~~~~~~~~~
+
+The metrics for this baseline should give you::
 
    [Min. criterion: EER ] Threshold on Development set `<OUTPUT>/scores-dev.csv`: 4.051177e-01
    ===================  ==============  ==============
@@ -189,22 +257,59 @@ settings in more detail you can check the corresponding configuration file :
 ``bob/pad/face/config/oulu_npu.py``.
 
 
-Deep-Pix-BiS Baseline
+Running a Baseline
 ~~~~~~~~~~~~~~~~~~~~~
 
+A baseline is run by invoking the command below:
+
 .. code-block:: sh
 
-   $ bob pad run-pipeline -vv oulu-npu deep-pix-bis --output <OUTPUT> --dask-client <CLIENT>
+   $ bob pad run-pipeline -vvv oulu-npu <BASELINE> --output <OUTPUT> --dask-client <CLIENT>
 
+where ``<BASELINE>`` can be any of the following: ``lbp-svm``, ``deep-pix-bis``.
 This baseline reports scores per frame. To obtain scores per video, you can run::
 
-   $ bob pad finalize-scores -vv <OUTPUT>/scores-{dev,eval}.csv
+   $ bob pad finalize-scores -vvv <OUTPUT>/scores-{dev,eval}.csv
 
 Finally, you can evaluate this baseline using::
 
-   $ bob pad metrics -vv --eval <OUTPUT>/scores-{dev,eval}.csv
+   $ bob pad metrics -vvv --eval <OUTPUT>/scores-{dev,eval}.csv
+
+
+LBP-SVM Baseline
+~~~~~~~~~~~~~~~~~~~~~
+
+The metrics for this baseline should give you::
+
+   [Min. criterion: EER ] Threshold on Development set `<OUTPUT>/scores-dev.csv`: 6.161214e-02
+   ======================  =============  ============
+   ..                      Development    Evaluation
+   ======================  =============  ============
+   APCER (print/1)         13.3%          10.0%
+   APCER (print/2)         11.1%          6.7%
+   APCER (video_replay/1)  4.4%           5.8%
+   APCER (video_replay/2)  8.3%           11.7%
+   APCER_AP                13.3%          11.7%
+   BPCER                   9.4%           57.5%
+   ACER                    11.4%          34.6%
+   FTA                     0.0%           0.0%
+   FPR                     9.3% (67/720)  8.5% (41/480)
+   FNR                     9.4% (17/180)  57.5% (69/120)
+   HTER                    9.4%           33.0%
+   FAR                     9.3%           8.5%
+   FRR                     9.4%           57.5%
+   PRECISION               0.7            0.6
+   RECALL                  0.9            0.4
+   F1_SCORE                0.8            0.5
+   AUC                     1.0            0.8
+   AUC-LOG-SCALE           2.1            1.1
+   ======================  =============  ============
+
 
-which should give you::
+Deep-Pix-BiS Baseline
+~~~~~~~~~~~~~~~~~~~~~
+
+The metrics for this baseline should give you::
 
    [Min. criterion: EER ] Threshold on Development set `<OUTPUT>/scores-dev.csv`: 4.326179e-01
    ======================  =============  ============
@@ -244,22 +349,58 @@ settings in more detail you can check the corresponding configuration file :
 ``bob/pad/face/config/swan.py``.
 
 
-Deep-Pix-BiS Baseline
+Running a Baseline
 ~~~~~~~~~~~~~~~~~~~~~
 
+A baseline is run by invoking the command below:
+
 .. code-block:: sh
 
-   $ bob pad run-pipeline -vv swan deep-pix-bis --output <OUTPUT> --dask-client <CLIENT>
+   $ bob pad run-pipeline -vvv swan <BASELINE> --output <OUTPUT> --dask-client <CLIENT>
 
+where ``<BASELINE>`` can be any of the following: ``lbp-svm``, ``deep-pix-bis``.
 This baseline reports scores per frame. To obtain scores per video, you can run::
 
-   $ bob pad finalize-scores -vv <OUTPUT>/scores-{dev,eval}.csv
+   $ bob pad finalize-scores -vvv <OUTPUT>/scores-{dev,eval}.csv
 
 Finally, you can evaluate this baseline using::
 
-   $ bob pad metrics -vv --eval <OUTPUT>/scores-{dev,eval}.csv
+   $ bob pad metrics -vvv --eval <OUTPUT>/scores-{dev,eval}.csv
+
+
+LBP-SVM Baseline
+~~~~~~~~~~~~~~~~~~~~~
+
+The metrics for this baseline should give you::
+
+   [Min. criterion: EER ] Threshold on Development set `<OUTPUT>/scores-dev.csv`: 9.408851e-02
+   ======================  =============  ============
+   ..                      Development    Evaluation
+   ======================  =============  ============
+   APCER (PA.F.1)          6.7%           11.1%
+   APCER (PA.F.5)          0.8%           0.8%
+   APCER (PA.F.6)          11.2%          10.8%
+   APCER_AP                11.2%          11.1%
+   BPCER                   6.0%           25.2%
+   ACER                    8.6%           18.2%
+   FTA                     0.0%           0.0%
+   FPR                     6.0% (30/502)  6.0% (45/749)
+   FNR                     6.0% (18/300)  25.2% (568/2250)
+   HTER                    6.0%           15.6%
+   FAR                     6.0%           6.0%
+   FRR                     6.0%           25.2%
+   PRECISION               0.9            1.0
+   RECALL                  0.9            0.7
+   F1_SCORE                0.9            0.8
+   AUC                     1.0            1.0
+   AUC-LOG-SCALE           2.4            2.0
+   ======================  =============  ============
+
+
+Deep-Pix-BiS Baseline
+~~~~~~~~~~~~~~~~~~~~~
 
-which should give you::
+The metrics for this baseline should give you::
 
    [Min. criterion: EER ] Threshold on Development set `<OUTPUT>/scores-dev.csv`: 4.867174e-01
    ==============  ==============  ================
diff --git a/doc/other_pad_algorithms.rst b/doc/other_pad_algorithms.rst
index 9feb8a3b1e1fd0911a3105e451e534ef90458617..1429d88d953a715d5266809e02efea4cee820d54 100644
--- a/doc/other_pad_algorithms.rst
+++ b/doc/other_pad_algorithms.rst
@@ -43,10 +43,11 @@ Usually, it is a good idea to have at least verbose level 2 (i.e., calling
    line option. To run experiments in parallel on the local machine, add the
    ``--dask-client local-parallel`` option.
 
-   See :any:`this <bob.bio.base.pipeline_simple_advanced_features>` for more for more
+   See :any:`this <bob.bio.base.pipeline_simple_advanced_features>` for more
    details on dask configurations.
 
 
+
 Database setups and face PAD algorithms are encoded using
 ``bob.bio.base.configuration-files``, all stored inside the package structure,
 in the directory ``bob/pad/face/config``. Documentation for each resource
diff --git a/setup.py b/setup.py
index 8a9cc734f7f4c434bb287d46a20f54addf8cafdb..16986887d71e5363f5e75324e282b06bae028f8b 100644
--- a/setup.py
+++ b/setup.py
@@ -79,7 +79,7 @@ setup(
             "replay-mobile = bob.pad.face.config.replay_mobile",
             "swan = bob.pad.face.config.swan",
             # pipelines
-            "svm-frames = bob.pad.face.config.svm_frames",
+            "lbp-svm = bob.pad.face.config.lbp_svm",
             "deep-pix-bis = bob.pad.face.config.deep_pix_bis",
         ],
         # registered ``bob pad ...`` commands