diff --git a/bob/bio/face/config/baseline/arcface_insight_tf.py b/bob/bio/face/config/baseline/arcface_insight_tf.py
index a688872a014ad86fc32289c0432331a11e2ad86d..f9c059c1ec68697a10c92b0390d53d6d4a567814 100644
--- a/bob/bio/face/config/baseline/arcface_insight_tf.py
+++ b/bob/bio/face/config/baseline/arcface_insight_tf.py
@@ -1,5 +1,6 @@
 from bob.bio.face.embeddings import ArcFace_InsightFaceTF
 from bob.bio.face.config.baseline.helpers import embedding_transformer_112x112
+from bob.bio.base.pipelines.vanilla_biometrics import Distance, VanillaBiometricsPipeline
 
 
 if "database" in locals():
@@ -11,3 +12,10 @@ else:
 
 
 transformer = embedding_transformer_112x112(ArcFace_InsightFaceTF(), annotation_type, fixed_positions)
+
+algorithm = Distance()
+
+pipeline = VanillaBiometricsPipeline(
+    transformer,
+    algorithm
+)
diff --git a/bob/bio/face/config/baseline/gabor_graph.py b/bob/bio/face/config/baseline/gabor_graph.py
new file mode 100644
index 0000000000000000000000000000000000000000..a7e08de12bec6f189b6ad3b7afc086ec7088e5b8
--- /dev/null
+++ b/bob/bio/face/config/baseline/gabor_graph.py
@@ -0,0 +1,106 @@
+from bob.bio.base.pipelines.vanilla_biometrics import VanillaBiometricsPipeline, BioAlgorithmLegacy
+from bob.bio.face.helpers import face_crop_solver
+import math
+import numpy as np
+import bob.bio.face
+from sklearn.pipeline import make_pipeline
+from bob.pipelines import wrap
+import tempfile
+
+#### PICKING UP DATABASE INFORMATION, IF AVAILABLE ####
+if "database" in locals():
+    annotation_type = database.annotation_type
+    fixed_positions = database.fixed_positions
+else:
+    annotation_type = None
+    fixed_positions = None
+
+
+####### SELECTING THE FACE CROPPER TO BE USED ##########
+
+# Cropping
+CROPPED_IMAGE_HEIGHT = 80
+CROPPED_IMAGE_WIDTH = CROPPED_IMAGE_HEIGHT * 4 // 5
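+# i.e. a 5:4 height:width ratio; the default crop is 80x64 pixels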
+
+# eye positions for frontal images
+RIGHT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 - 1)
+LEFT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 * 3)
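+# (y, x) positions; for the 80x64 crop: reye = (16, 15), leye = (16, 48)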
+
+cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
+color_channel = "gray"
+
+
+if annotation_type == "bounding-box":
+    transform_extra_arguments = (("annotations", "annotations"),)
+    TOP_LEFT_POS = (0, 0)
+    BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
+
+    # Crops the face using the bounding-box annotations (no eye alignment)
+    face_cropper = face_crop_solver(
+        cropped_image_size,
+        color_channel=color_channel,
+        cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS},
+        fixed_positions=fixed_positions,
+    )
+
+elif annotation_type == "eyes-center":
+    transform_extra_arguments = (("annotations", "annotations"),)
+    # Crops the face using the eye-center annotations
+    face_cropper = face_crop_solver(
+        cropped_image_size,
+        color_channel=color_channel,
+        cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
+        fixed_positions=fixed_positions,
+    )
+
+else:
+    transform_extra_arguments = None
+    # DEFAULT: no annotations available; simply resize the whole image
+    face_cropper = face_crop_solver(cropped_image_size)
+
+preprocessor = bob.bio.face.preprocessor.INormLBP(
+    face_cropper=face_cropper,
+    dtype=np.float64,
+)
+
+
+#### FEATURE EXTRACTOR ######
+
+gabor_graph = bob.bio.face.extractor.GridGraph(
+    # Gabor parameters
+    gabor_sigma=math.sqrt(2.0) * math.pi,
+    # what kind of information to extract
+    normalize_gabor_jets=True,
+    # setup of the fixed grid
+    node_distance=(8, 8),
+)
+
+transformer = make_pipeline(
+    wrap(
+        ["sample"],
+        preprocessor,
+        transform_extra_arguments=transform_extra_arguments,
+    ),
+    wrap(["sample"], gabor_graph),
+)
+
+
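+# "PhaseDiffPlusCanberra" combines Gabor-phase differences with a Canberra
+# distance over jet magnitudes; "max_jet" scores each node by its best-matching enrolled jet.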
+gabor_jet = bob.bio.face.algorithm.GaborJet(
+    gabor_jet_similarity_type="PhaseDiffPlusCanberra",
+    multiple_feature_scoring="max_jet",
+    gabor_sigma=math.sqrt(2.0) * math.pi,
+)
+
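+# GaborJet is a legacy bob.bio.base algorithm; BioAlgorithmLegacy wraps it for
+# the vanilla-biometrics pipeline and checkpoints into a temporary directory.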
+tempdir = tempfile.TemporaryDirectory()
+algorithm = BioAlgorithmLegacy(gabor_jet, base_dir=tempdir.name)
+
+pipeline = VanillaBiometricsPipeline(
+    transformer,
+    algorithm
+)
diff --git a/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py b/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
index cf04066319112936c51b36b74f682471fd3856df..a93ea2b6988e5b1736464fa6be2b8c1b096a3dd1 100644
--- a/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
+++ b/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
@@ -1,5 +1,6 @@
 from bob.bio.face.embeddings import InceptionResnetv1_CasiaWebFace
 from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
+from bob.bio.base.pipelines.vanilla_biometrics import Distance, VanillaBiometricsPipeline
 
 
 if "database" in locals():
@@ -11,3 +12,10 @@ else:
 
 
 transformer = embedding_transformer_160x160(InceptionResnetv1_CasiaWebFace(), annotation_type, fixed_positions)
+
+algorithm = Distance()
+
+pipeline = VanillaBiometricsPipeline(
+    transformer,
+    algorithm
+)
diff --git a/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py b/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
index b69d10dceec087a1e3c390f9cb1d74f063207b22..041307037bceb14b91e9c3e41be8c59ce347aed0 100644
--- a/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
+++ b/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
@@ -1,5 +1,6 @@
 from bob.bio.face.embeddings import InceptionResnetv1_MsCeleb
 from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
+from bob.bio.base.pipelines.vanilla_biometrics import Distance, VanillaBiometricsPipeline
 
 
 if "database" in locals():
@@ -11,3 +12,10 @@ else:
 
 
 transformer = embedding_transformer_160x160(InceptionResnetv1_MsCeleb(), annotation_type, fixed_positions)
+
+algorithm = Distance()
+
+pipeline = VanillaBiometricsPipeline(
+    transformer,
+    algorithm
+)
diff --git a/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py b/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
index 068bffafd47a4f5e595313a62728f2415c602945..4cd5fa484aa8fad0d777c2e78c19c083b707a1b1 100644
--- a/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
+++ b/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
@@ -1,5 +1,6 @@
 from bob.bio.face.embeddings import InceptionResnetv2_CasiaWebFace
 from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
+from bob.bio.base.pipelines.vanilla_biometrics import Distance, VanillaBiometricsPipeline
 
 
 if "database" in locals():
@@ -11,3 +12,10 @@ else:
 
 
 transformer = embedding_transformer_160x160(InceptionResnetv2_CasiaWebFace(), annotation_type, fixed_positions)
+
+algorithm = Distance()
+
+pipeline = VanillaBiometricsPipeline(
+    transformer,
+    algorithm
+)
diff --git a/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py b/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
index 1a4bbd792f3f8cd8ab157a3336789c2c3be10aeb..957422673d5619769725a0406da7a59ec773dd76 100644
--- a/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
+++ b/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
@@ -1,5 +1,6 @@
 from bob.bio.face.embeddings import InceptionResnetv2_MsCeleb
 from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
+from bob.bio.base.pipelines.vanilla_biometrics import Distance, VanillaBiometricsPipeline
 
 
 if "database" in locals():
@@ -10,4 +11,11 @@ else:
     fixed_positions = None
 
 
-transformer = embedding_transformer_160x160(InceptionResnetv2_MsCeleb(), annotation_type, fixed_positions)
\ No newline at end of file
+transformer = embedding_transformer_160x160(InceptionResnetv2_MsCeleb(), annotation_type, fixed_positions)
+
+algorithm = Distance()
+
+pipeline = VanillaBiometricsPipeline(
+    transformer,
+    algorithm
+)
diff --git a/bob/bio/face/extractor/DCTBlocks.py b/bob/bio/face/extractor/DCTBlocks.py
index cceb1b1253a583b865c4a75f2614636ef91a7ae0..28afad823ccaa6547be38dcee8a1c481fd905280 100644
--- a/bob/bio/face/extractor/DCTBlocks.py
+++ b/bob/bio/face/extractor/DCTBlocks.py
@@ -129,11 +129,7 @@ class DCTBlocks(TransformerMixin, BaseEstimator):
             return self.dct_features(image)
 
         if isinstance(X, SampleBatch):
-            extracted = []
-            X = check_array(X, allow_nd=True)
-            for x in X:
-                extracted.append(_extract(x))
-            return extracted
+            return [_extract(x) for x in X]
         else:
             return _extract(X)
 
diff --git a/bob/bio/face/extractor/GridGraph.py b/bob/bio/face/extractor/GridGraph.py
index 6b905ab1ff96e4cb9dd5014e14324701ab847a0e..510eac2a2bae59c949504ba1abf65dbd33134527 100644
--- a/bob/bio/face/extractor/GridGraph.py
+++ b/bob/bio/face/extractor/GridGraph.py
@@ -73,7 +73,6 @@ class GridGraph(TransformerMixin, BaseEstimator):
         first_node=None,  # one or two integral values, or None -> automatically determined
     ):
 
-
         self.gabor_directions = gabor_directions
         self.gabor_scales = gabor_scales
         self.gabor_sigma = gabor_sigma
@@ -152,7 +151,7 @@ class GridGraph(TransformerMixin, BaseEstimator):
             return self._aligned_graph
 
         # check if a new extractor needs to be created
-        if self._last_image_resolution != image.shape:
+        if self._graph is None or self._last_image_resolution != image.shape:
             self._last_image_resolution = image.shape
             if self.first_node is None:
                 # automatically compute the first node
@@ -223,13 +222,9 @@ class GridGraph(TransformerMixin, BaseEstimator):
             return self.__class__.serialize_jets(jets)
 
         if isinstance(X, SampleBatch):
-            extracted = []
-            X = check_array(X, allow_nd=True)
-            for x in X:
-                extracted.append(_extract(x))
-            return extracted
+            return [_extract(x) for x in X]
         else:
-            return _extract(X)            
+            return _extract(X)
 
     def write_feature(self, feature, feature_file):
         """Writes the feature extracted by the `__call__` function to the given file.
@@ -293,4 +288,4 @@ class GridGraph(TransformerMixin, BaseEstimator):
         return {"stateless": True, "requires_fit": False}
 
     def fit(self, X, y=None):
-        return self
\ No newline at end of file
+        return self
diff --git a/bob/bio/face/extractor/LGBPHS.py b/bob/bio/face/extractor/LGBPHS.py
index d1283ec32988bcdb0b2054742756ad1dd835b0d7..40b772bb0491dcf3e1ca0352a7e62036c5b4391f 100644
--- a/bob/bio/face/extractor/LGBPHS.py
+++ b/bob/bio/face/extractor/LGBPHS.py
@@ -276,11 +276,7 @@ class LGBPHS(TransformerMixin, BaseEstimator):
 
 
         if isinstance(X, SampleBatch):
-            extracted = []
-            X = check_array(X, allow_nd=True)
-            for x in X:
-                extracted.append(_extract(x))
-            return extracted
+            return [_extract(x) for x in X]
         else:
             return _extract(X) 
 
diff --git a/bob/bio/face/preprocessor/Base.py b/bob/bio/face/preprocessor/Base.py
index d5570e86705feb1942473f39a46aad2429368e81..0e296e423f547daf987377aa1e3168fe2396a36d 100644
--- a/bob/bio/face/preprocessor/Base.py
+++ b/bob/bio/face/preprocessor/Base.py
@@ -44,6 +44,7 @@ class Base(TransformerMixin, BaseEstimator):
     channel : 2D or 3D :py:class:`numpy.ndarray`
       The extracted color channel.
     """
+
         if image.ndim == 2:
             if self.channel == "rgb":
                 return bob.ip.color.gray_to_rgb(image)
diff --git a/bob/bio/face/preprocessor/INormLBP.py b/bob/bio/face/preprocessor/INormLBP.py
index bcf9d3c36c8bceb822d698f15dfae9b25dfbc562..eb9bb2754430ad248fc9a64a429ff907bb9d2ba7 100644
--- a/bob/bio/face/preprocessor/INormLBP.py
+++ b/bob/bio/face/preprocessor/INormLBP.py
@@ -22,7 +22,8 @@ import bob.ip.base
 import numpy
 from .Base import Base
 from .utils import load_cropper
+from bob.pipelines.sample import SampleBatch


 class INormLBP(Base):
     """Performs I-Norm LBP on the given image"""
@@ -116,22 +117,19 @@ import bob.ip.base
       The cropped and photometrically enhanced face.
     """
 
-        def _crop(image, annotations):
+        def _crop(image, annotations=None):
             image = self.color_channel(image)
             if self.cropper is not None:
-
-                if not isinstance(self.cropper, bob.bio.face.preprocessor.FaceCrop):
-                    self.cropper = self.cropper()
-
-                image = self.cropper.crop_face(image, annotations)
+                image = self.cropper.transform(image, annotations=annotations)
             image = self.lbp_extractor(image)
             return self.data_type(image)
+
+        if isinstance(X, SampleBatch):
+            if annotations is None:
+                return [_crop(data) for data in X]
+            else:
+                return [_crop(data, annot) for data, annot in zip(X, annotations)]
 
-        if isinstance(annotations, list):
-            cropped_images = []
-            for img, annot in zip(X, annotations):
-                cropped_images.append(_crop(img, annot))
-            return cropped_images
         else:
             return _crop(X, annotations)
 
diff --git a/bob/bio/face/preprocessor/Scale.py b/bob/bio/face/preprocessor/Scale.py
index ced7dc6876424be7b92e20a517ba40a2ddadce7b..5adce7ad77a46b881a6de9e3d85aedffca094442 100644
--- a/bob/bio/face/preprocessor/Scale.py
+++ b/bob/bio/face/preprocessor/Scale.py
@@ -31,7 +31,7 @@ class Scale(TransformerMixin, BaseEstimator):
     def fit(self, X, y=None):
         return self
 
-    def transform(self, X):
+    def transform(self, X, annotations=None):
         """
         Resize an image given a shape
 
@@ -44,16 +44,20 @@ class Scale(TransformerMixin, BaseEstimator):
           target_img_size: tuple
              Target image size
 
         """

         def _resize(x):
             return resize(x, self.target_img_size, anti_aliasing=True)
 
         X = check_array(X, allow_nd=True)
 
-        if X.ndim <= 3 and X.ndim >= 4:
+        if X.ndim < 2 or X.ndim > 4:
             raise ValueError(f"Invalid image shape {X.shape}")
 
+        if X.ndim == 2:
+            # a single 2D (grayscale) image
+            return _resize(X)
+
         if X.ndim == 3:
             # Checking if it's bob format CxHxW
             if X.shape[0] == 3:
diff --git a/bob/bio/face/preprocessor/utils.py b/bob/bio/face/preprocessor/utils.py
index 604cd002035ef282630c75de7121b70eb3248312..9ea3eea51e5025b202ee2817458c0438db91d712 100644
--- a/bob/bio/face/preprocessor/utils.py
+++ b/bob/bio/face/preprocessor/utils.py
@@ -10,16 +10,9 @@ def load_cropper(face_cropper):
         cropper = None
     elif isinstance(face_cropper, six.string_types):
         cropper = bob.bio.base.load_resource(face_cropper, "preprocessor")
-    elif isinstance(face_cropper, (FaceCrop, FaceDetect)):
-        cropper = face_cropper
     else:
-        raise ValueError("The given face cropper type is not understood")
+        cropper = face_cropper
 
-    assert (
-        cropper is None
-        or isinstance(cropper, (FaceCrop, FaceDetect))
-        or isinstance(cropper, functools.partial)
-    )
     return cropper
 
 
diff --git a/bob/bio/face/test/data/ada.png b/bob/bio/face/test/data/ada.png
new file mode 100644
index 0000000000000000000000000000000000000000..1444aaa27d50e8c5a330cb6f0467a7dbcf8ca88b
Binary files /dev/null and b/bob/bio/face/test/data/ada.png differ
diff --git a/bob/bio/face/test/test_baselines.py b/bob/bio/face/test/test_baselines.py
index 49c561137b12faf6254d28a68e0a9452cd0a9243..1d2ce6a442455c4a697ca541f983644fc0004b57 100644
--- a/bob/bio/face/test/test_baselines.py
+++ b/bob/bio/face/test/test_baselines.py
@@ -1,95 +1,121 @@
 from bob.extension.config import load
 import pkg_resources
 import numpy as np
-from bob.pipelines import Sample, SampleSet
+from bob.pipelines import Sample, SampleSet, DelayedSample
 from bob.bio.base import load_resource
+from bob.bio.base.pipelines.vanilla_biometrics import checkpoint_vanilla_biometrics, dask_vanilla_biometrics
+import tempfile
+import os
+import bob.io.base
+import functools
+import copy
 
 
-def get_fake_sample_set(
-    face_size=(160, 160), eyes={"leye": (46, 107), "reye": (46, 53)}
-):
+images = dict()
+images["bioref"] = (
+    pkg_resources.resource_filename("bob.bio.face.test", "data/testimage.jpg"),
+    {"reye": (131, 176), "leye": (222, 170)},
+)
+images["probe"] = (
+    pkg_resources.resource_filename("bob.bio.face.test", "data/ada.png"),
+    {"reye": (440, 207), "leye": (546, 207)},
+)
+
+
+def get_fake_sample_set(face_size=(160, 160), purpose="bioref"):
+
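+    # Build a one-sample SampleSet whose data is lazily loaded from disk.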
+    data = images[purpose][0]
+    annotations = images[purpose][1]
+    key = "1" if purpose == "bioref" else "2"
 
-    data = np.random.rand(3, 400, 400)
-    annotations = {"leye": (115, 267), "reye": (115, 132)}
     return [
         SampleSet(
-            [Sample(data, key="1", annotations=annotations)],
-            key="1",
-            subject="1",
+            [
+                DelayedSample(
+                    load=functools.partial(bob.io.base.load, data),
+                    key=key,
+                    annotations=annotations,
+                )
+            ],
+            key=key,
+            subject=key,
             references=["1"],
         )
     ]
 
 
-def test_facenet_baseline():
+def run_baseline(baseline):
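+    # Run the baseline in-memory, with checkpointing, and with Dask; all three must yield the same score.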
+    biometric_references = get_fake_sample_set(purpose="bioref")
+    probes = get_fake_sample_set(purpose="probe")
 
-    biometric_references = get_fake_sample_set()
-    probes = get_fake_sample_set()
-    
     # Regular pipeline
-    pipeline = load_resource("facenet_sanderberg", "baseline")
+    pipeline = load_resource(baseline, "baseline")
     scores = pipeline([], biometric_references, probes)
-    assert len(scores)==1
-    assert len(scores[0])==1
+    assert len(scores) == 1
+    assert len(scores[0]) == 1
 
 
-    # Regular with 
+    # CHECKPOINTING
+    with tempfile.TemporaryDirectory() as d:
 
-    # fake_sample = get_fake_sample()
+        checkpoint_pipeline = checkpoint_vanilla_biometrics(copy.deepcopy(pipeline), base_dir=d)
+        checkpoint_scores = checkpoint_pipeline([], biometric_references, probes)
+        assert len(checkpoint_scores) == 1
+        assert len(checkpoint_scores[0]) == 1
+        assert np.isclose(scores[0][0].data, checkpoint_scores[0][0].data)
 
-    # transformed_sample = transformer.transform([fake_sample])[0]
-    # transformed_data = transformed_sample.data
-    # assert transformed_sample.data.size == 128
+        dirs = os.listdir(d)
+        assert "biometric_references" in dirs
+        assert "samplewrapper-1" in dirs
+        assert "samplewrapper-2" in dirs
+        assert "scores" in dirs
 
 
-def test_inception_resnetv2_msceleb():
-    transformer = load_resource("inception_resnetv2_msceleb", "baseline")
+    # DASK
+    with tempfile.TemporaryDirectory() as d:
 
-    fake_sample = get_fake_sample()
+        dask_pipeline = dask_vanilla_biometrics(checkpoint_vanilla_biometrics(copy.deepcopy(pipeline), base_dir=d))
+        dask_scores = dask_pipeline([], biometric_references, probes)
+        dask_scores = dask_scores.compute(scheduler="single-threaded")
+        assert len(dask_scores) == 1
+        assert len(dask_scores[0]) == 1
+        assert np.isclose(scores[0][0].data, dask_scores[0][0].data)
 
-    transformed_sample = transformer.transform([fake_sample])[0]
-    transformed_data = transformed_sample.data
-    assert transformed_sample.data.size == 128
+        dirs = os.listdir(d)
+        assert "biometric_references" in dirs
+        assert "samplewrapper-1" in dirs
+        assert "samplewrapper-2" in dirs
+        assert "scores" in dirs
 
 
-def test_inception_resnetv2_casiawebface():
-    transformer = load_resource("inception_resnetv2_casiawebface", "baseline")
+def test_facenet_baseline():
+    run_baseline("facenet_sanderberg")
 
-    fake_sample = get_fake_sample()
 
-    transformed_sample = transformer.transform([fake_sample])[0]
-    transformed_data = transformed_sample.data
-    assert transformed_sample.data.size == 128
+def test_inception_resnetv2_msceleb():
+    run_baseline("inception_resnetv2_msceleb")
 
 
-def test_inception_resnetv1_msceleb():
-    transformer = load_resource("inception_resnetv1_msceleb", "baseline")
+def test_inception_resnetv2_casiawebface():
+    run_baseline("inception_resnetv2_casiawebface")
 
-    fake_sample = get_fake_sample()
 
-    transformed_sample = transformer.transform([fake_sample])[0]
-    transformed_data = transformed_sample.data
-    assert transformed_sample.data.size == 128
+def test_inception_resnetv1_msceleb():
+    run_baseline("inception_resnetv1_msceleb")
 
 
 def test_inception_resnetv1_casiawebface():
-    transformer = load_resource("inception_resnetv1_casiawebface", "baseline")
-
-    fake_sample = get_fake_sample()
-
-    transformed_sample = transformer.transform([fake_sample])[0]
-    transformed_data = transformed_sample.data
-    assert transformed_sample.data.size == 128
-
+    run_baseline("inception_resnetv1_casiawebface")
 
 def test_arcface_insight_tf():
     import tensorflow as tf
-
     tf.compat.v1.reset_default_graph()
-    transformer = load_resource("arcface_insight_tf", "baseline")
 
-    fake_sample = get_fake_sample()
+    run_baseline("arcface_insight_tf")
+
 
-    transformed_sample = transformer.transform([fake_sample])[0]
-    transformed_data = transformed_sample.data
-    assert transformed_sample.data.size == 512
+def test_gabor_graph():
+    run_baseline("gabor_graph")    
\ No newline at end of file
diff --git a/bob/bio/face/test/test_transformers.py b/bob/bio/face/test/test_transformers.py
index ab73788435171a0991dcc04596601cdcf9f25542..fec5c2637bb1115cafdef443723016cfbb8cd55f 100644
--- a/bob/bio/face/test/test_transformers.py
+++ b/bob/bio/face/test/test_transformers.py
@@ -11,8 +11,8 @@ def get_fake_sample(face_size=(160, 160), eyes={"leye": (46, 107), "reye": (46,
     return Sample(data, key="1", annotations=annotations)
 
 
-def test_facenet_baseline():    
-    transformer = load_resource("facenet_sanderberg", "baseline")
+def test_facenet():
+    transformer = load_resource("facenet_sanderberg", "transformer")
 
     fake_sample = get_fake_sample()
 
@@ -22,7 +22,7 @@ def test_facenet_baseline():
 
 
 def test_inception_resnetv2_msceleb():
-    transformer = load_resource("inception_resnetv2_msceleb", "baseline")
+    transformer = load_resource("inception_resnetv2_msceleb", "transformer")
 
     fake_sample = get_fake_sample()
 
@@ -32,7 +32,7 @@ def test_inception_resnetv2_msceleb():
 
 
 def test_inception_resnetv2_casiawebface():
-    transformer = load_resource("inception_resnetv2_casiawebface", "baseline")
+    transformer = load_resource("inception_resnetv2_casiawebface", "transformer")
 
     fake_sample = get_fake_sample()
 
@@ -42,7 +42,7 @@ def test_inception_resnetv2_casiawebface():
 
 
 def test_inception_resnetv1_msceleb():
-    transformer = load_resource("inception_resnetv1_msceleb", "baseline")
+    transformer = load_resource("inception_resnetv1_msceleb", "transformer")
 
     fake_sample = get_fake_sample()
 
@@ -52,7 +52,7 @@ def test_inception_resnetv1_msceleb():
 
 
 def test_inception_resnetv1_casiawebface():
-    transformer = load_resource("inception_resnetv1_casiawebface", "baseline")
+    transformer = load_resource("inception_resnetv1_casiawebface", "transformer")
 
     fake_sample = get_fake_sample()
 
@@ -64,10 +64,21 @@ def test_inception_resnetv1_casiawebface():
 def test_arcface_insight_tf():
     import tensorflow as tf
     tf.compat.v1.reset_default_graph()
-    transformer = load_resource("arcface_insight_tf", "baseline")
+    transformer = load_resource("arcface_insight_tf", "transformer")
 
     fake_sample = get_fake_sample()
 
     transformed_sample = transformer.transform([fake_sample])[0]
     transformed_data = transformed_sample.data
     assert transformed_sample.data.size == 512
+
+
+def test_gabor_graph():
+    transformer = load_resource("gabor_graph", "transformer")
+
+    fake_sample = get_fake_sample()
+
+    transformed_sample = transformer.transform([fake_sample])[0]
+    transformed_data = transformed_sample.data
+
+    assert len(transformed_data) == 80
diff --git a/develop.cfg b/develop.cfg
index 5fafd36ce01e6fa8420cf5be0261782f921c45e9..59cf119b3c5a9460a5713a0b6d5678c4a188a48a 100644
--- a/develop.cfg
+++ b/develop.cfg
@@ -6,6 +6,7 @@ parts = scripts
 
 develop = src/bob.pipelines
           src/bob.bio.base
+          src/bob.ip.gabor
           .
           
 
@@ -13,6 +14,7 @@ develop = src/bob.pipelines
 eggs = bob.bio.face
        bob.pipelines
        bob.bio.base
+       bob.ip.gabor
  
 
 extensions = bob.buildout
@@ -26,7 +28,7 @@ auto-checkout = *
 [sources]
 bob.pipelines = git git@gitlab.idiap.ch:bob/bob.pipelines
 bob.bio.base = git git@gitlab.idiap.ch:bob/bob.bio.base
-
+bob.ip.gabor = git git@gitlab.idiap.ch:bob/bob.ip.gabor
 
 [scripts]
 recipe = bob.buildout:scripts
diff --git a/setup.py b/setup.py
index 2f920af704fbf94ec50463bba58e015ad6fe4132..734f96231fb75a5a844af046b128a3e4ac940892 100644
--- a/setup.py
+++ b/setup.py
@@ -148,6 +148,7 @@ setup(
           'inception_resnetv1_msceleb = bob.bio.face.config.baseline.inception_resnetv1_msceleb:transformer',
           'inception_resnetv2_msceleb = bob.bio.face.config.baseline.inception_resnetv2_msceleb:transformer',
           'arcface_insight_tf = bob.bio.face.config.baseline.arcface_insight_tf:transformer',
+          'gabor_graph = bob.bio.face.config.baseline.gabor_graph:transformer',
         ],
 
         #baselines
@@ -157,7 +158,8 @@ setup(
           'inception_resnetv2_casiawebface = bob.bio.face.config.baseline.inception_resnetv2_casiawebface:pipeline',
           'inception_resnetv1_msceleb = bob.bio.face.config.baseline.inception_resnetv1_msceleb:pipeline',
           'inception_resnetv2_msceleb = bob.bio.face.config.baseline.inception_resnetv2_msceleb:pipeline',
           'arcface_insight_tf = bob.bio.face.config.baseline.arcface_insight_tf:pipeline',
+          'gabor_graph = bob.bio.face.config.baseline.gabor_graph:pipeline',
         ],