diff --git a/bob/bio/face/config/baseline/arcface_insightface.py b/bob/bio/face/config/baseline/arcface_insightface.py
index db164c301c785c23be4be2710351d9436b84f18b..4e2975fb53fa08ac6b0a073f161a33958ffe14e3 100644
--- a/bob/bio/face/config/baseline/arcface_insightface.py
+++ b/bob/bio/face/config/baseline/arcface_insightface.py
@@ -1,41 +1,18 @@
 from bob.bio.face.embeddings.mxnet_models import ArcFaceInsightFace
-from bob.bio.face.config.baseline.helpers import (
-    lookup_config_from_database,
-    dnn_default_cropping,
-    embedding_transformer,
-)
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
+from bob.bio.face.config.baseline.helpers import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import arcface_baseline
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()
 
 
 def load(annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (112, 112)
-    if annotation_type == "eyes-center":
-        # Hard coding eye positions for backward consistency
-        cropped_positions = {
-            "leye": (55, 81),
-            "reye": (55, 42),
-        }
-    else:
-        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
 
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
+    return arcface_baseline(
         embedding=ArcFaceInsightFace(memory_demanding=memory_demanding),
-        cropped_positions=cropped_positions,
+        annotation_type=annotation_type,
         fixed_positions=fixed_positions,
-        color_channel="rgb",
     )
 
-    algorithm = Distance()
-
-    return VanillaBiometricsPipeline(transformer, algorithm)
-
 
 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/facenet_sanderberg.py b/bob/bio/face/config/baseline/facenet_sanderberg.py
index bdefacb8c1c1ea1167c86c8077fc17c8555aa35c..3197d061a5491a1df905a3e9b4a9117f3660b76d 100644
--- a/bob/bio/face/config/baseline/facenet_sanderberg.py
+++ b/bob/bio/face/config/baseline/facenet_sanderberg.py
@@ -1,39 +1,19 @@
 from bob.bio.face.embeddings.tf2_inception_resnet import (
     FaceNetSanderberg_20170512_110547,
 )
-from bob.bio.face.config.baseline.helpers import (
-    lookup_config_from_database,
-    dnn_default_cropping,
-    embedding_transformer,
-)
-
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
+from bob.bio.face.config.baseline.helpers import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import facenet_baseline
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()
 
 
 def load(annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (160, 160)
-    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
-
-    # ASSEMBLE TRANSFORMER
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
+    return facenet_baseline(
         embedding=FaceNetSanderberg_20170512_110547(memory_demanding=memory_demanding),
-        cropped_positions=cropped_positions,
+        annotation_type=annotation_type,
         fixed_positions=fixed_positions,
-        color_channel="rgb",
-        annotator="mtcnn",
     )
 
-    algorithm = Distance()
-
-    return VanillaBiometricsPipeline(transformer, algorithm)
-
 
 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/helpers.py b/bob/bio/face/config/baseline/helpers.py
index 0daeaf093a87f852bb5fdfa263a7852c3b847578..1ea0668e8f4509f3ccf88b76755042961306c039 100644
--- a/bob/bio/face/config/baseline/helpers.py
+++ b/bob/bio/face/config/baseline/helpers.py
@@ -1,6 +1,4 @@
-import bob.bio.face
 from sklearn.pipeline import make_pipeline
-from bob.bio.base.wrappers import wrap_sample_preprocessor
 from bob.pipelines import wrap
 from bob.bio.face.helpers import face_crop_solver
 import numpy as np
diff --git a/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py b/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
index b532c815b07fe09cf307a8f267569585273fec10..0d88bbadf686293354508d967a88c9ac12c669b9 100644
--- a/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
+++ b/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
@@ -1,40 +1,21 @@
 from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv1_Casia_CenterLoss_2018,
 )
-from bob.bio.face.config.baseline.helpers import (
-    lookup_config_from_database,
-    dnn_default_cropping,
-    embedding_transformer,
-)
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
+from bob.bio.face.config.baseline.helpers import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import facenet_baseline
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()
 
 
 def load(annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (160, 160)
-    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
-
-    # ASSEMBLE TRANSFORMER
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
+    return facenet_baseline(
         embedding=InceptionResnetv1_Casia_CenterLoss_2018(
             memory_demanding=memory_demanding
         ),
-        cropped_positions=cropped_positions,
+        annotation_type=annotation_type,
         fixed_positions=fixed_positions,
-        color_channel="rgb",
-        annotator="mtcnn",
     )
 
-    algorithm = Distance()
-
-    return VanillaBiometricsPipeline(transformer, algorithm)
-
 
 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py b/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
index db94eddd46344b6b48f569fc51c2ee375fdb6996..766f1cf22bc70026d1f4e839f8f18e0e709c69fa 100644
--- a/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
+++ b/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
@@ -1,40 +1,22 @@
 from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv1_MsCeleb_CenterLoss_2018,
 )
-from bob.bio.face.config.baseline.helpers import (
-    lookup_config_from_database,
-    dnn_default_cropping,
-    embedding_transformer,
-)
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
+from bob.bio.face.config.baseline.helpers import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import facenet_baseline
+
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()
 
 
 def load(annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (160, 160)
-    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
-
-    # ASSEMBLE TRANSFORMER
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
+    return facenet_baseline(
         embedding=InceptionResnetv1_MsCeleb_CenterLoss_2018(
             memory_demanding=memory_demanding
         ),
-        cropped_positions=cropped_positions,
+        annotation_type=annotation_type,
         fixed_positions=fixed_positions,
-        color_channel="rgb",
-        annotator="mtcnn",
     )
 
-    algorithm = Distance()
-
-    return VanillaBiometricsPipeline(transformer, algorithm)
-
 
 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py b/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
index c20d856c366f263634eecf7987e66db8aeb737f5..1f56b45152453fc430e97a261fa757e46788a66b 100644
--- a/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
+++ b/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
@@ -1,40 +1,22 @@
 from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv2_Casia_CenterLoss_2018,
 )
-from bob.bio.face.config.baseline.helpers import (
-    lookup_config_from_database,
-    dnn_default_cropping,
-    embedding_transformer,
-)
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
+from bob.bio.face.config.baseline.helpers import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import facenet_baseline
+
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()
 
 
 def load(annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (160, 160)
-    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
-
-    # ASSEMBLE TRANSFORMER
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
+    return facenet_baseline(
         embedding=InceptionResnetv2_Casia_CenterLoss_2018(
             memory_demanding=memory_demanding
         ),
-        cropped_positions=cropped_positions,
+        annotation_type=annotation_type,
         fixed_positions=fixed_positions,
-        color_channel="rgb",
-        annotator="mtcnn",
     )
 
-    algorithm = Distance()
-
-    return VanillaBiometricsPipeline(transformer, algorithm)
-
 
 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py b/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
index d44585ebe97b16a075dcf194143eb92c02c6d5fa..2a4bd3d17b0cc2de989bdda04a5b1b97d7feb884 100644
--- a/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
+++ b/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
@@ -1,40 +1,21 @@
 from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv2_MsCeleb_CenterLoss_2018,
 )
-from bob.bio.face.config.baseline.helpers import (
-    lookup_config_from_database,
-    dnn_default_cropping,
-    embedding_transformer,
-)
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
+from bob.bio.face.config.baseline.helpers import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import facenet_baseline
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()
 
 
 def load(annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (160, 160)
-    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
-
-    # ASSEMBLE TRANSFORMER
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
+    return facenet_baseline(
         embedding=InceptionResnetv2_MsCeleb_CenterLoss_2018(
             memory_demanding=memory_demanding
         ),
-        cropped_positions=cropped_positions,
+        annotation_type=annotation_type,
         fixed_positions=fixed_positions,
-        color_channel="rgb",
-        annotator="mtcnn",
     )
 
-    algorithm = Distance()
-
-    return VanillaBiometricsPipeline(transformer, algorithm)
-
 
 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py b/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py
index 2ca17ebeda8e954ffb45e85beb36911d673aeb92..61b9db989b2cd5216b554592b4e93bc889fbfe2d 100644
--- a/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py
+++ b/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py
@@ -1,41 +1,19 @@
 from bob.bio.face.embeddings.mobilenet_v2 import MobileNetv2_MsCeleb_ArcFace_2021
-from bob.bio.face.config.baseline.helpers import (
-    lookup_config_from_database,
-    dnn_default_cropping,
-    embedding_transformer,
-)
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
+from bob.bio.face.config.baseline.helpers import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import arcface_baseline
+
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()
 
 
 def load(annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (112, 112)
-    if annotation_type == "eyes-center":
-        # Hard coding eye positions for backward consistency
-        cropped_positions = {
-            "leye": (55, 81),
-            "reye": (55, 42),
-        }
-    else:
-        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
 
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
+    return arcface_baseline(
         embedding=MobileNetv2_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
-        cropped_positions=cropped_positions,
+        annotation_type=annotation_type,
         fixed_positions=fixed_positions,
-        color_channel="rgb",
     )
 
-    algorithm = Distance()
-
-    return VanillaBiometricsPipeline(transformer, algorithm)
-
 
 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py b/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py
index c505666297de735a746dc362388d4946ee1bd398..442247b5d4d1e2f619c2c47af89c2c21997dd444 100644
--- a/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py
+++ b/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py
@@ -1,42 +1,18 @@
 from bob.bio.face.embeddings.resnet50 import Resnet50_MsCeleb_ArcFace_2021
-from bob.bio.face.config.baseline.helpers import (
-    lookup_config_from_database,
-    dnn_default_cropping,
-    embedding_transformer,
-)
+from bob.bio.face.config.baseline.helpers import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import arcface_baseline
 
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()
 
 
 def load(annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (112, 112)
-    if annotation_type == "eyes-center":
-        # Hard coding eye positions for backward consistency
-        cropped_positions = {
-            "leye": (55, 81),
-            "reye": (55, 42),
-        }
-    else:
-        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
-
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
+    return arcface_baseline(
         embedding=Resnet50_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
-        cropped_positions=cropped_positions,
+        annotation_type=annotation_type,
         fixed_positions=fixed_positions,
-        color_channel="rgb",
     )
 
-    algorithm = Distance()
-
-    return VanillaBiometricsPipeline(transformer, algorithm)
-
 
 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py b/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py
index 05eaa49bc610ad3fc5aecbcc6a23977897e96890..95a3a95bc5eb18b8bbb4f1f979f9eba3b294f4d2 100644
--- a/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py
+++ b/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py
@@ -1,41 +1,18 @@
 from bob.bio.face.embeddings.resnet50 import Resnet50_VGG2_ArcFace_2021
-from bob.bio.face.config.baseline.helpers import (
-    lookup_config_from_database,
-    dnn_default_cropping,
-    embedding_transformer,
-)
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
+from bob.bio.face.config.baseline.helpers import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import arcface_baseline
+
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()
 
 
 def load(annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (112, 112)
-    if annotation_type == "eyes-center":
-        # Hard coding eye positions for backward consistency
-        cropped_positions = {
-            "leye": (55, 81),
-            "reye": (55, 42),
-        }
-    else:
-        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
-
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
+    return arcface_baseline(
         embedding=Resnet50_VGG2_ArcFace_2021(memory_demanding=memory_demanding),
-        cropped_positions=cropped_positions,
+        annotation_type=annotation_type,
         fixed_positions=fixed_positions,
-        color_channel="rgb",
     )
 
-    algorithm = Distance()
-
-    return VanillaBiometricsPipeline(transformer, algorithm)
-
 
 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/templates.py b/bob/bio/face/config/baseline/templates.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf6c6c4fdb6553cfb8c42987edfbb430c76f8f2b
--- /dev/null
+++ b/bob/bio/face/config/baseline/templates.py
@@ -0,0 +1,65 @@
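+"""Pipeline templates shared by the face-recognition baseline configurations."""
+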
+from bob.bio.face.config.baseline.helpers import (
+    dnn_default_cropping,
+    embedding_transformer,
+)
+from bob.bio.base.pipelines.vanilla_biometrics import (
+    Distance,
+    VanillaBiometricsPipeline,
+)
+
+
+def arcface_baseline(embedding, annotation_type, fixed_positions=None):
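+    """Assembles a vanilla-biometrics pipeline around an ArcFace-style embedding.
+
+    Images are cropped to 112x112 pixels; for the "eyes-center" annotation
+    type the eye positions are hard-coded for backward compatibility.
+    """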
+    # DEFINE CROPPING
+    cropped_image_size = (112, 112)
+    if annotation_type == "eyes-center":
+        # Hard-coded eye positions for backward compatibility
+        cropped_positions = {
+            "leye": (55, 81),
+            "reye": (55, 42),
+        }
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=embedding,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+    )
+
+    algorithm = Distance()
+
+    return VanillaBiometricsPipeline(transformer, algorithm)
+
+
+def facenet_baseline(embedding, annotation_type, fixed_positions=None):
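+    """Assembles a vanilla-biometrics pipeline around a FaceNet-style embedding.
+
+    Images are cropped to 160x160 pixels using the default DNN cropping, and
+    the "mtcnn" annotator is used for samples that lack annotations.
+    """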
+    # DEFINE CROPPING
+    cropped_image_size = (160, 160)
+    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    # ASSEMBLE TRANSFORMER
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=embedding,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
+    )
+
+    algorithm = Distance()
+
+    return VanillaBiometricsPipeline(transformer, algorithm)
diff --git a/bob/bio/face/config/baseline/tf2_inception_resnet.py b/bob/bio/face/config/baseline/tf2_inception_resnet.py
index 619bcf82a093dfa60fb7bc2364ada55d5c983f65..87862c7ae08c94ad80f134c7c301c503b87c1e95 100644
--- a/bob/bio/face/config/baseline/tf2_inception_resnet.py
+++ b/bob/bio/face/config/baseline/tf2_inception_resnet.py
@@ -1,45 +1,22 @@
 from bob.extension import rc
 from bob.bio.face.embeddings.tf2_inception_resnet import InceptionResnetv2
-from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.face.config.baseline.helpers import (
-    lookup_config_from_database,
-    dnn_default_cropping,
-    embedding_transformer,
-)
-
-from sklearn.pipeline import make_pipeline
-from bob.pipelines.wrappers import wrap
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
+from bob.bio.face.config.baseline.helpers import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import facenet_baseline
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()
 
 
 def load(annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (160, 160)
-    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
-
     extractor_path = rc["bob.bio.face.tf2.casia-webface-inception-v2"]
     embedding = InceptionResnetv2(
         checkpoint_path=extractor_path, memory_demanding=memory_demanding
     )
-    # ASSEMBLE TRANSFORMER
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
+    return facenet_baseline(
         embedding=embedding,
-        cropped_positions=cropped_positions,
+        annotation_type=annotation_type,
         fixed_positions=fixed_positions,
-        color_channel="rgb",
-        annotator="mtcnn",
     )
 
-    algorithm = Distance()
-
-    return VanillaBiometricsPipeline(transformer, algorithm)
-
 
 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
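
For reference, a minimal usage sketch of the new templates (mirroring what the
refactored configs above now do; the embedding class is one of those already
used in this patch):

    from bob.bio.face.embeddings.resnet50 import Resnet50_VGG2_ArcFace_2021
    from bob.bio.face.config.baseline.templates import arcface_baseline

    # Returns a full VanillaBiometricsPipeline: face cropping, embedding
    # extraction, and Distance-based scoring.
    pipeline = arcface_baseline(
        embedding=Resnet50_VGG2_ArcFace_2021(memory_demanding=False),
        annotation_type="eyes-center",
        fixed_positions=None,
    )
    transformer = pipeline.transformer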