diff --git a/bob/bio/face/__init__.py b/bob/bio/face/__init__.py
index 4f3858c5fbc180e24a8368de4c55b4e90e73e3c1..9bda18df60dfb0eb66ae95f6292e682150a0e17f 100644
--- a/bob/bio/face/__init__.py
+++ b/bob/bio/face/__init__.py
@@ -4,6 +4,7 @@ from . import algorithm
 from . import script
 from . import database
 from . import annotator
+from . import utils
 
 from . import test
 
diff --git a/bob/bio/face/config/baseline/arcface_insightface.py b/bob/bio/face/config/baseline/arcface_insightface.py
index 3aae89491d77909db49a10d3c8ef010d66e37f4a..3e0181219dbf40d17e32d36d85a909a102b87fd1 100644
--- a/bob/bio/face/config/baseline/arcface_insightface.py
+++ b/bob/bio/face/config/baseline/arcface_insightface.py
@@ -1,34 +1,18 @@
 from bob.bio.face.embeddings.mxnet_models import ArcFaceInsightFace
-from bob.bio.face.config.baseline.helpers import embedding_transformer_112x112
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
-
-
-if "database" in locals():
-    annotation_type = database.annotation_type
-    fixed_positions = database.fixed_positions
-    memory_demanding = (
-        database.memory_demanding if hasattr(database, "memory_demanding") else False
-    )
-else:
-    annotation_type = None
-    fixed_positions = None
-    memory_demanding = False
+from bob.bio.face.utils import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import arcface_baseline
 
+annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
+    locals().get("database")
+)
+
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_112x112(
-        ArcFaceInsightFace(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
-        color_channel="rgb",
-    )
 
-    algorithm = Distance()
-
-    return VanillaBiometricsPipeline(transformer, algorithm)
+    return arcface_baseline(
+        embedding=ArcFaceInsightFace(memory_demanding=memory_demanding),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
 
 
 pipeline = load(annotation_type, fixed_positions)
diff --git a/bob/bio/face/config/baseline/dummy.py b/bob/bio/face/config/baseline/dummy.py
index 7e0317a3f6a452a83efb5d2188f48081e71c386b..9c407c8ed84a8d4374ce557e5492c2956c1fac54 100644
--- a/bob/bio/face/config/baseline/dummy.py
+++ b/bob/bio/face/config/baseline/dummy.py
@@ -5,21 +5,17 @@ from bob.bio.base.pipelines.vanilla_biometrics import (
     VanillaBiometricsPipeline,
 )
 from bob.pipelines.transformers import SampleLinearize
+from bob.bio.face.utils import lookup_config_from_database
 
-if "database" in locals():
-    annotation_type = database.annotation_type
-    fixed_positions = database.fixed_positions
-else:
-    annotation_type = None
-    fixed_positions = None
-
+annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
+    locals().get("database")
+)
 
 import bob.ip.color
 from sklearn.base import TransformerMixin, BaseEstimator
-class ToGray(TransformerMixin, BaseEstimator):
 
+
+class ToGray(TransformerMixin, BaseEstimator):
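+    """Converts RGB images to grayscale and keeps only the top-left 10x10 pixels (a dummy transformer for testing)."""
+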
     def transform(self, X, annotations=None):
-        return [bob.ip.color.rgb_to_gray(data)[0:10,0:10] for data in X]
+        return [bob.ip.color.rgb_to_gray(data)[0:10, 0:10] for data in X]
 
     def _more_tags(self):
         return {"stateless": True, "requires_fit": False}
@@ -34,9 +30,7 @@ def load(annotation_type, fixed_positions=None):
 
     transformer = make_pipeline(
         wrap(
-            ["sample"],
-            ToGray(),
-            transform_extra_arguments=transform_extra_arguments,
+            ["sample"], ToGray(), transform_extra_arguments=transform_extra_arguments,
         ),
         SampleLinearize(),
     )
diff --git a/bob/bio/face/config/baseline/facenet_sanderberg.py b/bob/bio/face/config/baseline/facenet_sanderberg.py
index e3d1dc269922f9633feceab0b988514ff42b37d2..510d9287fba4613f072482f0f57026d8fec824ba 100644
--- a/bob/bio/face/config/baseline/facenet_sanderberg.py
+++ b/bob/bio/face/config/baseline/facenet_sanderberg.py
@@ -1,35 +1,20 @@
 from bob.bio.face.embeddings.tf2_inception_resnet import (
     FaceNetSanderberg_20170512_110547,
 )
-from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
-
-memory_demanding = False
-if "database" in locals():
-    annotation_type = database.annotation_type
-    fixed_positions = database.fixed_positions
-    memory_demanding = (
-        database.memory_demanding if hasattr(database, "memory_demanding") else False
-    )
+from bob.bio.face.utils import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import facenet_baseline
 
-else:
-    annotation_type = None
-    fixed_positions = None
+annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
+    locals().get("database")
+)
 
 
 def load(annotation_type, fixed_positions=None):
-
-    transformer = embedding_transformer_160x160(
-        FaceNetSanderberg_20170512_110547(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    return facenet_baseline(
+        embedding=FaceNetSanderberg_20170512_110547(memory_demanding=memory_demanding),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
     )
-    algorithm = Distance()
-
-    return VanillaBiometricsPipeline(transformer, algorithm)
 
 
 pipeline = load(annotation_type, fixed_positions)
diff --git a/bob/bio/face/config/baseline/gabor_graph.py b/bob/bio/face/config/baseline/gabor_graph.py
index 1f1a786060b6041c2549ac55436e6e623be495bd..b21d10ae7ad10e677af7a9d9e9d3d9d18cbb15f8 100644
--- a/bob/bio/face/config/baseline/gabor_graph.py
+++ b/bob/bio/face/config/baseline/gabor_graph.py
@@ -3,7 +3,11 @@ from bob.bio.base.pipelines.vanilla_biometrics import (
     VanillaBiometricsPipeline,
     BioAlgorithmLegacy,
 )
-from bob.bio.face.config.baseline.helpers import crop_80x64
+from bob.bio.face.utils import (
+    lookup_config_from_database,
+    legacy_default_cropping,
+    make_cropper,
+)
 import math
 import numpy as np
 import bob.bio.face
@@ -17,20 +21,9 @@ import logging
 logger = logging.getLogger(__name__)
 
 #### SOLVING IF THERE'S ANY DATABASE INFORMATION
-if "database" in locals():
-    annotation_type = database.annotation_type
-    fixed_positions = database.fixed_positions
-else:
-    annotation_type = None
-    fixed_positions = None
-
-
-def get_cropper(annotation_type, fixed_positions=None):
-    # Cropping
-    face_cropper, transform_extra_arguments = crop_80x64(
-        annotation_type, fixed_positions, color_channel="gray"
-    )
-    return face_cropper, transform_extra_arguments
+annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
+    locals().get("database")
+)
 
 
 def get_pipeline(face_cropper, transform_extra_arguments):
@@ -80,9 +73,22 @@ def get_pipeline(face_cropper, transform_extra_arguments):
 
 def load(annotation_type, fixed_positions=None):
     ####### SOLVING THE FACE CROPPER TO BE USED ##########
-    face_cropper, transform_extra_arguments = get_cropper(
-        annotation_type, fixed_positions
+
+    # Define cropped positions
+    CROPPED_IMAGE_HEIGHT = 80
+    CROPPED_IMAGE_WIDTH = CROPPED_IMAGE_HEIGHT * 4 // 5
+    cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
+    cropped_positions = legacy_default_cropping(cropped_image_size, annotation_type)
+
+    # Cropping
+    face_cropper, transform_extra_arguments = make_cropper(
+        cropped_image_size=cropped_image_size,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="gray",
+        annotator="mtcnn",
     )
+
     return get_pipeline(face_cropper, transform_extra_arguments)
 
 
diff --git a/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py b/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
index acc6f981aecc0ae7312097e68536055605c50ce6..149c074270a1001513dfbeacb0cbf09325784b28 100644
--- a/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
+++ b/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
@@ -1,36 +1,23 @@
 from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv1_Casia_CenterLoss_2018,
 )
-from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
-
-memory_demanding = False
-if "database" in locals():
-    annotation_type = database.annotation_type
-    fixed_positions = database.fixed_positions
-    memory_demanding = (
-        database.memory_demanding if hasattr(database, "memory_demanding") else False
-    )
+from bob.bio.face.utils import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import facenet_baseline
 
-else:
-    annotation_type = None
-    fixed_positions = None
+annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
+    locals().get("database")
+)
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_160x160(
-        InceptionResnetv1_Casia_CenterLoss_2018(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    return facenet_baseline(
+        embedding=InceptionResnetv1_Casia_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
     )
 
-    algorithm = Distance()
-
-    return VanillaBiometricsPipeline(transformer, algorithm)
-
 
 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py b/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
index 70a1a58fdee70f1566a1bfb63e10493cf5599809..f7ce09f7d1d2bd57eee0490b269c0b5a857c1a8d 100644
--- a/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
+++ b/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
@@ -1,36 +1,24 @@
 from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv1_MsCeleb_CenterLoss_2018,
 )
-from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
+from bob.bio.face.utils import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import facenet_baseline
 
-memory_demanding = False
-if "database" in locals():
-    annotation_type = database.annotation_type
-    fixed_positions = database.fixed_positions
-    memory_demanding = (
-        database.memory_demanding if hasattr(database, "memory_demanding") else False
-    )
 
-else:
-    annotation_type = None
-    fixed_positions = None
+annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
+    locals().get("database")
+)
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_160x160(
-        InceptionResnetv1_MsCeleb_CenterLoss_2018(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    return facenet_baseline(
+        embedding=InceptionResnetv1_MsCeleb_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
     )
 
-    algorithm = Distance()
-
-    return VanillaBiometricsPipeline(transformer, algorithm)
-
 
 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py b/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
index 3dd2704330cadf49873af96489cc2d9d51e27cfd..eadd91541b85511f31e48ad97140ba6bc5dc5d61 100644
--- a/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
+++ b/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
@@ -1,36 +1,24 @@
 from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv2_Casia_CenterLoss_2018,
 )
-from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
+from bob.bio.face.utils import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import facenet_baseline
 
-memory_demanding = False
-if "database" in locals():
-    annotation_type = database.annotation_type
-    fixed_positions = database.fixed_positions
-    memory_demanding = (
-        database.memory_demanding if hasattr(database, "memory_demanding") else False
-    )
 
-else:
-    annotation_type = None
-    fixed_positions = None
+annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
+    locals().get("database")
+)
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_160x160(
-        InceptionResnetv2_Casia_CenterLoss_2018(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    return facenet_baseline(
+        embedding=InceptionResnetv2_Casia_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
     )
 
-    algorithm = Distance()
-
-    return VanillaBiometricsPipeline(transformer, algorithm)
-
 
 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py b/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
index ba339bcf11dd9928ed37f1f9d95eeba7f559f8fc..0be122d36c854becb94ef192ecc79d5134682975 100644
--- a/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
+++ b/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
@@ -1,36 +1,23 @@
 from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv2_MsCeleb_CenterLoss_2018,
 )
-from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
-
-memory_demanding = False
-if "database" in locals():
-    annotation_type = database.annotation_type
-    fixed_positions = database.fixed_positions
+from bob.bio.face.utils import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import facenet_baseline
 
-    memory_demanding = (
-        database.memory_demanding if hasattr(database, "memory_demanding") else False
-    )
-else:
-    annotation_type = None
-    fixed_positions = None
+annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
+    locals().get("database")
+)
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_160x160(
-        InceptionResnetv2_MsCeleb_CenterLoss_2018(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    return facenet_baseline(
+        embedding=InceptionResnetv2_MsCeleb_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
     )
 
-    algorithm = Distance()
-
-    return VanillaBiometricsPipeline(transformer, algorithm)
-
 
 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/lda.py b/bob/bio/face/config/baseline/lda.py
index 1343f51a431ee7b97d0ca704c9ec910f57c1230d..de1935cb45c947e8720f1ae385dc5c42e80bb033 100644
--- a/bob/bio/face/config/baseline/lda.py
+++ b/bob/bio/face/config/baseline/lda.py
@@ -3,7 +3,11 @@ from bob.bio.base.pipelines.vanilla_biometrics import (
     VanillaBiometricsPipeline,
     BioAlgorithmLegacy,
 )
-from bob.bio.face.config.baseline.helpers import crop_80x64
+from bob.bio.face.utils import (
+    lookup_config_from_database,
+    legacy_default_cropping,
+    make_cropper,
+)
 import numpy as np
 import bob.bio.face
 from sklearn.pipeline import make_pipeline
@@ -18,20 +22,27 @@ import logging
 logger = logging.getLogger(__name__)
 
 #### SOLVING IF THERE'S ANY DATABASE INFORMATION
-if "database" in locals():
-    annotation_type = database.annotation_type
-    fixed_positions = database.fixed_positions
-else:
-    annotation_type = None
-    fixed_positions = None
+annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
+    locals().get("database")
+)
 
 
 ####### SOLVING THE FACE CROPPER TO BE USED ##########
 def load(annotation_type, fixed_positions=None):
 
+    # Define cropped positions
+    CROPPED_IMAGE_HEIGHT = 80
+    CROPPED_IMAGE_WIDTH = CROPPED_IMAGE_HEIGHT * 4 // 5
+    cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
+    cropped_positions = legacy_default_cropping(cropped_image_size, annotation_type)
+
     # Cropping
-    face_cropper, transform_extra_arguments = crop_80x64(
-        annotation_type, fixed_positions, color_channel="gray"
+    face_cropper, transform_extra_arguments = make_cropper(
+        cropped_image_size=cropped_image_size,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="gray",
+        annotator="mtcnn",
     )
 
     preprocessor = bob.bio.face.preprocessor.TanTriggs(
diff --git a/bob/bio/face/config/baseline/lgbphs.py b/bob/bio/face/config/baseline/lgbphs.py
index eebcbba56258d6a85e792c4fb4e860be748e0269..b70b638abd2fc9058f1c7d5ee3c38bb8cf7d0ca8 100644
--- a/bob/bio/face/config/baseline/lgbphs.py
+++ b/bob/bio/face/config/baseline/lgbphs.py
@@ -3,7 +3,11 @@ from bob.bio.base.pipelines.vanilla_biometrics import (
     VanillaBiometricsPipeline,
     BioAlgorithmLegacy,
 )
-from bob.bio.face.config.baseline.helpers import crop_80x64
+from bob.bio.face.utils import (
+    lookup_config_from_database,
+    legacy_default_cropping,
+    make_cropper,
+)
 import math
 import numpy as np
 import bob.bio.face
@@ -13,20 +17,9 @@ import bob.math
 
 
 #### SOLVING IF THERE'S ANY DATABASE INFORMATION
-if "database" in locals():
-    annotation_type = database.annotation_type
-    fixed_positions = database.fixed_positions
-else:
-    annotation_type = None
-    fixed_positions = None
-
-
-def get_cropper(annotation_type, fixed_positions=None):
-    # Cropping
-    face_cropper, transform_extra_arguments = crop_80x64(
-        annotation_type, fixed_positions, color_channel="gray"
-    )
-    return face_cropper, transform_extra_arguments
+annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
+    locals().get("database")
+)
 
 
 def get_pipeline(face_cropper, transform_extra_arguments):
@@ -70,10 +63,22 @@ def get_pipeline(face_cropper, transform_extra_arguments):
 
 
 def load(annotation_type, fixed_positions=None):
+    # Define cropped positions
+    CROPPED_IMAGE_HEIGHT = 80
+    CROPPED_IMAGE_WIDTH = CROPPED_IMAGE_HEIGHT * 4 // 5
+    cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
+    cropped_positions = legacy_default_cropping(cropped_image_size, annotation_type)
+
     ####### SOLVING THE FACE CROPPER TO BE USED ##########
-    face_cropper, transform_extra_arguments = get_cropper(
-        annotation_type, fixed_positions
+    # Cropping
+    face_cropper, transform_extra_arguments = make_cropper(
+        cropped_image_size=cropped_image_size,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="gray",
+        annotator="mtcnn",
     )
+
     return get_pipeline(face_cropper, transform_extra_arguments)
 
 
diff --git a/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py b/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py
index e68e7ef3edf5d8c080efc9aac7bf06a39d85982b..2355494888b81e8d8f7e8e49dc4ba8a3ba43daea 100644
--- a/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py
+++ b/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py
@@ -1,33 +1,20 @@
 from bob.bio.face.embeddings.mobilenet_v2 import MobileNetv2_MsCeleb_ArcFace_2021
-from bob.bio.face.config.baseline.helpers import embedding_transformer_112x112
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
+from bob.bio.face.utils import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import arcface_baseline
 
-memory_demanding = False
-if "database" in locals():
-    annotation_type = database.annotation_type
-    fixed_positions = database.fixed_positions
 
-    memory_demanding = (
-        database.memory_demanding if hasattr(database, "memory_demanding") else False
-    )
-else:
-    annotation_type = None
-    fixed_positions = None
+annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
+    locals().get("database")
+)
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_112x112(
-        MobileNetv2_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
-    )
-
-    algorithm = Distance()
 
-    return VanillaBiometricsPipeline(transformer, algorithm)
+    return arcface_baseline(
+        embedding=MobileNetv2_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
 
 
 pipeline = load(annotation_type, fixed_positions)
diff --git a/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py b/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py
index dfeb4b74af2958879e83c74b4428c2cb7601d308..0560a97a62f42d33f5d8c54c3177760db61ab9fc 100644
--- a/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py
+++ b/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py
@@ -1,34 +1,20 @@
 from bob.bio.face.embeddings.resnet50 import Resnet50_MsCeleb_ArcFace_2021
-from bob.bio.face.config.baseline.helpers import embedding_transformer_112x112
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
+from bob.bio.face.utils import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import arcface_baseline
 
-memory_demanding = False
-if "database" in locals():
-    annotation_type = database.annotation_type
-    fixed_positions = database.fixed_positions
 
-    memory_demanding = (
-        database.memory_demanding if hasattr(database, "memory_demanding") else False
-    )
-else:
-    annotation_type = None
-    fixed_positions = None
+annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
+    locals().get("database")
+)
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_112x112(
-        Resnet50_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    return arcface_baseline(
+        embedding=Resnet50_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
     )
 
-    algorithm = Distance()
-
-    return VanillaBiometricsPipeline(transformer, algorithm)
-
 
 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py b/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py
index b8f13ec4834df8198225b66f3e1dfa0f22ed7f7b..64d6ec4c84fbb43c1ef3061c070c6a568e7d74d6 100644
--- a/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py
+++ b/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py
@@ -1,34 +1,20 @@
 from bob.bio.face.embeddings.resnet50 import Resnet50_VGG2_ArcFace_2021
-from bob.bio.face.config.baseline.helpers import embedding_transformer_112x112
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
+from bob.bio.face.utils import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import arcface_baseline
 
-memory_demanding = False
-if "database" in locals():
-    annotation_type = database.annotation_type
-    fixed_positions = database.fixed_positions
 
-    memory_demanding = (
-        database.memory_demanding if hasattr(database, "memory_demanding") else False
-    )
-else:
-    annotation_type = None
-    fixed_positions = None
+annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
+    locals().get("database")
+)
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_112x112(
-        Resnet50_VGG2_ArcFace_2021(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    return arcface_baseline(
+        embedding=Resnet50_VGG2_ArcFace_2021(memory_demanding=memory_demanding),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
     )
 
-    algorithm = Distance()
-
-    return VanillaBiometricsPipeline(transformer, algorithm)
-
 
 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/templates.py b/bob/bio/face/config/baseline/templates.py
new file mode 100644
index 0000000000000000000000000000000000000000..415ecd2831432dfb96b94ff4eae814409cdc2088
--- /dev/null
+++ b/bob/bio/face/config/baseline/templates.py
@@ -0,0 +1,54 @@
+from bob.bio.face.utils import (
+    dnn_default_cropping,
+    embedding_transformer,
+)
+from bob.bio.base.pipelines.vanilla_biometrics import (
+    Distance,
+    VanillaBiometricsPipeline,
+)
+
+
+def arcface_baseline(embedding, annotation_type, fixed_positions=None):
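+    """
+    Assembles the common ArcFace-style baseline: a 112x112 ``embedding_transformer``
+    (with hard-coded eye positions for ``eyes-center`` annotations) followed by a
+    ``Distance`` scoring algorithm, wrapped in a ``VanillaBiometricsPipeline``.
+    """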
+    # DEFINE CROPPING
+    cropped_image_size = (112, 112)
+    if annotation_type == "eyes-center":
+        # Hard coding eye positions for backward consistency
+        cropped_positions = {
+            "leye": (55, 81),
+            "reye": (55, 42),
+        }
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=embedding,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
+    )
+
+    algorithm = Distance()
+
+    return VanillaBiometricsPipeline(transformer, algorithm)
+
+
+def facenet_baseline(embedding, annotation_type, fixed_positions=None):
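+    """
+    Assembles the common FaceNet-style baseline: a 160x160 ``embedding_transformer``
+    with the default DNN cropping, followed by a ``Distance`` scoring algorithm,
+    wrapped in a ``VanillaBiometricsPipeline``.
+    """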
+    # DEFINE CROPPING
+    cropped_image_size = (160, 160)
+    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    # ASSEMBLE TRANSFORMER
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=embedding,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
+    )
+
+    algorithm = Distance()
+
+    return VanillaBiometricsPipeline(transformer, algorithm)
diff --git a/bob/bio/face/config/baseline/tf2_inception_resnet.py b/bob/bio/face/config/baseline/tf2_inception_resnet.py
index 9804831574e62e555bc70213915f340f965bba17..38d78c53de52cac64e1e668f98c72635789adf99 100644
--- a/bob/bio/face/config/baseline/tf2_inception_resnet.py
+++ b/bob/bio/face/config/baseline/tf2_inception_resnet.py
@@ -1,55 +1,24 @@
 from bob.extension import rc
 from bob.bio.face.embeddings.tf2_inception_resnet import InceptionResnetv2
-from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_default_cropping,
-    embedding_transformer,
-)
+from bob.bio.face.utils import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import facenet_baseline
 
-from sklearn.pipeline import make_pipeline
-from bob.pipelines.wrappers import wrap
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
+annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
+    locals().get("database")
 )
 
-memory_demanding = False
-if "database" in locals():
-    annotation_type = database.annotation_type
-    fixed_positions = database.fixed_positions
-    memory_demanding = (
-        database.memory_demanding if hasattr(database, "memory_demanding") else False
-    )
-
-else:
-    annotation_type = None
-    fixed_positions = None
-
 
 def load(annotation_type, fixed_positions=None):
-    CROPPED_IMAGE_SIZE = (160, 160)
-    CROPPED_POSITIONS = embedding_transformer_default_cropping(
-        CROPPED_IMAGE_SIZE, annotation_type=annotation_type
-    )
-
     extractor_path = rc["bob.bio.face.tf2.casia-webface-inception-v2"]
-
     embedding = InceptionResnetv2(
         checkpoint_path=extractor_path, memory_demanding=memory_demanding
     )
-
-    transformer = embedding_transformer(
-        CROPPED_IMAGE_SIZE,
-        embedding,
-        annotation_type,
-        CROPPED_POSITIONS,
-        fixed_positions,
+    return facenet_baseline(
+        embedding=embedding,
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
     )
 
-    algorithm = Distance()
-
-    return VanillaBiometricsPipeline(transformer, algorithm)
-
 
 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
diff --git a/bob/bio/face/helpers.py b/bob/bio/face/helpers.py
deleted file mode 100644
index 51bdf9b8e35b1d7004f7acbbae0841c10b7b4772..0000000000000000000000000000000000000000
--- a/bob/bio/face/helpers.py
+++ /dev/null
@@ -1,37 +0,0 @@
-from bob.bio.face.preprocessor import FaceCrop, MultiFaceCrop, Scale
-
-
-def face_crop_solver(
-    cropped_image_size,
-    cropped_positions=None,
-    color_channel="rgb",
-    fixed_positions=None,
-    annotator=None,
-    dtype="uint8",
-):
-    """
-    Decide which face cropper to use.
-    """
-    # If there's not cropped positions, just resize
-    if cropped_positions is None:
-        return Scale(cropped_image_size)
-    else:
-        # Detects the face and crops it without eye detection
-        if isinstance(cropped_positions, list):
-            return MultiFaceCrop(
-                cropped_image_size=cropped_image_size,
-                cropped_positions_list=cropped_positions,
-                fixed_positions_list=fixed_positions,
-                color_channel=color_channel,
-                dtype=dtype,
-                annotation=annotator,
-            )
-        else:
-            return FaceCrop(
-                cropped_image_size=cropped_image_size,
-                cropped_positions=cropped_positions,
-                color_channel=color_channel,
-                fixed_positions=fixed_positions,
-                dtype=dtype,
-                annotator=annotator,
-            )
diff --git a/bob/bio/face/preprocessor/FaceCrop.py b/bob/bio/face/preprocessor/FaceCrop.py
index 14162f2ee9b19b65222d97f32f1f766ba0b45696..21d23b15a58fb0b333fc6d1639cb52975e418d31 100644
--- a/bob/bio/face/preprocessor/FaceCrop.py
+++ b/bob/bio/face/preprocessor/FaceCrop.py
@@ -114,42 +114,12 @@ class FaceCrop(Base):
         allow_upside_down_normalized_faces=False,
         **kwargs,
     ):
-
+        # call base class constructor
         Base.__init__(self, **kwargs)
 
         if isinstance(cropped_image_size, int):
             cropped_image_size = (cropped_image_size, cropped_image_size)
 
-        if isinstance(cropped_positions, str):
-            face_size = cropped_image_size[0]
-
-            if cropped_positions == "eyes-center":
-                eyes_distance = (face_size + 1) / 2.0
-                eyes_center = (face_size / 4.0, (face_size - 0.5) / 2.0)
-                right_eye = (eyes_center[0], eyes_center[1] - eyes_distance / 2)
-                left_eye = (eyes_center[0], eyes_center[1] + eyes_distance / 2)
-                cropped_positions = {"reye": right_eye, "leye": left_eye}
-
-            elif cropped_positions == "bounding-box":
-                cropped_positions = {
-                    "topleft": (0, 0),
-                    "bottomright": cropped_image_size,
-                }
-
-            else:
-                raise ValueError(
-                    f"Got {cropped_positions} as cropped_positions "
-                    "while only eyes and bbox strings are supported."
-                )
-
-        # call base class constructor
-        self.cropped_image_size = cropped_image_size
-        self.cropped_positions = cropped_positions
-        self.fixed_positions = fixed_positions
-        self.mask_sigma = mask_sigma
-        self.mask_neighbors = mask_neighbors
-        self.mask_seed = mask_seed
-
         # check parameters
         assert len(cropped_positions) == 2
         if fixed_positions:
diff --git a/bob/bio/face/config/baseline/helpers.py b/bob/bio/face/utils.py
similarity index 53%
rename from bob/bio/face/config/baseline/helpers.py
rename to bob/bio/face/utils.py
index 764ae274e6ecf5cf3fd7926d6fe595e002a77bdb..988314f0e2e477111f762872c896368c8d5a09ce 100644
--- a/bob/bio/face/config/baseline/helpers.py
+++ b/bob/bio/face/utils.py
@@ -1,18 +1,40 @@
-import bob.bio.face
-from sklearn.pipeline import make_pipeline
-from bob.bio.base.wrappers import wrap_sample_preprocessor
-from bob.pipelines import wrap
-from bob.bio.face.helpers import face_crop_solver
-import numpy as np
 import logging
 
+from .preprocessor import FaceCrop
+from .preprocessor import MultiFaceCrop
+from .preprocessor import Scale
+from bob.pipelines import wrap
+from sklearn.pipeline import make_pipeline
+
 logger = logging.getLogger(__name__)
 
 
-def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
+def lookup_config_from_database(database):
     """
-    Computes the default cropped positions for the FaceCropper used with Facenet-like 
-    Embedding extractors, proportionally to the target image size
+    Reads configuration values (annotation type, fixed positions and the memory-demanding
+    flag) that might already be defined in the database configuration file.
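+
+    For example, a baseline configuration file typically does::
+
+        annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
+            locals().get("database")
+        )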
+    """
+    if database is not None:
+        annotation_type = database.annotation_type
+        fixed_positions = database.fixed_positions
+        memory_demanding = (
+            database.memory_demanding
+            if hasattr(database, "memory_demanding")
+            else False
+        )
+
+    else:
+        annotation_type = None
+        fixed_positions = None
+        memory_demanding = False
+
+    return annotation_type, fixed_positions, memory_demanding
+
+
+def dnn_default_cropping(cropped_image_size, annotation_type):
+    """
+    Computes the default cropped positions for the FaceCropper used with neural-network
+    based extractors, proportionally to the target image size
 
 
     Parameters
@@ -21,7 +43,7 @@ def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
           A tuple (HEIGHT, WIDTH) describing the target size of the cropped image.
 
        annotation_type: str or list of str
-          Type of annotations. Possible values are: `bounding-box`, `eyes-center`, 'left-profile', 
+          Type of annotations. Possible values are: `bounding-box`, `eyes-center`, 'left-profile',
           'right-profile'  and None, or a combination of those as a list
 
     Returns
@@ -33,8 +55,7 @@ def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
     """
     if isinstance(annotation_type, list):
         return [
-            embedding_transformer_default_cropping(cropped_image_size, item)
-            for item in annotation_type
+            dnn_default_cropping(cropped_image_size, item) for item in annotation_type
         ]
 
     CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH = cropped_image_size
@@ -93,7 +114,7 @@ def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
 
 def legacy_default_cropping(cropped_image_size, annotation_type):
     """
-    Computes the default cropped positions for the FaceCropper used with legacy extractors, 
+    Computes the default cropped positions for the FaceCropper used with legacy extractors,
     proportionally to the target image size
 
 
@@ -103,7 +124,7 @@ def legacy_default_cropping(cropped_image_size, annotation_type):
           A tuple (HEIGHT, WIDTH) describing the target size of the cropped image.
 
        annotation_type: str
-          Type of annotations. Possible values are: `bounding-box`, `eyes-center`, 'left-profile', 
+          Type of annotations. Possible values are: `bounding-box`, `eyes-center`, 'left-profile',
           'right-profile' and None, or a combination of those as a list
 
     Returns
@@ -155,27 +176,75 @@ def legacy_default_cropping(cropped_image_size, annotation_type):
     return cropped_positions
 
 
-def embedding_transformer(
+def pad_default_cropping(cropped_image_size, annotation_type):
+    """
+    Computes the default cropped positions for the FaceCropper used in PAD applications,
+    proportionally to the target image size
+
+
+    Parameters
+    ----------
+    cropped_image_size : tuple
+        A tuple (HEIGHT, WIDTH) describing the target size of the cropped image.
+
+    annotation_type: str
+        Type of annotations. Possible values are: `bounding-box`, `eyes-center` and None, or a combination of those as a list
+
+    Returns
+    -------
+
+    cropped_positions:
+        The dictionary of cropped positions that will be fed to the FaceCropper, or a list of such dictionaries if
+        ``annotation_type`` is a list
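+
+        For example, for a square ``cropped_image_size`` of (112, 112) with ``eyes-center``
+        annotations this yields ``{"reye": (28.0, 27.5), "leye": (28.0, 84.0)}``.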
+    """
+    face_size = cropped_image_size[0]
+
+    if cropped_image_size[0] != cropped_image_size[1]:
+        logger.warning(
+            "PAD cropping is designed for a square cropped image size. Got {}".format(
+                cropped_image_size
+            )
+        )
+
+    if annotation_type == "eyes-center":
+        eyes_distance = (face_size + 1) / 2.0
+        eyes_center = (face_size / 4.0, (face_size - 0.5) / 2.0)
+        right_eye = (eyes_center[0], eyes_center[1] - eyes_distance / 2)
+        left_eye = (eyes_center[0], eyes_center[1] + eyes_distance / 2)
+        cropped_positions = {"reye": right_eye, "leye": left_eye}
+
+    elif annotation_type == "bounding-box":
+        cropped_positions = {
+            "topleft": (0, 0),
+            "bottomright": cropped_image_size,
+        }
+    else:
+        logger.warning(
+            f"Annotation type {annotation_type} is not supported. Input images will be fully scaled."
+        )
+        cropped_positions = None
+
+    return cropped_positions
+
+
+def make_cropper(
     cropped_image_size,
-    embedding,
-    annotation_type,
     cropped_positions,
     fixed_positions=None,
     color_channel="rgb",
+    annotator=None,
 ):
     """
-    Creates a pipeline composed by and FaceCropper and an Embedding extractor.
-    This transformer is suited for Facenet based architectures
-    
-    .. warning::
-       This will resize images to the requested `image_size`
-    
+    Solves which face cropper to use and additionally returns the necessary
+    `transform_extra_arguments` for wrapping the cropper with a SampleWrapper.
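+
+    A minimal usage sketch, mirroring how the legacy baselines build their croppers::
+
+        from bob.bio.face.utils import legacy_default_cropping, make_cropper
+        from bob.pipelines import wrap
+
+        cropped_positions = legacy_default_cropping((80, 64), "eyes-center")
+        face_cropper, transform_extra_arguments = make_cropper(
+            cropped_image_size=(80, 64),
+            cropped_positions=cropped_positions,
+            color_channel="gray",
+        )
+        cropper_transformer = wrap(
+            ["sample"], face_cropper, transform_extra_arguments=transform_extra_arguments
+        )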
+
     """
     face_cropper = face_crop_solver(
-        cropped_image_size,
-        color_channel=color_channel,
+        cropped_image_size=cropped_image_size,
         cropped_positions=cropped_positions,
         fixed_positions=fixed_positions,
+        annotator=annotator,
+        color_channel=color_channel,
         dtype="float64",
     )
 
@@ -185,159 +254,111 @@ def embedding_transformer(
         else (("annotations", "annotations"),)
     )
 
-    transformer = make_pipeline(
-        wrap(
-            ["sample"],
-            face_cropper,
-            transform_extra_arguments=transform_extra_arguments,
-        ),
-        wrap(["sample"], embedding),
-    )
-
-    return transformer
+    return face_cropper, transform_extra_arguments
 
 
-def embedding_transformer_160x160(
-    embedding, annotation_type, fixed_positions, color_channel="rgb"
+def embedding_transformer(
+    cropped_image_size,
+    embedding,
+    cropped_positions,
+    fixed_positions=None,
+    color_channel="rgb",
+    annotator=None,
 ):
     """
-    Creates a pipeline composed by and FaceCropper and an Embedding extractor.
-    This transformer is suited for Facenet based architectures
+    Creates a pipeline composed of a FaceCropper and an Embedding extractor.
+    This transformer is suited for FaceNet-based architectures.
-    
+
     .. warning::
-       This will resize images to :math:`160 \times 160`
-    
-    """
-    cropped_positions = embedding_transformer_default_cropping(
-        (160, 160), annotation_type
-    )
+       This will resize images to the requested ``cropped_image_size``
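+
+    A typical call, as done by the baseline templates in this package (``embedding`` being
+    any scikit-learn compatible embedding transformer), looks like::
+
+        transformer = embedding_transformer(
+            cropped_image_size=(160, 160),
+            embedding=embedding,
+            cropped_positions=dnn_default_cropping((160, 160), annotation_type),
+            fixed_positions=None,
+            color_channel="rgb",
+            annotator="mtcnn",
+        )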
 
-    return embedding_transformer(
-        (160, 160),
-        embedding,
-        annotation_type,
-        cropped_positions,
-        fixed_positions,
+    """
+    face_cropper, transform_extra_arguments = make_cropper(
+        cropped_image_size=cropped_image_size,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
         color_channel=color_channel,
+        annotator=annotator,
     )
 
-
-def embedding_transformer_112x112(
-    embedding, annotation_type, fixed_positions, color_channel="rgb"
-):
-    """
-    Creates a pipeline composed by and FaceCropper and an Embedding extractor.
-    This transformer is suited for Facenet based architectures
-    
-    .. warning::
-       This will resize images to :math:`112 \times 112`
-    
-    """
-    cropped_image_size = (112, 112)
-    if annotation_type == "eyes-center":
-        # Hard coding eye positions for backward consistency
-        cropped_positions = {
-            "leye": (55, 81),
-            "reye": (55, 42),
-        }
-
-    else:
-        # Will use default
-        cropped_positions = embedding_transformer_default_cropping(
-            cropped_image_size, annotation_type
-        )
-
-    return embedding_transformer(
-        cropped_image_size,
-        embedding,
-        annotation_type,
-        cropped_positions,
-        fixed_positions,
-        color_channel=color_channel,
+    transformer = make_pipeline(
+        wrap(
+            ["sample"],
+            face_cropper,
+            transform_extra_arguments=transform_extra_arguments,
+        ),
+        wrap(["sample"], embedding),
     )
 
+    return transformer
+
 
-def embedding_transformer_224x224(
-    embedding, annotation_type, fixed_positions, color_channel="rgb"
+def face_crop_solver(
+    cropped_image_size,
+    cropped_positions=None,
+    color_channel="rgb",
+    fixed_positions=None,
+    annotator=None,
+    dtype="uint8",
 ):
     """
-    Creates a pipeline composed by and FaceCropper and an Embedding extractor.
-    This transformer is suited for Facenet based architectures
-    
-    .. warning::
-       This will resize images to :math:`112 \times 112`
-    
+    Decide which face cropper to use.
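+
+    Returns a ``Scale`` transformer when ``cropped_positions`` is ``None``, a
+    ``MultiFaceCrop`` when a list of cropped positions is given (one entry per
+    annotation type), and a plain ``FaceCrop`` otherwise.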
     """
-    cropped_image_size = (224, 224)
-    if annotation_type == "eyes-center":
-        # Hard coding eye positions for backward consistency
-        cropped_positions = {"leye": (65, 150), "reye": (65, 77)}
+    # If there are no cropped positions, just resize
+    if cropped_positions is None:
+        return Scale(cropped_image_size)
     else:
-        # Will use default
-        cropped_positions = embedding_transformer_default_cropping(
-            cropped_image_size, annotation_type
-        )
-
-    return embedding_transformer(
-        cropped_image_size,
-        embedding,
-        annotation_type,
-        cropped_positions,
-        fixed_positions,
-        color_channel=color_channel,
-    )
-
-
-def crop_80x64(annotation_type, fixed_positions=None, color_channel="gray"):
+        # Detects the face and crops it without eye detection
+        if isinstance(cropped_positions, list):
+            return MultiFaceCrop(
+                cropped_image_size=cropped_image_size,
+                cropped_positions_list=cropped_positions,
+                fixed_positions_list=fixed_positions,
+                color_channel=color_channel,
+                dtype=dtype,
+                annotator=annotator,
+            )
+        else:
+            return FaceCrop(
+                cropped_image_size=cropped_image_size,
+                cropped_positions=cropped_positions,
+                color_channel=color_channel,
+                fixed_positions=fixed_positions,
+                dtype=dtype,
+                annotator=annotator,
+            )
+
+
+def get_default_cropped_positions(mode, cropped_image_size, annotation_type):
     """
-    Crops a face to :math:`80 \times 64`
+    Computes the default cropped positions for the FaceCropper,
+    proportionally to the target image size
 
 
     Parameters
     ----------
+    mode: str
+        Which default cropping to use. Available modes are: `legacy` (legacy baselines), `dnn`
+        (also accepted as `facenet` or `arcface`), and `pad`.
 
-       annotation_type: str
-          Type of annotations. Possible values are: `bounding-box`, `eyes-center` and None
-
-       fixed_positions: tuple
-          A tuple containing the annotations. This is used in case your input is already registered
-          with fixed positions (eyes or bounding box)
-
-       color_channel: str
+    cropped_image_size : tuple
+        A tuple (HEIGHT, WIDTH) describing the target size of the cropped image.
 
+    annotation_type: str
+        Type of annotations. Possible values are: `bounding-box`, `eyes-center` and None, or a combination of those as a list
 
     Returns
     -------
 
-      face_cropper:
-         A face cropper to be used
-      
-      transform_extra_arguments:
-         The parameters to the transformer
-
+    cropped_positions:
+        The dictionary of cropped positions that will be fed to the FaceCropper, or a list of such dictionaries if
+        ``annotation_type`` is a list
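+
+        For example, ``get_default_cropped_positions("legacy", (80, 64), "eyes-center")``
+        returns the eye positions used by the legacy 80x64 croppers.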
     """
-    color_channel = color_channel
-    dtype = np.float64
-
-    # Cropping
-    CROPPED_IMAGE_HEIGHT = 80
-    CROPPED_IMAGE_WIDTH = CROPPED_IMAGE_HEIGHT * 4 // 5
-    cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
-
-    cropped_positions = legacy_default_cropping(cropped_image_size, annotation_type)
-
-    face_cropper = face_crop_solver(
-        cropped_image_size,
-        color_channel=color_channel,
-        cropped_positions=cropped_positions,
-        fixed_positions=fixed_positions,
-        dtype=dtype,
-    )
-
-    transform_extra_arguments = (
-        None
-        if (cropped_positions is None or fixed_positions is not None)
-        else (("annotations", "annotations"),)
-    )
-
-    return face_cropper, transform_extra_arguments
+    if mode == "legacy":
+        return legacy_default_cropping(cropped_image_size, annotation_type)
+    elif mode in ["dnn", "facenet", "arcface"]:
+        return dnn_default_cropping(cropped_image_size, annotation_type)
+    elif mode == "pad":
+        return pad_default_cropping(cropped_image_size, annotation_type)
+    else:
+        raise ValueError("Unknown default cropping mode `{}`".format(mode))
diff --git a/doc/conf.py b/doc/conf.py
index be5f7f3fffe99bc877ef4849f826968b1499eb6b..cc6627616e420fec5db36ff5807f2808f668accb 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -26,7 +26,7 @@ extensions = [
     'sphinx.ext.napoleon',
     'sphinx.ext.viewcode',
     'sphinx.ext.mathjax',
-    #'matplotlib.sphinxext.plot_directive'
+    'matplotlib.sphinxext.plot_directive',
     ]
 
 # Be picky about warnings
diff --git a/doc/faq.rst b/doc/faq.rst
index e60e626f6f9815f7a2c9a77b7819e6374dd8f16c..e52795bfa90aada769beeb1b7bdc6408a69191da 100644
--- a/doc/faq.rst
+++ b/doc/faq.rst
@@ -11,4 +11,32 @@ The recipe below helps you to set a face cropper based on eye positions.
 
 .. literalinclude:: faq/facecrop.py
 
-.. figure:: ./faq/img/ada_cropped.png
\ No newline at end of file
+.. figure:: ./faq/img/ada_cropped.png
+
+
+How to choose the cropped positions?
+=====================================
+
+The ideal cropped positions depend on the specific application in which you use the face cropper.
+Some face embedding extractors work well on loosely cropped faces, while others require the face to be tightly cropped.
+We provide a few reasonable defaults that are used in our implemented baselines. They are accessible through a function as follows:
+::
+
+    from bob.bio.face.utils import get_default_cropped_positions
+    mode = 'legacy'
+    cropped_image_size = (160, 160)
+    annotation_type = 'eyes-center'
+    cropped_positions = get_default_cropped_positions(mode, cropped_image_size, annotation_type)
+
+
+There are currently three available modes:
+
+* :code:`legacy` Tight crop, used in non-neural-net baselines such as :code:`gabor-graph`, :code:`lgbphs` or :code:`lda`.
+  It is typically used with a 5:4 aspect ratio for the :code:`cropped_image_size`.
+* :code:`dnn` Loose crop, used for neural-net baselines such as the ArcFace or FaceNet models.
+* :code:`pad` Tight crop, used in some PAD baselines.
+
+Below is a visual example of these crops for the :code:`eyes-center` annotation type.
+
+.. plot:: plot/default_crops.py
+    :include-source: True
diff --git a/doc/img/cropping_example_source.png b/doc/img/cropping_example_source.png
new file mode 100644
index 0000000000000000000000000000000000000000..cd4de029371ba1a224ea6006906a9640021d8295
Binary files /dev/null and b/doc/img/cropping_example_source.png differ
diff --git a/doc/implemented.rst b/doc/implemented.rst
index cf5801e365f162175a8f6e88c49ca06d934ff18f..a53938b64b5b8527bb7be9edaafc01a9a6ebe0ee 100644
--- a/doc/implemented.rst
+++ b/doc/implemented.rst
@@ -91,5 +91,9 @@ Algorithms
 
 .. automodule:: bob.bio.face.algorithm
 
+Utilities
+---------
+
+.. automodule:: bob.bio.face.utils
 
 .. include:: links.rst
diff --git a/doc/plot/default_crops.py b/doc/plot/default_crops.py
new file mode 100644
index 0000000000000000000000000000000000000000..c1b9ac6bd8c668199c56d7c63acdbcf9dada24c7
--- /dev/null
+++ b/doc/plot/default_crops.py
@@ -0,0 +1,43 @@
+import bob.io.image
+from bob.bio.face.utils import get_default_cropped_positions
+from bob.bio.face.preprocessor import FaceCrop
+import matplotlib.pyplot as plt
+
+src = bob.io.image.load("../img/cropping_example_source.png")
+modes = ["legacy", "dnn", "pad"]
+cropped_images = []
+
+
+SIZE = 160
+# Pick cropping mode
+for mode in modes:
+    if mode == "legacy":
+        cropped_image_size = (SIZE, 4 * SIZE // 5)
+    else:
+        cropped_image_size = (SIZE, SIZE)
+
+    annotation_type = "eyes-center"
+    # Load default cropped positions
+    cropped_positions = get_default_cropped_positions(
+        mode, cropped_image_size, annotation_type
+    )
+
+    # Instanciate cropper and crop
+    cropper = FaceCrop(
+        cropped_image_size=cropped_image_size,
+        cropped_positions=cropped_positions,
+        fixed_positions={"reye": (480, 380), "leye": (480, 650)},
+        color_channel="rgb",
+    )
+
+    cropped_images.append(cropper.transform([src])[0].astype("uint8"))
+
+
+# Visualize cropped images
+fig, axes = plt.subplots(2, 2, figsize=(10, 10))
+
+for i, (img, label) in enumerate(zip([src] + cropped_images, ["original"] + modes)):
+    ax = axes[i // 2, i % 2]
+    ax.axis("off")
+    ax.imshow(bob.io.image.to_matplotlib(img))
+    ax.set_title(label)