diff --git a/bob/bio/face/config/baseline/gabor_graph.py b/bob/bio/face/config/baseline/gabor_graph.py
index 6c79bffd407e26b327ca22574ba9f5c08b84b7a8..777f4ab1eac8c1931b5924c4f22cf9bee8790e88 100644
--- a/bob/bio/face/config/baseline/gabor_graph.py
+++ b/bob/bio/face/config/baseline/gabor_graph.py
@@ -3,7 +3,11 @@ from bob.bio.base.pipelines.vanilla_biometrics import (
     VanillaBiometricsPipeline,
     BioAlgorithmLegacy,
 )
-from bob.bio.face.config.baseline.helpers import crop_80x64, lookup_config_from_database
+from bob.bio.face.config.baseline.helpers import (
+    lookup_config_from_database,
+    legacy_default_cropping,
+    make_cropper,
+)
 import math
 import numpy as np
 import bob.bio.face
@@ -20,14 +24,6 @@ logger = logging.getLogger(__name__)
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()
 
 
-def get_cropper(annotation_type, fixed_positions=None):
-    # Cropping
-    face_cropper, transform_extra_arguments = crop_80x64(
-        annotation_type, fixed_positions, color_channel="gray"
-    )
-    return face_cropper, transform_extra_arguments
-
-
 def get_pipeline(face_cropper, transform_extra_arguments):
     preprocessor = bob.bio.face.preprocessor.INormLBP(
         face_cropper=face_cropper, dtype=np.float64
@@ -75,9 +71,22 @@ def get_pipeline(face_cropper, transform_extra_arguments):
 
 def load(annotation_type, fixed_positions=None):
     ####### SOLVING THE FACE CROPPER TO BE USED ##########
-    face_cropper, transform_extra_arguments = get_cropper(
-        annotation_type, fixed_positions
+
+    # Define the size of the cropped image (80x64, the legacy default)
+    CROPPED_IMAGE_HEIGHT = 80
+    CROPPED_IMAGE_WIDTH = CROPPED_IMAGE_HEIGHT * 4 // 5
+    cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
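+    # Use the default cropped positions (eyes or bounding box) for this annotation type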
+    cropped_positions = legacy_default_cropping(cropped_image_size, annotation_type)
+
+    # Cropping
+    face_cropper, transform_extra_arguments = make_cropper(
+        cropped_image_size=cropped_image_size,
+        annotation_type=annotation_type,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="gray",
     )
+
     return get_pipeline(face_cropper, transform_extra_arguments)
 
 
diff --git a/bob/bio/face/config/baseline/helpers.py b/bob/bio/face/config/baseline/helpers.py
index 41f3e837841dfc6716e3ca82b07d7eb6983dea76..070ac4ddeb1a3daa0f6d750c55fd074af0d80e36 100644
--- a/bob/bio/face/config/baseline/helpers.py
+++ b/bob/bio/face/config/baseline/helpers.py
@@ -173,6 +173,33 @@ def legacy_default_cropping(cropped_image_size, annotation_type):
     return cropped_positions
 
 
+def make_cropper(
+    cropped_image_size,
+    annotation_type,
+    cropped_positions,
+    fixed_positions=None,
+    color_channel="rgb",
+    annotator=None,
+):
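+    """
+    Solve the face cropper and the extra arguments to be passed to its transformer.
+
+    Parameters
+    ----------
+
+       cropped_image_size: tuple
+          Size of the cropped image as `(height, width)`
+
+       annotation_type: str
+          Type of annotations. Possible values are: `bounding-box`, `eyes-center` and None
+
+       cropped_positions: dict
+          Coordinates, in the cropped image, where the annotated points should end up
+
+       fixed_positions: tuple
+          A tuple containing the annotations. This is used in case your input is already registered
+          with fixed positions (eyes or bounding box)
+
+       color_channel: str
+          Color channel to work on (e.g. `rgb` or `gray`)
+
+       annotator:
+          Annotator to be used if the input samples are not annotated
+
+    Returns
+    -------
+
+      face_cropper:
+         A face cropper to be used
+
+      transform_extra_arguments:
+         The parameters to the transformer
+    """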
+
+    face_cropper = face_crop_solver(
+        cropped_image_size,
+        color_channel=color_channel,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        dtype="float64",
+        annotator=annotator,
+    )
+
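+    # Forward the database annotations to the transformer only when the cropper
+    # actually needs them (i.e. no fixed positions and known cropped positions)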
+    transform_extra_arguments = (
+        None
+        if (cropped_positions is None or fixed_positions is not None)
+        else (("annotations", "annotations"),)
+    )
+
+    return face_cropper, transform_extra_arguments
+
+
 def embedding_transformer(
     cropped_image_size,
     embedding,
@@ -304,58 +331,3 @@ def embedding_transformer_224x224(
         fixed_positions,
         color_channel=color_channel,
     )
-
-
-def crop_80x64(annotation_type, fixed_positions=None, color_channel="gray"):
-    """
-    Crops a face to :math:`80 \times 64`
-
-
-    Parameters
-    ----------
-
-       annotation_type: str
-          Type of annotations. Possible values are: `bounding-box`, `eyes-center` and None
-
-       fixed_positions: tuple
-          A tuple containing the annotations. This is used in case your input is already registered
-          with fixed positions (eyes or bounding box)
-
-       color_channel: str
-
-
-    Returns
-    -------
-
-      face_cropper:
-         A face cropper to be used
-
-      transform_extra_arguments:
-         The parameters to the transformer
-
-    """
-    color_channel = color_channel
-    dtype = np.float64
-
-    # Cropping
-    CROPPED_IMAGE_HEIGHT = 80
-    CROPPED_IMAGE_WIDTH = CROPPED_IMAGE_HEIGHT * 4 // 5
-    cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
-
-    cropped_positions = legacy_default_cropping(cropped_image_size, annotation_type)
-
-    face_cropper = face_crop_solver(
-        cropped_image_size,
-        color_channel=color_channel,
-        cropped_positions=cropped_positions,
-        fixed_positions=fixed_positions,
-        dtype=dtype,
-    )
-
-    transform_extra_arguments = (
-        None
-        if (cropped_positions is None or fixed_positions is not None)
-        else (("annotations", "annotations"),)
-    )
-
-    return face_cropper, transform_extra_arguments
diff --git a/bob/bio/face/config/baseline/lda.py b/bob/bio/face/config/baseline/lda.py
index 062f18f6423734e9e4d7fa4dcbc881a8ec4a093a..ee8877b2b4d841bc48a15ed6c787c2aad9a609c6 100644
--- a/bob/bio/face/config/baseline/lda.py
+++ b/bob/bio/face/config/baseline/lda.py
@@ -3,7 +3,11 @@ from bob.bio.base.pipelines.vanilla_biometrics import (
     VanillaBiometricsPipeline,
     BioAlgorithmLegacy,
 )
-from bob.bio.face.config.baseline.helpers import crop_80x64, lookup_config_from_database
+from bob.bio.face.config.baseline.helpers import (
+    lookup_config_from_database,
+    legacy_default_cropping,
+    make_cropper,
+)
 import numpy as np
 import bob.bio.face
 from sklearn.pipeline import make_pipeline
@@ -24,9 +28,19 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 ####### SOLVING THE FACE CROPPER TO BE USED ##########
 def load(annotation_type, fixed_positions=None):
 
+    # Define the size of the cropped image (80x64, the legacy default)
+    CROPPED_IMAGE_HEIGHT = 80
+    CROPPED_IMAGE_WIDTH = CROPPED_IMAGE_HEIGHT * 4 // 5
+    cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
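+    # Use the default cropped positions (eyes or bounding box) for this annotation type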
+    cropped_positions = legacy_default_cropping(cropped_image_size, annotation_type)
+
     # Cropping
-    face_cropper, transform_extra_arguments = crop_80x64(
-        annotation_type, fixed_positions, color_channel="gray"
+    face_cropper, transform_extra_arguments = make_cropper(
+        cropped_image_size=cropped_image_size,
+        annotation_type=annotation_type,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="gray",
     )
 
     preprocessor = bob.bio.face.preprocessor.TanTriggs(
@@ -57,7 +71,9 @@ def load(annotation_type, fixed_positions=None):
     ### BIOMETRIC ALGORITHM
 
     algorithm = BioAlgorithmLegacy(
-        lda, base_dir=tempdir, projector_file=os.path.join(tempdir, "Projector.hdf5"),
+        lda,
+        base_dir=tempdir,
+        projector_file=os.path.join(tempdir, "Projector.hdf5"),
     )
 
     return VanillaBiometricsPipeline(transformer, algorithm)
diff --git a/bob/bio/face/config/baseline/lgbphs.py b/bob/bio/face/config/baseline/lgbphs.py
index 902f43cfd55d3de77ce14f426bce118d1e5ae98d..353bb05413d79ba066b3a9b626c70837497d6532 100644
--- a/bob/bio/face/config/baseline/lgbphs.py
+++ b/bob/bio/face/config/baseline/lgbphs.py
@@ -3,7 +3,11 @@ from bob.bio.base.pipelines.vanilla_biometrics import (
     VanillaBiometricsPipeline,
     BioAlgorithmLegacy,
 )
-from bob.bio.face.config.baseline.helpers import crop_80x64, lookup_config_from_database
+from bob.bio.face.config.baseline.helpers import (
+    lookup_config_from_database,
+    legacy_default_cropping,
+    make_cropper,
+)
 import math
 import numpy as np
 import bob.bio.face
@@ -16,14 +20,6 @@ import bob.math
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()
 
 
-def get_cropper(annotation_type, fixed_positions=None):
-    # Cropping
-    face_cropper, transform_extra_arguments = crop_80x64(
-        annotation_type, fixed_positions, color_channel="gray"
-    )
-    return face_cropper, transform_extra_arguments
-
-
 def get_pipeline(face_cropper, transform_extra_arguments):
     preprocessor = bob.bio.face.preprocessor.TanTriggs(
         face_cropper=face_cropper, dtype=np.float64
@@ -65,10 +61,22 @@ def get_pipeline(face_cropper, transform_extra_arguments):
 
 
 def load(annotation_type, fixed_positions=None):
+    # Define the size of the cropped image (80x64, the legacy default)
+    CROPPED_IMAGE_HEIGHT = 80
+    CROPPED_IMAGE_WIDTH = CROPPED_IMAGE_HEIGHT * 4 // 5
+    cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
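+    # Use the default cropped positions (eyes or bounding box) for this annotation type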
+    cropped_positions = legacy_default_cropping(cropped_image_size, annotation_type)
+
     ####### SOLVING THE FACE CROPPER TO BE USED ##########
-    face_cropper, transform_extra_arguments = get_cropper(
-        annotation_type, fixed_positions
+    # Cropping
+    face_cropper, transform_extra_arguments = make_cropper(
+        cropped_image_size=cropped_image_size,
+        annotation_type=annotation_type,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="gray",
     )
+
     return get_pipeline(face_cropper, transform_extra_arguments)