diff --git a/bob/bio/face/config/baseline/arcface_insightface.py b/bob/bio/face/config/baseline/arcface_insightface.py
index b2de000f4d30dbbbdc7c34592f19b249e69b976c..db164c301c785c23be4be2710351d9436b84f18b 100644
--- a/bob/bio/face/config/baseline/arcface_insightface.py
+++ b/bob/bio/face/config/baseline/arcface_insightface.py
@@ -1,7 +1,8 @@
 from bob.bio.face.embeddings.mxnet_models import ArcFaceInsightFace
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_112x112,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -12,10 +13,22 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_112x112(
-        ArcFaceInsightFace(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (112, 112)
+    if annotation_type == "eyes-center":
+        # Hard-coding eye positions for backward compatibility
+        cropped_positions = {
+            "leye": (55, 81),
+            "reye": (55, 42),
+        }
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=ArcFaceInsightFace(memory_demanding=memory_demanding),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
         color_channel="rgb",
     )
 
diff --git a/bob/bio/face/config/baseline/facenet_sanderberg.py b/bob/bio/face/config/baseline/facenet_sanderberg.py
index 20e3651aab7a2701e5b670481beb46e6126e4b79..bdefacb8c1c1ea1167c86c8077fc17c8555aa35c 100644
--- a/bob/bio/face/config/baseline/facenet_sanderberg.py
+++ b/bob/bio/face/config/baseline/facenet_sanderberg.py
@@ -2,9 +2,11 @@ from bob.bio.face.embeddings.tf2_inception_resnet import (
     FaceNetSanderberg_20170512_110547,
 )
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_160x160,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
+
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
     VanillaBiometricsPipeline,
@@ -14,12 +16,20 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 
 
 def load(annotation_type, fixed_positions=None):
+    # DEFINE CROPPING
+    cropped_image_size = (160, 160)
+    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
 
-    transformer = embedding_transformer_160x160(
-        FaceNetSanderberg_20170512_110547(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # ASSEMBLE TRANSFORMER
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=FaceNetSanderberg_20170512_110547(memory_demanding=memory_demanding),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
     )
+
     algorithm = Distance()
 
     return VanillaBiometricsPipeline(transformer, algorithm)
diff --git a/bob/bio/face/config/baseline/helpers.py b/bob/bio/face/config/baseline/helpers.py
index 070ac4ddeb1a3daa0f6d750c55fd074af0d80e36..0e0aefecd11d7f325fc390e1fb19bcebe1ce7340 100644
--- a/bob/bio/face/config/baseline/helpers.py
+++ b/bob/bio/face/config/baseline/helpers.py
@@ -27,10 +27,10 @@ def lookup_config_from_database():
     return annotation_type, fixed_positions, memory_demanding
 
 
-def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
+def dnn_default_cropping(cropped_image_size, annotation_type):
     """
-    Computes the default cropped positions for the FaceCropper used with Facenet-like
-    Embedding extractors, proportionally to the target image size
+    Computes the default cropped positions for the FaceCropper used with neural-network-based
+    extractors, proportionally to the target image size
 
 
     Parameters
@@ -51,8 +51,7 @@ def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
     """
     if isinstance(annotation_type, list):
         return [
-            embedding_transformer_default_cropping(cropped_image_size, item)
-            for item in annotation_type
+            dnn_default_cropping(cropped_image_size, item) for item in annotation_type
         ]
 
     CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH = cropped_image_size
@@ -175,7 +174,6 @@ def legacy_default_cropping(cropped_image_size, annotation_type):
 
 def make_cropper(
     cropped_image_size,
-    annotation_type,
     cropped_positions,
     fixed_positions=None,
     color_channel="rgb",
@@ -183,12 +181,12 @@ def make_cropper(
 ):
 
     face_cropper = face_crop_solver(
-        cropped_image_size,
-        color_channel=color_channel,
+        cropped_image_size=cropped_image_size,
         cropped_positions=cropped_positions,
         fixed_positions=fixed_positions,
-        dtype="float64",
         annotator=annotator,
+        color_channel=color_channel,
+        dtype="float64",
     )
 
     transform_extra_arguments = (
@@ -203,10 +201,10 @@ def make_cropper(
 def embedding_transformer(
     cropped_image_size,
     embedding,
-    annotation_type,
     cropped_positions,
     fixed_positions=None,
     color_channel="rgb",
+    annotator=None,
 ):
     """
     Creates a pipeline composed of a FaceCropper and an Embedding extractor.
@@ -216,18 +214,12 @@ def embedding_transformer(
        This will resize images to the requested `cropped_image_size`
 
     """
-    face_cropper = face_crop_solver(
-        cropped_image_size,
-        color_channel=color_channel,
+    face_cropper, transform_extra_arguments = make_cropper(
+        cropped_image_size=cropped_image_size,
         cropped_positions=cropped_positions,
         fixed_positions=fixed_positions,
-        dtype="float64",
-    )
-
-    transform_extra_arguments = (
-        None
-        if (cropped_positions is None or fixed_positions is not None)
-        else (("annotations", "annotations"),)
+        color_channel=color_channel,
+        annotator=annotator,
     )
 
     transformer = make_pipeline(
@@ -242,66 +234,6 @@ def embedding_transformer(
     return transformer
 
 
-def embedding_transformer_160x160(
-    embedding, annotation_type, fixed_positions, color_channel="rgb"
-):
-    """
-    Creates a pipeline composed by and FaceCropper and an Embedding extractor.
-    This transformer is suited for Facenet based architectures
-
-    .. warning::
-       This will resize images to :math:`160 \times 160`
-
-    """
-    cropped_positions = embedding_transformer_default_cropping(
-        (160, 160), annotation_type
-    )
-
-    return embedding_transformer(
-        (160, 160),
-        embedding,
-        annotation_type,
-        cropped_positions,
-        fixed_positions,
-        color_channel=color_channel,
-    )
-
-
-def embedding_transformer_112x112(
-    embedding, annotation_type, fixed_positions, color_channel="rgb"
-):
-    """
-    Creates a pipeline composed by and FaceCropper and an Embedding extractor.
-    This transformer is suited for Facenet based architectures
-
-    .. warning::
-       This will resize images to :math:`112 \times 112`
-
-    """
-    cropped_image_size = (112, 112)
-    if annotation_type == "eyes-center":
-        # Hard coding eye positions for backward consistency
-        cropped_positions = {
-            "leye": (55, 81),
-            "reye": (55, 42),
-        }
-
-    else:
-        # Will use default
-        cropped_positions = embedding_transformer_default_cropping(
-            cropped_image_size, annotation_type
-        )
-
-    return embedding_transformer(
-        cropped_image_size,
-        embedding,
-        annotation_type,
-        cropped_positions,
-        fixed_positions,
-        color_channel=color_channel,
-    )
-
-
 def embedding_transformer_224x224(
     embedding, annotation_type, fixed_positions, color_channel="rgb"
 ):
@@ -319,15 +251,12 @@ def embedding_transformer_224x224(
         cropped_positions = {"leye": (65, 150), "reye": (65, 77)}
     else:
         # Will use default
-        cropped_positions = embedding_transformer_default_cropping(
-            cropped_image_size, annotation_type
-        )
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
 
     return embedding_transformer(
-        cropped_image_size,
-        embedding,
-        annotation_type,
-        cropped_positions,
-        fixed_positions,
+        cropped_image_size=cropped_image_size,
+        embedding=embedding,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
         color_channel=color_channel,
     )
diff --git a/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py b/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
index 90bd0111248cfd6ebd07bb7f9eb7642b44f41c11..b532c815b07fe09cf307a8f267569585273fec10 100644
--- a/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
+++ b/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
@@ -2,8 +2,9 @@ from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv1_Casia_CenterLoss_2018,
 )
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_160x160,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -14,10 +15,20 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_160x160(
-        InceptionResnetv1_Casia_CenterLoss_2018(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (160, 160)
+    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    # ASSEMBLE TRANSFORMER
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=InceptionResnetv1_Casia_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
     )
 
     algorithm = Distance()
diff --git a/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py b/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
index c459709626a019701667d3276ad06e1f8873d6a8..db94eddd46344b6b48f569fc51c2ee375fdb6996 100644
--- a/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
+++ b/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
@@ -2,8 +2,9 @@ from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv1_MsCeleb_CenterLoss_2018,
 )
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_160x160,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -14,10 +15,20 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_160x160(
-        InceptionResnetv1_MsCeleb_CenterLoss_2018(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (160, 160)
+    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    # ASSEMBLE TRANSFORMER
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=InceptionResnetv1_MsCeleb_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
     )
 
     algorithm = Distance()
diff --git a/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py b/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
index cb82dee8f8e1f4a516721fd33f06a984a30b65f5..c20d856c366f263634eecf7987e66db8aeb737f5 100644
--- a/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
+++ b/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
@@ -2,8 +2,9 @@ from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv2_Casia_CenterLoss_2018,
 )
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_160x160,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -14,10 +15,20 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_160x160(
-        InceptionResnetv2_Casia_CenterLoss_2018(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (160, 160)
+    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    # ASSEMBLE TRANSFORMER
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=InceptionResnetv2_Casia_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
     )
 
     algorithm = Distance()
diff --git a/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py b/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
index fa502308c7629ba3d2d8487582cce6428603c4eb..d44585ebe97b16a075dcf194143eb92c02c6d5fa 100644
--- a/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
+++ b/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
@@ -2,8 +2,9 @@ from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv2_MsCeleb_CenterLoss_2018,
 )
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_160x160,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -14,10 +15,20 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_160x160(
-        InceptionResnetv2_MsCeleb_CenterLoss_2018(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (160, 160)
+    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    # ASSEMBLE TRANSFORMER
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=InceptionResnetv2_MsCeleb_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
     )
 
     algorithm = Distance()
diff --git a/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py b/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py
index aae95a2a7d5aed639dba7c308b1d54cbb89e99b8..2ca17ebeda8e954ffb45e85beb36911d673aeb92 100644
--- a/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py
+++ b/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py
@@ -1,7 +1,8 @@
 from bob.bio.face.embeddings.mobilenet_v2 import MobileNetv2_MsCeleb_ArcFace_2021
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_112x112,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -12,10 +13,23 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_112x112(
-        MobileNetv2_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (112, 112)
+    if annotation_type == "eyes-center":
+        # Hard-coding eye positions for backward compatibility
+        cropped_positions = {
+            "leye": (55, 81),
+            "reye": (55, 42),
+        }
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=MobileNetv2_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
     )
 
     algorithm = Distance()
diff --git a/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py b/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py
index f60903ace50188014553d8a6d25ebc06ef0cdbd4..c505666297de735a746dc362388d4946ee1bd398 100644
--- a/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py
+++ b/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py
@@ -1,8 +1,10 @@
 from bob.bio.face.embeddings.resnet50 import Resnet50_MsCeleb_ArcFace_2021
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_112x112,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
+
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
     VanillaBiometricsPipeline,
@@ -12,10 +14,23 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_112x112(
-        Resnet50_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (112, 112)
+    if annotation_type == "eyes-center":
+        # Hard-coding eye positions for backward compatibility
+        cropped_positions = {
+            "leye": (55, 81),
+            "reye": (55, 42),
+        }
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=Resnet50_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
     )
 
     algorithm = Distance()
diff --git a/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py b/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py
index 0a6941d0e9c670a65dc7605dac0c5f38bc5b1788..05eaa49bc610ad3fc5aecbcc6a23977897e96890 100644
--- a/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py
+++ b/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py
@@ -1,7 +1,8 @@
 from bob.bio.face.embeddings.resnet50 import Resnet50_VGG2_ArcFace_2021
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_112x112,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -12,10 +13,23 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_112x112(
-        Resnet50_VGG2_ArcFace_2021(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (112, 112)
+    if annotation_type == "eyes-center":
+        # Hard-coding eye positions for backward compatibility
+        cropped_positions = {
+            "leye": (55, 81),
+            "reye": (55, 42),
+        }
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=Resnet50_VGG2_ArcFace_2021(memory_demanding=memory_demanding),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
     )
 
     algorithm = Distance()
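The 112x112 ArcFace-family configs above all repeat the same cropping choice: keep the historical hard-coded eye positions when "eyes-center" annotations are available, and fall back to the proportional defaults otherwise. A condensed sketch of that branch (the positions appear to be (row, column) coordinates inside the 112x112 crop; `annotation_type` is set to an illustrative value):

    from bob.bio.face.config.baseline.helpers import dnn_default_cropping

    cropped_image_size = (112, 112)
    annotation_type = "eyes-center"  # illustrative value

    if annotation_type == "eyes-center":
        # Hard-coded eye positions kept for backward compatibility
        cropped_positions = {"leye": (55, 81), "reye": (55, 42)}
    else:
        # Otherwise derive positions proportionally to the crop size
        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)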
diff --git a/bob/bio/face/config/baseline/tf2_inception_resnet.py b/bob/bio/face/config/baseline/tf2_inception_resnet.py
index f71e0a78bdd43a141028687b0e7acd70ee70cabb..619bcf82a093dfa60fb7bc2364ada55d5c983f65 100644
--- a/bob/bio/face/config/baseline/tf2_inception_resnet.py
+++ b/bob/bio/face/config/baseline/tf2_inception_resnet.py
@@ -2,9 +2,9 @@ from bob.extension import rc
 from bob.bio.face.embeddings.tf2_inception_resnet import InceptionResnetv2
 from bob.bio.face.preprocessor import FaceCrop
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_default_cropping,
-    embedding_transformer,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 
 from sklearn.pipeline import make_pipeline
@@ -18,23 +18,22 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 
 
 def load(annotation_type, fixed_positions=None):
-    CROPPED_IMAGE_SIZE = (160, 160)
-    CROPPED_POSITIONS = embedding_transformer_default_cropping(
-        CROPPED_IMAGE_SIZE, annotation_type=annotation_type
-    )
+    # DEFINE CROPPING
+    cropped_image_size = (160, 160)
+    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
 
     extractor_path = rc["bob.bio.face.tf2.casia-webface-inception-v2"]
-
     embedding = InceptionResnetv2(
         checkpoint_path=extractor_path, memory_demanding=memory_demanding
     )
-
+    # ASSEMBLE TRANSFORMER
     transformer = embedding_transformer(
-        CROPPED_IMAGE_SIZE,
-        embedding,
-        annotation_type,
-        CROPPED_POSITIONS,
-        fixed_positions,
+        cropped_image_size=cropped_image_size,
+        embedding=embedding,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
     )
 
     algorithm = Distance()