diff --git a/bob/bio/face/config/baseline/afffe.py b/bob/bio/face/config/baseline/afffe.py
index 68d4964c75c4b7cffcfcb4e5959c66e61f593644..840ae58e9ffbe5bd8a455a0c6c9b292474ae4968 100644
--- a/bob/bio/face/config/baseline/afffe.py
+++ b/bob/bio/face/config/baseline/afffe.py
@@ -1,61 +1,15 @@
-import bob.bio.base
-from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.face.embeddings.pytorch import AFFFE_2021
-from bob.pipelines import wrap
-import scipy.spatial
-from bob.bio.base.pipelines.vanilla_biometrics import Distance
-from sklearn.pipeline import make_pipeline
-from bob.pipelines import wrap
-from bob.bio.base.pipelines.vanilla_biometrics import VanillaBiometricsPipeline
+from bob.bio.face.embeddings.pytorch import afffe_baseline
+from bob.bio.face.utils import lookup_config_from_database
 
 
-memory_demanding = False
-if "database" in locals():
-    annotation_type = database.annotation_type
-    fixed_positions = database.fixed_positions
-    memory_demanding = (
-        database.memory_demanding if hasattr(database, "memory_demanding") else False
-    )
-
-else:
-    annotation_type = None
-    fixed_positions = None
-
-cropped_positions = {"leye": (110, 144), "reye": (110, 96)}
-
-preprocessor_transformer = FaceCrop(
-    cropped_image_size=(224, 224),
-    cropped_positions=cropped_positions,
-    color_channel="rgb",
-    fixed_positions=fixed_positions,
-    allow_upside_down_normalized_faces=True,
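+# When this file is loaded through the bob configuration mechanism (e.g. by
+# ``bob bio pipelines vanilla-biometrics``), a ``database`` object may already
+# be defined in ``locals()``; ``lookup_config_from_database`` then reads the
+# annotation type and fixed positions from it and falls back to ``None``
+# otherwise.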
+annotation_type, fixed_positions, _ = lookup_config_from_database(
+    locals().get("database")
 )
 
-transform_extra_arguments = (
-    None
-    if (cropped_positions is None or fixed_positions is not None)
-    else (("annotations", "annotations"),)
-)
 
-extractor_transformer = AFFFE_2021()
+def load(annotation_type, fixed_positions=None):
+    return afffe_baseline(annotation_type, fixed_positions)
 
-# Algorithm
-algorithm = Distance(
-    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
-)
-
-# Chain the Transformers together
-transformer = make_pipeline(
-    wrap(
-        ["sample"],
-        preprocessor_transformer,
-        transform_extra_arguments=transform_extra_arguments,
-    ),
-    wrap(["sample"], extractor_transformer)
-    # Add more transformers here if needed
-)
 
+pipeline = load(annotation_type, fixed_positions)
 
-# Assemble the Vanilla Biometric pipeline and execute
-pipeline = VanillaBiometricsPipeline(transformer, algorithm)
-transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/arcface_insightface.py b/bob/bio/face/config/baseline/arcface_insightface.py
index c2f3fc9a779e09ca59b3de0c59d2976fa7adeb62..b65ea11ed686aa57909fd10ad7aa9aa99515d76f 100644
--- a/bob/bio/face/config/baseline/arcface_insightface.py
+++ b/bob/bio/face/config/baseline/arcface_insightface.py
@@ -1,21 +1,5 @@
-from bob.bio.face.embeddings.mxnet import ArcFaceInsightFace_LResNet100
-from bob.bio.face.config.baseline.helpers import embedding_transformer_112x112
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
-
-
-if "database" in locals():
-    annotation_type = database.annotation_type
-    fixed_positions = database.fixed_positions
-    memory_demanding = (
-        database.memory_demanding if hasattr(database, "memory_demanding") else False
-    )
-else:
-    annotation_type = None
-    fixed_positions = None
-    memory_demanding = False
+from bob.bio.face.embeddings.mxnet import (
+    ArcFaceInsightFace_LResNet100,
+    arcface_baseline,
+)
+from bob.bio.face.utils import lookup_config_from_database
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
     locals().get("database")
@@ -23,19 +7,12 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 
 
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_112x112(
-        ArcFaceInsightFace_LResNet100(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
-        color_channel="rgb",
-    )
 
     return arcface_baseline(
-        embedding=ArcFaceInsightFace(memory_demanding=memory_demanding),
+        embedding=ArcFaceInsightFace_LResNet100(memory_demanding=memory_demanding),
         annotation_type=annotation_type,
         fixed_positions=fixed_positions,
     )
 
 
 pipeline = load(annotation_type, fixed_positions)
-transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/facenet_sanderberg.py b/bob/bio/face/config/baseline/facenet_sanderberg.py
index 510d9287fba4613f072482f0f57026d8fec824ba..dba9045b0df594b6b7ef99c88f48af66a7f5dbe0 100644
--- a/bob/bio/face/config/baseline/facenet_sanderberg.py
+++ b/bob/bio/face/config/baseline/facenet_sanderberg.py
@@ -1,8 +1,5 @@
-from bob.bio.face.embeddings.tf2_inception_resnet import (
-    FaceNetSanderberg_20170512_110547,
-)
+from bob.bio.face.embeddings.tensorflow import facenet_sanderberg_20170512_110547
 from bob.bio.face.utils import lookup_config_from_database
-from bob.bio.face.config.baseline.templates import facenet_baseline
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
     locals().get("database")
@@ -10,12 +7,9 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 
 
 def load(annotation_type, fixed_positions=None):
-    return facenet_baseline(
-        embedding=FaceNetSanderberg_20170512_110547(memory_demanding=memory_demanding),
-        annotation_type=annotation_type,
-        fixed_positions=fixed_positions,
+    return facenet_sanderberg_20170512_110547(
+        annotation_type, fixed_positions, memory_demanding
     )
 
 
 pipeline = load(annotation_type, fixed_positions)
-transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py b/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
index 149c074270a1001513dfbeacb0cbf09325784b28..1bb1b0bf61ed5cbb35ff97f84f37a19033bc33b4 100644
--- a/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
+++ b/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
@@ -1,23 +1,15 @@
-from bob.bio.face.embeddings.tf2_inception_resnet import (
-    InceptionResnetv1_Casia_CenterLoss_2018,
-)
+from bob.bio.face.embeddings.tensorflow import inception_resnet_v1_casia_centerloss_2018
 from bob.bio.face.utils import lookup_config_from_database
-from bob.bio.face.config.baseline.templates import facenet_baseline
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
     locals().get("database")
 )
 
 
-def load(annotation_type, fixed_positions=None):
-    return facenet_baseline(
-        embedding=InceptionResnetv1_Casia_CenterLoss_2018(
-            memory_demanding=memory_demanding
-        ),
-        annotation_type=annotation_type,
-        fixed_positions=fixed_positions,
+def load(annotation_type, fixed_positions=None, memory_demanding=None):
+    return inception_resnet_v1_casia_centerloss_2018(
+        annotation_type, fixed_positions, memory_demanding
     )
 
 
-pipeline = load(annotation_type, fixed_positions)
-transformer = pipeline.transformer
+pipeline = load(annotation_type, fixed_positions, memory_demanding)
diff --git a/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py b/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
index f7ce09f7d1d2bd57eee0490b269c0b5a857c1a8d..92c329481a689b7c2d254deca5da51c8db1a68b1 100644
--- a/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
+++ b/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
@@ -1,24 +1,17 @@
-from bob.bio.face.embeddings.tf2_inception_resnet import (
-    InceptionResnetv1_MsCeleb_CenterLoss_2018,
+from bob.bio.face.embeddings.tensorflow import (
+    inception_resnet_v1_msceleb_centerloss_2018,
 )
 from bob.bio.face.utils import lookup_config_from_database
-from bob.bio.face.config.baseline.templates import facenet_baseline
-
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
     locals().get("database")
 )
 
 
-def load(annotation_type, fixed_positions=None):
-    return facenet_baseline(
-        embedding=InceptionResnetv1_MsCeleb_CenterLoss_2018(
-            memory_demanding=memory_demanding
-        ),
-        annotation_type=annotation_type,
-        fixed_positions=fixed_positions,
+def load(annotation_type, fixed_positions=None, memory_demanding=None):
+    return inception_resnet_v1_msceleb_centerloss_2018(
+        annotation_type, fixed_positions, memory_demanding
     )
 
 
-pipeline = load(annotation_type, fixed_positions)
-transformer = pipeline.transformer
+pipeline = load(annotation_type, fixed_positions, memory_demanding)
diff --git a/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py b/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
index eadd91541b85511f31e48ad97140ba6bc5dc5d61..0cc4eb79003316dee55435b383f18217046635f8 100644
--- a/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
+++ b/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
@@ -1,24 +1,15 @@
-from bob.bio.face.embeddings.tf2_inception_resnet import (
-    InceptionResnetv2_Casia_CenterLoss_2018,
-)
+from bob.bio.face.embeddings.tensorflow import inception_resnet_v2_casia_centerloss_2018
 from bob.bio.face.utils import lookup_config_from_database
-from bob.bio.face.config.baseline.templates import facenet_baseline
-
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
     locals().get("database")
 )
 
 
-def load(annotation_type, fixed_positions=None):
-    return facenet_baseline(
-        embedding=InceptionResnetv2_Casia_CenterLoss_2018(
-            memory_demanding=memory_demanding
-        ),
-        annotation_type=annotation_type,
-        fixed_positions=fixed_positions,
+def load(annotation_type, fixed_positions=None, memory_demanding=None):
+    return inception_resnet_v2_casia_centerloss_2018(
+        annotation_type, fixed_positions, memory_demanding
     )
 
 
-pipeline = load(annotation_type, fixed_positions)
-transformer = pipeline.transformer
+pipeline = load(annotation_type, fixed_positions, memory_demanding)
diff --git a/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py b/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
index 0be122d36c854becb94ef192ecc79d5134682975..01e4d6ee0d6e543b4048aa204de48c65e126e076 100644
--- a/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
+++ b/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
@@ -1,23 +1,17 @@
-from bob.bio.face.embeddings.tf2_inception_resnet import (
-    InceptionResnetv2_MsCeleb_CenterLoss_2018,
+from bob.bio.face.embeddings.tensorflow import (
+    inception_resnet_v2_msceleb_centerloss_2018,
 )
 from bob.bio.face.utils import lookup_config_from_database
-from bob.bio.face.config.baseline.templates import facenet_baseline
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
     locals().get("database")
 )
 
 
-def load(annotation_type, fixed_positions=None):
-    return facenet_baseline(
-        embedding=InceptionResnetv2_MsCeleb_CenterLoss_2018(
-            memory_demanding=memory_demanding
-        ),
-        annotation_type=annotation_type,
-        fixed_positions=fixed_positions,
+def load(annotation_type, fixed_positions=None, memory_demanding=None):
+    return inception_resnet_v2_msceleb_centerloss_2018(
+        annotation_type, fixed_positions, memory_demanding
     )
 
 
-pipeline = load(annotation_type, fixed_positions)
-transformer = pipeline.transformer
+pipeline = load(annotation_type, fixed_positions, memory_demanding)
diff --git a/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py b/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py
index 2355494888b81e8d8f7e8e49dc4ba8a3ba43daea..69423f543a4f8485e81e8cc4510d634cb2ebcb5a 100644
--- a/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py
+++ b/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py
@@ -1,21 +1,15 @@
-from bob.bio.face.embeddings.mobilenet_v2 import MobileNetv2_MsCeleb_ArcFace_2021
+from bob.bio.face.embeddings.tensorflow import mobilenetv2_msceleb_arcface_2021
 from bob.bio.face.utils import lookup_config_from_database
-from bob.bio.face.config.baseline.templates import arcface_baseline
-
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
     locals().get("database")
 )
 
 
-def load(annotation_type, fixed_positions=None):
-
-    return arcface_baseline(
-        embedding=MobileNetv2_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
-        annotation_type=annotation_type,
-        fixed_positions=fixed_positions,
+def load(annotation_type, fixed_positions=None, memory_demanding=None):
+    return mobilenetv2_msceleb_arcface_2021(
+        annotation_type, fixed_positions, memory_demanding
     )
 
 
-pipeline = load(annotation_type, fixed_positions)
-transformer = pipeline.transformer
+pipeline = load(annotation_type, fixed_positions, memory_demanding)
diff --git a/bob/bio/face/config/baseline/mxnet_pipe.py b/bob/bio/face/config/baseline/mxnet_pipe.py
deleted file mode 100644
index f3c9c2f175ea5bfd5849615fcb8dbfa42b211502..0000000000000000000000000000000000000000
--- a/bob/bio/face/config/baseline/mxnet_pipe.py
+++ /dev/null
@@ -1,65 +0,0 @@
-import bob.bio.base
-from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.face.extractor import MxNetModel
-from bob.bio.base.algorithm import Distance
-from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
-import scipy.spatial
-from bob.bio.base.pipelines.vanilla_biometrics import Distance
-from sklearn.pipeline import make_pipeline
-from bob.pipelines import wrap
-from bob.bio.base.pipelines.vanilla_biometrics import VanillaBiometricsPipeline
-
-
-memory_demanding = False
-if "database" in locals():
-    annotation_type = database.annotation_type
-    fixed_positions = database.fixed_positions
-    memory_demanding = (
-        database.memory_demanding if hasattr(database, "memory_demanding") else False
-    )
-
-else:
-    annotation_type = None
-    fixed_positions = None
-
-
-cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
-
-preprocessor_transformer = FaceCrop(
-    cropped_image_size=(112, 112),
-    cropped_positions={"leye": (49, 72), "reye": (49, 38)},
-    color_channel="rgb",
-    fixed_positions=fixed_positions,
-)
-
-transform_extra_arguments = (
-    None
-    if (cropped_positions is None or fixed_positions is not None)
-    else (("annotations", "annotations"),)
-)
-
-
-extractor_transformer = MxNetModel()
-
-algorithm = Distance(
-    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
-)
-
-
-# Chain the Transformers together
-transformer = make_pipeline(
-    wrap(
-        ["sample"],
-        preprocessor_transformer,
-        transform_extra_arguments=transform_extra_arguments,
-    ),
-    wrap(["sample"], extractor_transformer)
-    # Add more transformers here if needed
-)
-
-
-# Assemble the Vanilla Biometric pipeline and execute
-pipeline = VanillaBiometricsPipeline(transformer, algorithm)
-transformer = pipeline.transformer
-
-
diff --git a/bob/bio/face/config/baseline/mxnet_tinyface.py b/bob/bio/face/config/baseline/mxnet_tinyface.py
index 451412aa10f808b4a2858355b152ba8207e53449..078410d1ec25f3b3bba274ff0bda4b87ee6f0237 100644
--- a/bob/bio/face/config/baseline/mxnet_tinyface.py
+++ b/bob/bio/face/config/baseline/mxnet_tinyface.py
@@ -14,19 +14,24 @@ from bob.bio.base.pipelines.vanilla_biometrics import VanillaBiometricsPipeline
 
 annotator_transformer = BobIpTinyface()
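+# The TinyFace annotator above supplies the eye landmarks that the FaceCrop
+# below consumes when a sample carries no annotations of its own.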
 
-preprocessor_transformer = FaceCrop(cropped_image_size=(112,112), cropped_positions={'leye':(49,72), 'reye':(49,38)}, color_channel='rgb',annotator=annotator_transformer)
+preprocessor_transformer = FaceCrop(
+    cropped_image_size=(112, 112),
+    cropped_positions={"leye": (49, 72), "reye": (49, 38)},
+    color_channel="rgb",
+    annotator=annotator_transformer,
+)
 
 extractor_transformer = MxNetModel()
 
 
-algorithm = Distance(distance_function = scipy.spatial.distance.cosine,is_distance_function = True)
+algorithm = Distance(
+    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
+)
 
 transformer = make_pipeline(
-    wrap(["sample"], preprocessor_transformer),
-    wrap(["sample"], extractor_transformer)
+    wrap(["sample"], preprocessor_transformer), wrap(["sample"], extractor_transformer)
 )
 
 pipeline = VanillaBiometricsPipeline(transformer, algorithm)
 transformer = pipeline.transformer
 
-
diff --git a/bob/bio/face/config/baseline/pytorch_pipe_v2.py b/bob/bio/face/config/baseline/pytorch_pipe_v2.py
deleted file mode 100644
index 90788963d6440c828778638cbc258eaa702a1376..0000000000000000000000000000000000000000
--- a/bob/bio/face/config/baseline/pytorch_pipe_v2.py
+++ /dev/null
@@ -1,75 +0,0 @@
-import bob.bio.base
-from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.face.extractor import PyTorchLibraryModel
-from facenet_pytorch import InceptionResnetV1
-from bob.bio.base.algorithm import Distance
-from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
-import scipy.spatial
-from bob.bio.base.pipelines.vanilla_biometrics import Distance
-from sklearn.pipeline import make_pipeline
-from bob.pipelines import wrap
-from bob.bio.base.pipelines.vanilla_biometrics import VanillaBiometricsPipeline
-
-
-memory_demanding = False
-if "database" in locals():
-    annotation_type = database.annotation_type
-    fixed_positions = database.fixed_positions
-    memory_demanding = (
-        database.memory_demanding if hasattr(database, "memory_demanding") else False
-    )
-
-else:
-    annotation_type = None
-    fixed_positions = None
-
-
-cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
-
-cropped_positions = {"leye": (110, 144), "reye": (110, 96)}
-
-preprocessor_transformer = FaceCrop(
-    cropped_image_size=(224, 224),
-    cropped_positions={"leye": (110, 144), "reye": (110, 96)},
-    color_channel="rgb",
-    fixed_positions=fixed_positions,
-)
-
-transform_extra_arguments = (
-    None
-    if (cropped_positions is None or fixed_positions is not None)
-    else (("annotations", "annotations"),)
-)
-
-
-transform_extra_arguments = (
-    None
-    if (cropped_positions is None or fixed_positions is not None)
-    else (("annotations", "annotations"),)
-)
-
-
-model = InceptionResnetV1(pretrained="vggface2").eval()
-extractor_transformer = PyTorchLibraryModel(model=model)
-
-
-algorithm = Distance(
-    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
-)
-
-
-# Chain the Transformers together
-transformer = make_pipeline(
-    wrap(
-        ["sample"],
-        preprocessor_transformer,
-        transform_extra_arguments=transform_extra_arguments,
-    ),
-    wrap(["sample"], extractor_transformer)
-    # Add more transformers here if needed
-)
-
-
-# Assemble the Vanilla Biometric pipeline and execute
-pipeline = VanillaBiometricsPipeline(transformer, algorithm)
-transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py b/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py
index 0560a97a62f42d33f5d8c54c3177760db61ab9fc..08f7dd62b40024c00658789cdcf5b99eb8317e5d 100644
--- a/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py
+++ b/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py
@@ -1,20 +1,15 @@
-from bob.bio.face.embeddings.resnet50 import Resnet50_MsCeleb_ArcFace_2021
+from bob.bio.face.embeddings.tensorflow import resnet50_msceleb_arcface_2021
 from bob.bio.face.utils import lookup_config_from_database
-from bob.bio.face.config.baseline.templates import arcface_baseline
-
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
     locals().get("database")
 )
 
 
-def load(annotation_type, fixed_positions=None):
-    return arcface_baseline(
-        embedding=Resnet50_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
-        annotation_type=annotation_type,
-        fixed_positions=fixed_positions,
+def load(annotation_type, fixed_positions=None, memory_demanding=None):
+    return resnet50_msceleb_arcface_2021(
+        annotation_type, fixed_positions, memory_demanding
     )
 
 
-pipeline = load(annotation_type, fixed_positions)
-transformer = pipeline.transformer
+pipeline = load(annotation_type, fixed_positions, memory_demanding)
diff --git a/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py b/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py
index 64d6ec4c84fbb43c1ef3061c070c6a568e7d74d6..475e7efd3d21e64aae8f424ed66baca1efa47d28 100644
--- a/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py
+++ b/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py
@@ -1,20 +1,15 @@
-from bob.bio.face.embeddings.resnet50 import Resnet50_VGG2_ArcFace_2021
+from bob.bio.face.embeddings.tensorflow import resnet50_vgg2_arcface_2021
 from bob.bio.face.utils import lookup_config_from_database
-from bob.bio.face.config.baseline.templates import arcface_baseline
-
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
     locals().get("database")
 )
 
 
-def load(annotation_type, fixed_positions=None):
-    return arcface_baseline(
-        embedding=Resnet50_VGG2_ArcFace_2021(memory_demanding=memory_demanding),
-        annotation_type=annotation_type,
-        fixed_positions=fixed_positions,
+def load(annotation_type, fixed_positions=None, memory_demanding=None):
+    return resnet50_vgg2_arcface_2021(
+        annotation_type, fixed_positions, memory_demanding
     )
 
 
-pipeline = load(annotation_type, fixed_positions)
-transformer = pipeline.transformer
+pipeline = load(annotation_type, fixed_positions, memory_demanding)
diff --git a/bob/bio/face/config/baseline/templates.py b/bob/bio/face/config/baseline/templates.py
deleted file mode 100644
index 415ecd2831432dfb96b94ff4eae814409cdc2088..0000000000000000000000000000000000000000
--- a/bob/bio/face/config/baseline/templates.py
+++ /dev/null
@@ -1,54 +0,0 @@
-from bob.bio.face.utils import (
-    dnn_default_cropping,
-    embedding_transformer,
-)
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
-
-
-def arcface_baseline(embedding, annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (112, 112)
-    if annotation_type == "eyes-center":
-        # Hard coding eye positions for backward consistency
-        cropped_positions = {
-            "leye": (55, 81),
-            "reye": (55, 42),
-        }
-    else:
-        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
-
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
-        embedding=embedding,
-        cropped_positions=cropped_positions,
-        fixed_positions=fixed_positions,
-        color_channel="rgb",
-        annotator="mtcnn",
-    )
-
-    algorithm = Distance()
-
-    return VanillaBiometricsPipeline(transformer, algorithm)
-
-
-def facenet_baseline(embedding, annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (160, 160)
-    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
-
-    # ASSEMBLE TRANSFORMER
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
-        embedding=embedding,
-        cropped_positions=cropped_positions,
-        fixed_positions=fixed_positions,
-        color_channel="rgb",
-        annotator="mtcnn",
-    )
-
-    algorithm = Distance()
-
-    return VanillaBiometricsPipeline(transformer, algorithm)
diff --git a/bob/bio/face/config/baseline/tf2_inception_resnet.py b/bob/bio/face/config/baseline/tf2_inception_resnet.py
deleted file mode 100644
index 38d78c53de52cac64e1e668f98c72635789adf99..0000000000000000000000000000000000000000
--- a/bob/bio/face/config/baseline/tf2_inception_resnet.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from bob.extension import rc
-from bob.bio.face.embeddings.tf2_inception_resnet import InceptionResnetv2
-from bob.bio.face.utils import lookup_config_from_database
-from bob.bio.face.config.baseline.templates import facenet_baseline
-
-annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
-    locals().get("database")
-)
-
-
-def load(annotation_type, fixed_positions=None):
-    extractor_path = rc["bob.bio.face.tf2.casia-webface-inception-v2"]
-    embedding = InceptionResnetv2(
-        checkpoint_path=extractor_path, memory_demanding=memory_demanding
-    )
-    return facenet_baseline(
-        embedding=embedding,
-        annotation_type=annotation_type,
-        fixed_positions=fixed_positions,
-    )
-
-
-pipeline = load(annotation_type, fixed_positions)
-transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/tf_pipe.py b/bob/bio/face/config/baseline/tf_pipe.py
deleted file mode 100644
index c4ea24a68baad4ff57d57d8f773f249284a01b6b..0000000000000000000000000000000000000000
--- a/bob/bio/face/config/baseline/tf_pipe.py
+++ /dev/null
@@ -1,66 +0,0 @@
-import bob.bio.base
-from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.face.extractor import TensorFlowModel
-from bob.bio.base.algorithm import Distance
-from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
-import scipy.spatial
-from bob.bio.base.pipelines.vanilla_biometrics import Distance
-from sklearn.pipeline import make_pipeline
-from bob.pipelines import wrap
-from bob.bio.base.pipelines.vanilla_biometrics import VanillaBiometricsPipeline
-
-
-memory_demanding = False
-if "database" in locals():
-    annotation_type = database.annotation_type
-    fixed_positions = database.fixed_positions
-    memory_demanding = (
-        database.memory_demanding if hasattr(database, "memory_demanding") else False
-    )
-
-else:
-    annotation_type = None
-    fixed_positions = None
-
-
-# Preprocessor
-cropped_positions = {"leye": (80, 100), "reye": (80, 60)}
-
-preprocessor_transformer = FaceCrop(
-    cropped_image_size=(160, 160),
-    cropped_positions={"leye": (80, 100), "reye": (80, 60)},
-    color_channel="rgb",
-    fixed_positions=fixed_positions,
-)
-
-transform_extra_arguments = (
-    None
-    if (cropped_positions is None or fixed_positions is not None)
-    else (("annotations", "annotations"),)
-)
-
-
-# Extractor
-extractor_transformer = TensorFlowModel()
-
-# Algorithm
-algorithm = Distance(
-    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
-)
-
-
-# Chain the Transformers together
-transformer = make_pipeline(
-    wrap(
-        ["sample"],
-        preprocessor_transformer,
-        transform_extra_arguments=transform_extra_arguments,
-    ),
-    wrap(["sample"], extractor_transformer)
-    # Add more transformers here if needed
-)
-
-
-# Assemble the Vanilla Biometric pipeline and execute
-pipeline = VanillaBiometricsPipeline(transformer, algorithm)
-transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/vgg16_oxford.py b/bob/bio/face/config/baseline/vgg16_oxford.py
index dd513cf871785f215a7164099fca1deba83293f6..23bf274acc7fe480f7846241b74a95ff1076671b 100644
--- a/bob/bio/face/config/baseline/vgg16_oxford.py
+++ b/bob/bio/face/config/baseline/vgg16_oxford.py
@@ -1,63 +1,14 @@
-import bob.bio.base
-from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.face.embeddings.opencv import VGG16_Oxford
-from bob.pipelines import wrap
-import scipy.spatial
-from bob.bio.base.pipelines.vanilla_biometrics import Distance
+from bob.bio.face.embeddings.opencv import vgg16_oxford_baseline
+from bob.bio.face.utils import lookup_config_from_database
 
-from sklearn.pipeline import make_pipeline
-from bob.pipelines import wrap
-from bob.bio.base.pipelines.vanilla_biometrics import VanillaBiometricsPipeline
 
-memory_demanding = False
-if "database" in locals():
-    annotation_type = database.annotation_type
-    fixed_positions = database.fixed_positions
-    memory_demanding = (
-        database.memory_demanding if hasattr(database, "memory_demanding") else False
-    )
-
-else:
-    annotation_type = None
-    fixed_positions = None
-
-cropped_positions = {"leye": (100, 140), "reye": (100, 95)}
-preprocessor_transformer = FaceCrop(
-    cropped_image_size=(224, 224),
-    cropped_positions=cropped_positions,
-    color_channel="rgb",
-    fixed_positions=fixed_positions,
-    allow_upside_down_normalized_faces=True,
+annotation_type, fixed_positions, _ = lookup_config_from_database(
+    locals().get("database")
 )
 
-transform_extra_arguments = (
-    None
-    if (cropped_positions is None or fixed_positions is not None)
-    else (("annotations", "annotations"),)
-)
-
-# Extractor
-
-extractor_transformer = VGG16_Oxford()
 
-
-# Algorithm
-algorithm = Distance(
-    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
-)
-
-# Chain the Transformers together
-transformer = make_pipeline(
-    wrap(
-        ["sample"],
-        preprocessor_transformer,
-        transform_extra_arguments=transform_extra_arguments,
-    ),
-    wrap(["sample"], extractor_transformer)
-    # Add more transformers here if needed
-)
+def load(annotation_type, fixed_positions=None):
+    return vgg16_oxford_baseline(annotation_type, fixed_positions)
 
 
-# Assemble the Vanilla Biometric pipeline and execute
-pipeline = VanillaBiometricsPipeline(transformer, algorithm)
-transformer = pipeline.transformer
+pipeline = load(annotation_type, fixed_positions)
diff --git a/bob/bio/face/embeddings/mobilenet_v2.py b/bob/bio/face/embeddings/mobilenet_v2.py
deleted file mode 100644
index 4966583ff6388b44706eec199fc3e6dcb8768359..0000000000000000000000000000000000000000
--- a/bob/bio/face/embeddings/mobilenet_v2.py
+++ /dev/null
@@ -1,79 +0,0 @@
-from bob.bio.face.embeddings import download_model
-
-
-from .tf2_inception_resnet import TransformTensorflow
-import pkg_resources
-import os
-from bob.extension import rc
-import tensorflow as tf
-
-
-class MobileNetv2_MsCeleb_ArcFace_2021(TransformTensorflow):
-    """
-    MobileNet Backbone trained with the MSCeleb 1M database.
-
-    The bottleneck layer (a.k.a embedding) has 512d.
-
-    The configuration file used to trained is:
-
-    ```yaml
-    batch-size: 128
-    face-size: 112
-    face-output_size: 112
-    n-classes: 85742
-
-
-    ## Backbone
-    backbone: 'mobilenet-v2'
-    head: 'arcface'
-    s: 10
-    bottleneck: 512
-    m: 0.5
-
-    # Training parameters
-    solver: "sgd"
-    lr: 0.01
-    dropout-rate: 0.5
-    epochs: 500
-
-
-    train-tf-record-path: "<PATH>"
-    validation-tf-record-path: "<PATH>"
-
-    ```
-
-
-    """
-
-    def __init__(self, memory_demanding=False):
-        internal_path = pkg_resources.resource_filename(
-            __name__, os.path.join("data", "mobilenet-v2-msceleb-arcface-2021"),
-        )
-
-        checkpoint_path = (
-            internal_path
-            if rc["bob.bio.face.models.mobilenet-v2-msceleb-arcface-2021"] is None
-            else rc["bob.bio.face.models.mobilenet-v2-msceleb-arcface-2021"]
-        )
-
-        urls = [
-            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/mobilenet-v2-msceleb-arcface-2021.tar.gz",
-            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/mobilenet-v2-msceleb-arcface-2021.tar.gz",
-        ]
-
-        download_model(checkpoint_path, urls, "mobilenet-v2-msceleb-arcface-2021.tar.gz")
-
-        super(MobileNetv2_MsCeleb_ArcFace_2021, self).__init__(
-            checkpoint_path,
-            preprocessor=lambda X: X / 255.0,
-            memory_demanding=memory_demanding,
-        )
-
-    def inference(self, X):
-        if self.preprocessor is not None:
-            X = self.preprocessor(tf.cast(X, "float32"))
-
-        prelogits = self.model.predict_on_batch(X)[0]
-        embeddings = tf.math.l2_normalize(prelogits, axis=-1)
-        return embeddings
-
diff --git a/bob/bio/face/embeddings/mxnet.py b/bob/bio/face/embeddings/mxnet.py
index 8e8d897dbe98f2f415ea80be7fc8e46c17d083f5..5d14bed7592c4c3bff9d075452e3e6deae5643f5 100644
--- a/bob/bio/face/embeddings/mxnet.py
+++ b/bob/bio/face/embeddings/mxnet.py
@@ -147,3 +147,40 @@ class ArcFaceInsightFace_LResNet100(MxNetTransformer):
         model.set_params(arg_params, aux_params)
 
         self.model = model
+
+
+from bob.bio.face.utils import (
+    dnn_default_cropping,
+    embedding_transformer,
+)
+from bob.bio.base.pipelines.vanilla_biometrics import (
+    Distance,
+    VanillaBiometricsPipeline,
+)
+
+
+def arcface_baseline(embedding, annotation_type, fixed_positions=None):
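+    r"""
+    ArcFace baseline template.
+    This one will crop the face to :math:`112 \times 112` (with hard-coded
+    eye positions for ``eyes-center`` annotations, kept for backward
+    consistency) and compare the embeddings with a plain
+    :py:class:`Distance` algorithm.
+
+    Parameters
+    ----------
+
+      embedding: obj
+         Transformer that takes a cropped face and extracts the embedding
+
+      annotation_type: str
+         Type of the annotations (e.g. ``eyes-center``)
+
+      fixed_positions: dict
+         Set this when the faces in your images are already registered to a
+         fixed position in the image
+    """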
+    # DEFINE CROPPING
+    cropped_image_size = (112, 112)
+    if annotation_type == "eyes-center":
+        # Hard coding eye positions for backward consistency
+        cropped_positions = {
+            "leye": (55, 81),
+            "reye": (55, 42),
+        }
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=embedding,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
+    )
+
+    algorithm = Distance()
+
+    return VanillaBiometricsPipeline(transformer, algorithm)
diff --git a/bob/bio/face/embeddings/mxnet_models.py b/bob/bio/face/embeddings/mxnet_models.py
deleted file mode 100644
index d8eb786491356ca1bfc0b6893d895bb49200125a..0000000000000000000000000000000000000000
--- a/bob/bio/face/embeddings/mxnet_models.py
+++ /dev/null
@@ -1,99 +0,0 @@
-"""
-Load and predict using checkpoints based on mxnet
-"""
-
-from sklearn.base import TransformerMixin, BaseEstimator
-from sklearn.utils import check_array
-import numpy as np
-from bob.bio.face.embeddings import download_model
-import pkg_resources
-import os
-from bob.extension import rc
-
-
-class ArcFaceInsightFace(TransformerMixin, BaseEstimator):
-    """
-    ArcFace from Insight Face.
-
-    Model and source code taken from the repository
-
-    https://github.com/deepinsight/insightface/blob/master/python-package/insightface/model_zoo/face_recognition.py
-
-    """
-
-    def __init__(self, use_gpu=False, memory_demanding=False, **kwargs):
-        super().__init__(**kwargs)
-        self.model = None
-        self.use_gpu = use_gpu
-        self.memory_demanding = memory_demanding
-
-        internal_path = pkg_resources.resource_filename(
-            __name__, os.path.join("data", "arcface_insightface"),
-        )
-
-        checkpoint_path = (
-            internal_path
-            if rc["bob.bio.face.models.ArcFaceInsightFace"] is None
-            else rc["bob.bio.face.models.ArcFaceInsightFace"]
-        )
-
-        urls = [
-            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/mxnet/arcface_r100_v1_mxnet.tar.gz",
-            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/mxnet/arcface_r100_v1_mxnet.tar.gz"
-        ]
-
-        download_model(checkpoint_path, urls, "arcface_r100_v1_mxnet.tar.gz")
-
-        self.checkpoint_path = checkpoint_path
-
-    def load_model(self):
-        import mxnet as mx
-
-        sym, arg_params, aux_params = mx.model.load_checkpoint(
-            os.path.join(self.checkpoint_path, "model"), 0
-        )
-
-        all_layers = sym.get_internals()
-        sym = all_layers["fc1_output"]
-
-        # LOADING CHECKPOINT
-        ctx = mx.gpu() if self.use_gpu else mx.cpu()
-        model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
-        data_shape = (1, 3, 112, 112)
-        model.bind(data_shapes=[("data", data_shape)])
-        model.set_params(arg_params, aux_params)
-
-        # warmup
-        data = mx.nd.zeros(shape=data_shape)
-        db = mx.io.DataBatch(data=(data,))
-        model.forward(db, is_train=False)
-        embedding = model.get_outputs()[0].asnumpy()
-        self.model = model
-
-    def transform(self, X):
-        import mxnet as mx
-
-        if self.model is None:
-            self.load_model()
-
-        X = check_array(X, allow_nd=True)
-
-        def _transform(X):
-            X = mx.nd.array(X)
-            db = mx.io.DataBatch(data=(X,))
-            self.model.forward(db, is_train=False)
-            return self.model.get_outputs()[0].asnumpy()
-
-        if self.memory_demanding:
-            return np.array([_transform(x[None, ...]) for x in X])
-        else:
-            return _transform(X)
-
-    def __getstate__(self):
-        # Handling unpicklable objects
-        d = self.__dict__.copy()
-        d["model"] = None
-        return d
-
-    def _more_tags(self):
-        return {"stateless": True, "requires_fit": False}
diff --git a/bob/bio/face/embeddings/opencv.py b/bob/bio/face/embeddings/opencv.py
index bcf1c7d0a66fedd76ade88d945a3703936909f23..7965914ed00a8b4f37f34e76a6b227c34c6b79cd 100644
--- a/bob/bio/face/embeddings/opencv.py
+++ b/bob/bio/face/embeddings/opencv.py
@@ -9,6 +9,15 @@ from sklearn.base import TransformerMixin, BaseEstimator
 from sklearn.utils import check_array
 import os
 from bob.extension.download import get_file
+from bob.bio.face.utils import (
+    dnn_default_cropping,
+    embedding_transformer,
+)
+
+from bob.bio.base.pipelines.vanilla_biometrics import (
+    Distance,
+    VanillaBiometricsPipeline,
+)
 
 
 class OpenCVTransformer(TransformerMixin, BaseEstimator):
@@ -143,3 +152,40 @@ class VGG16_Oxford(OpenCVTransformer):
         net = cv2.dnn.readNet(self.checkpoint_path, self.config)
         self.model = net
 
+
+def vgg16_oxford_baseline(annotation_type, fixed_positions=None):
+    """
+    Get the VGG16 pipeline which will crop the face :math:`224 \times 224`
+    use the :py:class:`VGG16_Oxford`
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. `eyes-center')
+
+      fixed_positions: dict
+         Set it if in your face images are registered to a fixed position in the image
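+
+    Example
+    -------
+    A minimal sketch; the model weights are downloaded on first use, hence
+    the skipped doctest:
+
+    >>> from bob.bio.face.embeddings.opencv import vgg16_oxford_baseline
+    >>> pipeline = vgg16_oxford_baseline("eyes-center")  # doctest: +SKIP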
+    """
+
+    # DEFINE CROPPING
+    cropped_image_size = (224, 224)
+
+    if annotation_type == "eyes-center":
+        # Hard coding eye positions for backward consistency
+        cropped_positions = {"leye": (100, 140), "reye": (100, 95)}
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=VGG16_Oxford(),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
+    )
+
+    algorithm = Distance()
+
+    return VanillaBiometricsPipeline(transformer, algorithm)
diff --git a/bob/bio/face/embeddings/pytorch.py b/bob/bio/face/embeddings/pytorch.py
index 96f72667213afc4125cee5fd123df9843cc30b5e..3e016b4a0da0135c3364c41c400c89ca6d01dbe8 100644
--- a/bob/bio/face/embeddings/pytorch.py
+++ b/bob/bio/face/embeddings/pytorch.py
@@ -9,6 +9,15 @@ import numpy as np
 import imp
 import os
 from bob.extension.download import get_file
+from bob.bio.face.utils import (
+    dnn_default_cropping,
+    embedding_transformer,
+)
+
+from bob.bio.base.pipelines.vanilla_biometrics import (
+    Distance,
+    VanillaBiometricsPipeline,
+)
 
 
 class PyTorchModel(TransformerMixin, BaseEstimator):
@@ -119,3 +128,40 @@ class AFFFE_2021(PyTorchModel):
 
         self.model = network
 
+
+def afffe_baseline(annotation_type, fixed_positions=None):
+    """
+    Get the AFFFE pipeline which will crop the face :math:`224 \times 224`
+    use the :py:class:`AFFFE_2021`
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. `eyes-center')
+
+      fixed_positions: dict
+         Set it if in your face images are registered to a fixed position in the image
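+
+    Example
+    -------
+    A minimal sketch; the model checkpoint is downloaded on first use, hence
+    the skipped doctest:
+
+    >>> from bob.bio.face.embeddings.pytorch import afffe_baseline
+    >>> pipeline = afffe_baseline("eyes-center")  # doctest: +SKIP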
+    """
+
+    # DEFINE CROPPING
+    cropped_image_size = (224, 224)
+
+    if annotation_type == "eyes-center":
+        # Hard coding eye positions for backward consistency
+        cropped_positions = {"leye": (110, 144), "reye": (110, 96)}
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=AFFFE_2021(),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
+    )
+
+    algorithm = Distance()
+
+    return VanillaBiometricsPipeline(transformer, algorithm)
diff --git a/bob/bio/face/embeddings/resnet50.py b/bob/bio/face/embeddings/resnet50.py
deleted file mode 100644
index c542c39d734e7ec7d402e3834359977f406fe4a8..0000000000000000000000000000000000000000
--- a/bob/bio/face/embeddings/resnet50.py
+++ /dev/null
@@ -1,148 +0,0 @@
-from bob.bio.face.embeddings import download_model
-
-
-from .tf2_inception_resnet import TransformTensorflow
-import pkg_resources
-import os
-from bob.extension import rc
-import tensorflow as tf
-
-
-class Resnet50_MsCeleb_ArcFace_2021(TransformTensorflow):
-    """
-    Resnet50 Backbone trained with the MSCeleb 1M database.
-
-    The bottleneck layer (a.k.a embedding) has 512d.
-
-    The configuration file used to trained is:
-
-    ```yaml
-    batch-size: 128
-    face-size: 112
-    face-output_size: 112
-    n-classes: 85742
-
-
-    ## Backbone
-    backbone: 'resnet50'
-    head: 'arcface'
-    s: 10
-    bottleneck: 512
-    m: 0.5
-
-    # Training parameters
-    solver: "sgd"
-    lr: 0.01
-    dropout-rate: 0.5
-    epochs: 500
-
-
-    train-tf-record-path: "<PATH>"
-    validation-tf-record-path: "<PATH>"
-
-    ```
-
-
-    """
-
-    def __init__(self, memory_demanding=False):
-        internal_path = pkg_resources.resource_filename(
-            __name__, os.path.join("data", "resnet50_msceleb_arcface_2021"),
-        )
-
-        checkpoint_path = (
-            internal_path
-            if rc["bob.bio.face.models.resnet50_msceleb_arcface_2021"] is None
-            else rc["bob.bio.face.models.resnet50_msceleb_arcface_2021"]
-        )
-
-        urls = [
-            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet50_msceleb_arcface_2021.tar.gz",
-            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet50_msceleb_arcface_2021.tar.gz",
-        ]
-
-        download_model(checkpoint_path, urls, "resnet50_msceleb_arcface_2021.tar.gz")
-
-        super(Resnet50_MsCeleb_ArcFace_2021, self).__init__(
-            checkpoint_path,
-            preprocessor=lambda X: X / 255.0,
-            memory_demanding=memory_demanding,
-        )
-
-    def inference(self, X):
-        if self.preprocessor is not None:
-            X = self.preprocessor(tf.cast(X, "float32"))
-
-        prelogits = self.model.predict_on_batch(X)[0]
-        embeddings = tf.math.l2_normalize(prelogits, axis=-1)
-        return embeddings
-
-
-class Resnet50_VGG2_ArcFace_2021(TransformTensorflow):
-    """
-    Resnet50 Backbone trained with the VGG2 database.
-
-    The bottleneck layer (a.k.a embedding) has 512d.
-
-    The configuration file used to trained is:
-
-    ```yaml
-    batch-size: 128
-    face-size: 112
-    face-output_size: 112
-    n-classes: 8631
-
-
-    ## Backbone
-    backbone: 'resnet50'
-    head: 'arcface'
-    s: 64
-    bottleneck: 512
-    m: 0.5
-
-    # Training parameters
-    solver: "sgd"
-    lr: 0.1
-    dropout-rate: 0.5
-    epochs: 1047
-
-
-    train-tf-record-path: "<PATH>"
-    validation-tf-record-path: "<PATH>"
-
-    ```
-
-
-    """
-
-    def __init__(self, memory_demanding=False):
-        internal_path = pkg_resources.resource_filename(
-            __name__, os.path.join("data", "resnet50_vgg2_arcface_2021"),
-        )
-
-        checkpoint_path = (
-            internal_path
-            if rc["bob.bio.face.models.resnet50_vgg2_arcface_2021"] is None
-            else rc["bob.bio.face.models.resnet50_vgg2_arcface_2021"]
-        )
-
-        urls = [
-            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet50_vgg2_arcface_2021.tar.gz",
-            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet50_vgg2_arcface_2021.tar.gz",
-        ]
-
-        download_model(checkpoint_path, urls, "resnet50_vgg2_arcface_2021.tar.gz")
-
-        super(Resnet50_VGG2_ArcFace_2021, self).__init__(
-            checkpoint_path,
-            preprocessor=lambda X: X / 255.0,
-            memory_demanding=memory_demanding,
-        )
-
-    def inference(self, X):
-        if self.preprocessor is not None:
-            X = self.preprocessor(tf.cast(X, "float32"))
-
-        prelogits = self.model.predict_on_batch(X)
-        embeddings = tf.math.l2_normalize(prelogits, axis=-1)
-        return embeddings
diff --git a/bob/bio/face/embeddings/tensorflow.py b/bob/bio/face/embeddings/tensorflow.py
index 161b53a2b4180d96765903318e62412603758bdf..d37243b18963373f469b112d4a4a2479a118da8d 100644
--- a/bob/bio/face/embeddings/tensorflow.py
+++ b/bob/bio/face/embeddings/tensorflow.py
@@ -14,6 +14,16 @@ from sklearn.utils import check_array
 import numpy as np
 import tensorflow as tf
 
+from bob.bio.face.utils import (
+    dnn_default_cropping,
+    embedding_transformer,
+)
+
+from bob.bio.base.pipelines.vanilla_biometrics import (
+    Distance,
+    VanillaBiometricsPipeline,
+)
+
 
 def sanderberg_rescaling():
     # FIXED_STANDARDIZATION from https://github.com/davidsandberg/facenet
@@ -472,3 +482,288 @@ class MobileNetv2_MsCeleb_ArcFace_2021(TensorflowTransformer):
         embeddings = tf.math.l2_normalize(prelogits, axis=-1)
         return embeddings
 
+
+def facenet_template(embedding, annotation_type, fixed_positions=None):
+    """
+    Facenet baseline template.
+    This one will crop the face at :math:`160 \times 160`
+    
+    Parameters
+    ----------
+
+      embedding: obj
+         Transformer that takes a cropped face and extract the embeddings
+
+      annotation_type: str
+         Type of the annotations (e.g. `eyes-center')
+
+      fixed_positions: dict
+         Set it if in your face images are registered to a fixed position in the image
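+
+    Example
+    -------
+    A minimal sketch of how the concrete baselines below build on this
+    template (model weights are downloaded on first use, hence the skipped
+    doctest):
+
+    >>> from bob.bio.face.embeddings.tensorflow import (
+    ...     FaceNetSanderberg_20170512_110547,
+    ...     facenet_template,
+    ... )
+    >>> pipeline = facenet_template(
+    ...     FaceNetSanderberg_20170512_110547(), annotation_type="eyes-center"
+    ... )  # doctest: +SKIP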
+    """
+    # DEFINE CROPPING
+    cropped_image_size = (160, 160)
+    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    # ASSEMBLE TRANSFORMER
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=embedding,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
+    )
+
+    algorithm = Distance()
+
+    return VanillaBiometricsPipeline(transformer, algorithm)
+
+
+def resnet_template(embedding, annotation_type, fixed_positions=None):
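+    r"""
+    ArcFace-style baseline template shared by the ResNet and MobileNet
+    baselines below.
+    This one will crop the face to :math:`112 \times 112` (with hard-coded
+    eye positions for ``eyes-center`` annotations, kept for backward
+    consistency) and compare the embeddings with a plain
+    :py:class:`Distance` algorithm.
+    """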
+    # DEFINE CROPPING
+    cropped_image_size = (112, 112)
+    if annotation_type == "eyes-center":
+        # Hard coding eye positions for backward consistency
+        cropped_positions = {
+            "leye": (55, 81),
+            "reye": (55, 42),
+        }
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=embedding,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
+    )
+
+    algorithm = Distance()
+
+    return VanillaBiometricsPipeline(transformer, algorithm)
+
+
+def resnet50_msceleb_arcface_2021(
+    annotation_type, fixed_positions=None, memory_demanding=False
+):
+    """
+    Get the Resnet50 pipeline which will crop the face :math:`160 \times 160` and 
+    use the :py:class:`Resnet50_MsCeleb_ArcFace_2021` to extract the features
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. `eyes-center')
+
+      fixed_positions: dict
+         Set it if in your face images are registered to a fixed position in the image
+
+      memory_demanding: bool
+
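+
+    Example
+    -------
+    A minimal sketch (model weights are downloaded on first use, hence the
+    skipped doctest):
+
+    >>> from bob.bio.face.embeddings.tensorflow import (
+    ...     resnet50_msceleb_arcface_2021,
+    ... )
+    >>> pipeline = resnet50_msceleb_arcface_2021("eyes-center")  # doctest: +SKIP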
+    """
+
+    return resnet_template(
+        embedding=Resnet50_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
+
+
+def resnet50_vgg2_arcface_2021(
+    annotation_type, fixed_positions=None, memory_demanding=False
+):
+    """
+    Get the Resnet50 pipeline which will crop the face :math:`160 \times 160` and 
+    use the :py:class:`Resnet50_VGG2_ArcFace_2021` to extract the features
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. `eyes-center')
+
+      fixed_positions: dict
+         Set it if in your face images are registered to a fixed position in the image
+
+      memory_demanding: bool
+
+    """
+
+    return resnet_template(
+        embedding=Resnet50_VGG2_ArcFace_2021(memory_demanding=memory_demanding),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
+
+
+def mobilenetv2_msceleb_arcface_2021(
+    annotation_type, fixed_positions=None, memory_demanding=False
+):
+    """
+    Get the MobileNet pipeline which will crop the face :math:`160 \times 160` and 
+    use the :py:class:`MobileNetv2_MsCeleb_ArcFace_2021` to extract the features
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. `eyes-center')
+
+      fixed_positions: dict
+         Set it if in your face images are registered to a fixed position in the image
+
+      memory_demanding: bool
+
+    """
+
+    return resnet_template(
+        embedding=MobileNetv2_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
+
+
+def facenet_sanderberg_20170512_110547(
+    annotation_type, fixed_positions=None, memory_demanding=False
+):
+    """
+    Get the Facenet pipeline which will crop the face :math:`160 \times 160` and 
+    use the :py:class:`FaceNetSanderberg_20170512_110547` to extract the features
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. `eyes-center')
+
+      fixed_positions: dict
+         Set it if in your face images are registered to a fixed position in the image
+
+      memory_demanding: bool
+
+    """
+
+    return facenet_template(
+        embedding=FaceNetSanderberg_20170512_110547(memory_demanding=memory_demanding),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
+
+
+def inception_resnet_v1_casia_centerloss_2018(
+    annotation_type, fixed_positions=None, memory_demanding=False
+):
+    """
+    Get the Inception Resnet v1 pipeline which will crop the face :math:`160 \times 160` and 
+    use the :py:class:`InceptionResnetv1_Casia_CenterLoss_2018` to extract the features
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. `eyes-center')
+
+      fixed_positions: dict
+         Set it if in your face images are registered to a fixed position in the image
+
+      memory_demanding: bool
+
+    """
+
+    return facenet_template(
+        embedding=InceptionResnetv1_Casia_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
+
+
+def inception_resnet_v2_casia_centerloss_2018(
+    annotation_type, fixed_positions=None, memory_demanding=False
+):
+    """
+    Get the Inception Resnet v2 pipeline which will crop the face :math:`160 \times 160` and 
+    use the :py:class:`InceptionResnetv2_Casia_CenterLoss_2018` to extract the features
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. `eyes-center')
+
+      fixed_positions: dict
+         Set it if in your face images are registered to a fixed position in the image
+
+      memory_demanding: bool
+
+    """
+
+    return facenet_template(
+        embedding=InceptionResnetv2_Casia_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
+
+
+def inception_resnet_v1_msceleb_centerloss_2018(
+    annotation_type, fixed_positions=None, memory_demanding=False
+):
+    """
+    Get the Inception Resnet v1 pipeline which will crop the face :math:`160 \times 160` and 
+    use the :py:class:`InceptionResnetv1_MsCeleb_CenterLoss_2018` to extract the features
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. `eyes-center')
+
+      fixed_positions: dict
+         Set it if in your face images are registered to a fixed position in the image
+
+      memory_demanding: bool
+
+    """
+
+    return facenet_template(
+        embedding=InceptionResnetv1_MsCeleb_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
+
+
+def inception_resnet_v2_msceleb_centerloss_2018(
+    annotation_type, fixed_positions=None, memory_demanding=False
+):
+    """
+    Get the Inception Resnet v2 pipeline which will crop the face :math:`160 \times 160` and 
+    use the :py:class:`InceptionResnetv2_MsCeleb_CenterLoss_2018` to extract the features
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. `eyes-center')
+
+      fixed_positions: dict
+         Set it if in your face images are registered to a fixed position in the image
+
+      memory_demanding: bool
+
+    """
+
+    return facenet_template(
+        embedding=InceptionResnetv2_MsCeleb_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
diff --git a/bob/bio/face/embeddings/tf2_inception_resnet.py b/bob/bio/face/embeddings/tf2_inception_resnet.py
deleted file mode 100644
index b462f3a885c67153b15a4e5f930637e9ebd14cee..0000000000000000000000000000000000000000
--- a/bob/bio/face/embeddings/tf2_inception_resnet.py
+++ /dev/null
@@ -1,286 +0,0 @@
-import tensorflow as tf
-from bob.learn.tensorflow.utils.image import to_channels_last
-from sklearn.base import TransformerMixin, BaseEstimator
-from sklearn.utils import check_array
-
-from tensorflow.keras import Sequential
-from tensorflow.keras.layers.experimental import preprocessing
-from bob.extension import rc
-from functools import partial
-import pkg_resources
-import os
-from bob.bio.face.embeddings import download_model
-import numpy as np
-
-
-def sanderberg_rescaling():
-    # FIXED_STANDARDIZATION from https://github.com/davidsandberg/facenet
-    # [-0.99609375, 0.99609375]
-    preprocessor = preprocessing.Rescaling(scale=1 / 128, offset=-127.5 / 128)
-    return preprocessor
-
-
-class TransformTensorflow(TransformerMixin, BaseEstimator):
-    """
-    Base Transformer for Tensorflow architectures.
-
-    Szegedy, Christian, et al. "Inception-v4, inception-resnet and the impact of residual connections on learning." arXiv preprint arXiv:1602.07261 (2016).
-
-    Parameters
-    ----------
-
-    checkpoint_path: str
-       Path containing the checkpoint
-
-    preprocessor:
-        Preprocessor function
-
-    memory_demanding bool
-        If `True`, the `transform` method will run one sample at the time.
-        This is useful when there is not enough memory available to forward big chucks of data.
-    """
-
-    def __init__(
-        self, checkpoint_path, preprocessor=None, memory_demanding=False, **kwargs
-    ):
-        super().__init__(**kwargs)
-        self.checkpoint_path = checkpoint_path
-        self.model = None
-        self.preprocessor = preprocessor
-        self.memory_demanding = memory_demanding
-
-    def load_model(self):
-        self.model = tf.keras.models.load_model(self.checkpoint_path)
-
-    def transform(self, X):
-        def _transform(X):
-            X = tf.convert_to_tensor(X)
-            X = to_channels_last(X)
-
-            if X.shape[-3:] != self.model.input_shape[-3:]:
-                raise ValueError(
-                    f"Image shape {X.shape} not supported. Expected {self.model.input_shape}"
-                )
-
-            return self.inference(X).numpy()
-
-        if self.model is None:
-            self.load_model()
-
-        X = check_array(X, allow_nd=True)
-
-        if self.memory_demanding:
-            return np.array([_transform(x[None, ...]) for x in X])
-        else:
-            return _transform(X)
-
-    def __getstate__(self):
-        # Handling unpicklable objects
-        d = self.__dict__.copy()
-        d["model"] = None
-        return d
-
-    def inference(self, X):
-        if self.preprocessor is not None:
-            X = self.preprocessor(tf.cast(X, "float32"))
-
-        prelogits = self.model.predict_on_batch(X)
-        embeddings = tf.math.l2_normalize(prelogits, axis=-1)
-        return embeddings
-
-    def _more_tags(self):
-        return {"stateless": True, "requires_fit": False}
-
-    def __del__(self):
-        self.model = None
-
-
-class InceptionResnetv2_MsCeleb_CenterLoss_2018(TransformTensorflow):
-    """
-    InceptionResnet v2 model trained in 2018 using the MSCeleb dataset in the context of the work:
-
-    Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
-
-    """
-
-    def __init__(self, memory_demanding=False):
-        internal_path = pkg_resources.resource_filename(
-            __name__, os.path.join("data", "inceptionresnetv2_msceleb_centerloss_2018"),
-        )
-
-        checkpoint_path = (
-            internal_path
-            if rc["bob.bio.face.models.InceptionResnetv2_MsCeleb_CenterLoss_2018"]
-            is None
-            else rc["bob.bio.face.models.InceptionResnetv2_MsCeleb_CenterLoss_2018"]
-        )
-
-        urls = [
-            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv2_msceleb_centerloss_2018.tar.gz",
-            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv2_msceleb_centerloss_2018.tar.gz",
-        ]
-
-        download_model(
-            checkpoint_path, urls, "inceptionresnetv2_msceleb_centerloss_2018.tar.gz"
-        )
-
-        super(InceptionResnetv2_MsCeleb_CenterLoss_2018, self).__init__(
-            checkpoint_path,
-            preprocessor=tf.image.per_image_standardization,
-            memory_demanding=memory_demanding,
-        )
-
-
-class InceptionResnetv2_Casia_CenterLoss_2018(TransformTensorflow):
-    """
-    InceptionResnet v2 model trained in 2018 using the CasiaWebFace dataset in the context of the work:
-
-    Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
-
-    """
-
-    def __init__(self, memory_demanding=False):
-        internal_path = pkg_resources.resource_filename(
-            __name__, os.path.join("data", "inceptionresnetv2_casia_centerloss_2018"),
-        )
-
-        checkpoint_path = (
-            internal_path
-            if rc["bob.bio.face.models.InceptionResnetv2_Casia_CenterLoss_2018"] is None
-            else rc["bob.bio.face.models.InceptionResnetv2_Casia_CenterLoss_2018"]
-        )
-
-        urls = [
-            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv2_casia_centerloss_2018.tar.gz",
-            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv2_casia_centerloss_2018.tar.gz",
-        ]
-
-        download_model(
-            checkpoint_path, urls, "inceptionresnetv2_casia_centerloss_2018.tar.gz"
-        )
-
-        super(InceptionResnetv2_Casia_CenterLoss_2018, self).__init__(
-            checkpoint_path,
-            preprocessor=tf.image.per_image_standardization,
-            memory_demanding=memory_demanding,
-        )
-
-
-class InceptionResnetv1_Casia_CenterLoss_2018(TransformTensorflow):
-    """
-    InceptionResnet v1 model trained in 2018 using the CasiaWebFace dataset in the context of the work:
-
-    Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
-
-    """
-
-    def __init__(self, memory_demanding=False):
-        internal_path = pkg_resources.resource_filename(
-            __name__, os.path.join("data", "inceptionresnetv1_casia_centerloss_2018"),
-        )
-
-        checkpoint_path = (
-            internal_path
-            if rc["bob.bio.face.models.InceptionResnetv1_Casia_CenterLoss_2018"] is None
-            else rc["bob.bio.face.models.InceptionResnetv1_Casia_CenterLoss_2018"]
-        )
-
-        urls = [
-            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv1_casia_centerloss_2018.tar.gz",
-            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv1_casia_centerloss_2018.tar.gz",
-        ]
-
-        download_model(
-            checkpoint_path, urls, "inceptionresnetv1_casia_centerloss_2018.tar.gz"
-        )
-
-        super(InceptionResnetv1_Casia_CenterLoss_2018, self).__init__(
-            checkpoint_path,
-            preprocessor=tf.image.per_image_standardization,
-            memory_demanding=memory_demanding,
-        )
-
-
-class InceptionResnetv1_MsCeleb_CenterLoss_2018(TransformTensorflow):
-    """
-    InceptionResnet v1 model trained in 2018 using the MsCeleb dataset in the context of the work:
-
-    Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
-
-    """
-
-    def __init__(self, memory_demanding=False):
-        internal_path = pkg_resources.resource_filename(
-            __name__, os.path.join("data", "inceptionresnetv1_msceleb_centerloss_2018"),
-        )
-
-        checkpoint_path = (
-            internal_path
-            if rc["bob.bio.face.models.InceptionResnetv1_MsCeleb_CenterLoss_2018"]
-            is None
-            else rc["bob.bio.face.models.InceptionResnetv1_MsCeleb_CenterLoss_2018"]
-        )
-
-        urls = [
-            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv1_msceleb_centerloss_2018.tar.gz",
-            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv1_msceleb_centerloss_2018.tar.gz",
-        ]
-
-        download_model(
-            checkpoint_path, urls, "inceptionresnetv1_msceleb_centerloss_2018.tar.gz"
-        )
-
-        super(InceptionResnetv1_MsCeleb_CenterLoss_2018, self).__init__(
-            checkpoint_path,
-            preprocessor=tf.image.per_image_standardization,
-            memory_demanding=memory_demanding,
-        )
-
-
-class FaceNetSanderberg_20170512_110547(TransformTensorflow):
-    """
-    Wrapper for the free FaceNet from David Sanderberg model 20170512_110547:
-    https://github.com/davidsandberg/facenet
-
-    And for a preprocessor you can use::
-
-        from bob.bio.face.preprocessor import FaceCrop
-        # This is the size of the image that this model expects
-        CROPPED_IMAGE_HEIGHT = 160
-        CROPPED_IMAGE_WIDTH = 160
-        # eye positions for frontal images
-        RIGHT_EYE_POS = (46, 53)
-        LEFT_EYE_POS = (46, 107)
-        # Crops the face using eye annotations
-        preprocessor = FaceCrop(
-            cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
-            cropped_positions={'leye': LEFT_EYE_POS, 'reye': RIGHT_EYE_POS},
-            color_channel='rgb'
-        )
-    """
-
-    def __init__(self, memory_demanding=False):
-        internal_path = pkg_resources.resource_filename(
-            __name__, os.path.join("data", "facenet_sanderberg_20170512_110547"),
-        )
-
-        checkpoint_path = (
-            internal_path
-            if rc["bob.bio.face.models.facenet_sanderberg_20170512_110547"] is None
-            else rc["bob.bio.face.models.facenet_sanderberg_20170512_110547"]
-        )
-
-        urls = [
-            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/facenet_sanderberg_20170512_110547.tar.gz"
-        ]
-
-        download_model(
-            checkpoint_path, urls, "facenet_sanderberg_20170512_110547.tar.gz"
-        )
-
-        super(FaceNetSanderberg_20170512_110547, self).__init__(
-            checkpoint_path,
-            tf.image.per_image_standardization,
-            memory_demanding=memory_demanding,
-        )
-
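Two details of the removed ``TransformTensorflow`` are worth preserving in
whatever replaces it: with ``memory_demanding`` set, ``transform`` forwards
samples one at a time instead of as a single batch, and ``__getstate__``
drops the loaded model so the estimator stays picklable (the model is lazily
reloaded on the next ``transform``). A standalone sketch of the batching
pattern, with ``embed`` standing in as a hypothetical per-batch inference
function::

    import numpy as np

    def transform(X, embed, memory_demanding=False):
        # Per-sample forwarding trades throughput for a smaller memory
        # footprint; x[None, ...] restores the batch dimension that the
        # network expects.
        if memory_demanding:
            return np.array([embed(x[None, ...]) for x in X])
        return embed(X)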
diff --git a/bob/bio/face/test/test_baselines.py b/bob/bio/face/test/test_baselines.py
index 0386395311765a21d678b49df6902ba4feb27063..aa2fdf989bfcc73492221f69fc94a3b15b909d09 100644
--- a/bob/bio/face/test/test_baselines.py
+++ b/bob/bio/face/test/test_baselines.py
@@ -171,11 +171,11 @@ def test_gabor_graph():
 @is_library_available("torch")
 def test_afffe():
     run_baseline(
-        "afffe", target_scores=-1.0274936425058916,
+        "afffe", target_scores=-0.27480835869298026,
     )
 
 
 @pytest.mark.slow
 @is_library_available("cv2")
 def test_vgg16_oxford():
-    run_baseline("vgg16-oxford", target_scores=-0.9911880900309596)
+    run_baseline("vgg16-oxford", target_scores=-0.0003682451299356071)
diff --git a/setup.py b/setup.py
index 6efb33dd89ea2532ebda7e053b6b4f530d0926ae..e877cae052b5f4bdba3d635a4b1e0c9f7342ddef 100644
--- a/setup.py
+++ b/setup.py
@@ -121,25 +121,6 @@ setup(
             "mtcnn                    = bob.bio.face.config.annotator.mtcnn:annotator",
             "tinyface                 = bob.bio.face.config.annotator.tinyface:annotator",
         ],
-        "bob.bio.transformer": [
-            "facedetect-eye-estimate = bob.bio.face.config.annotator.facedetect_eye_estimate:transformer",
-            "facedetect = bob.bio.face.config.annotator.facedetect:transformer",
-            "flandmark = bob.bio.face.config.annotator.flandmark:annotator",
-            "mtcnn = bob.bio.face.config.annotator.mtcnn:transformer",
-            "tinyface = bob.bio.face.config.annotator.tinyface:transformer",
-            "facenet-sanderberg = bob.bio.face.config.baseline.facenet_sanderberg:transformer",
-            "inception-resnetv1-casiawebface = bob.bio.face.config.baseline.inception_resnetv1_casiawebface:transformer",
-            "inception-resnetv2-casiawebface = bob.bio.face.config.baseline.inception_resnetv2_casiawebface:transformer",
-            "inception-resnetv1-msceleb = bob.bio.face.config.baseline.inception_resnetv1_msceleb:transformer",
-            "inception-resnetv2-msceleb = bob.bio.face.config.baseline.inception_resnetv2_msceleb:transformer",
-            "arcface-insightface = bob.bio.face.config.baseline.arcface_insightface:transformer",
-            "gabor-graph = bob.bio.face.config.baseline.gabor_graph:transformer",
-            "lgbphs = bob.bio.face.config.baseline.lgbphs:transformer",
-            "dummy = bob.bio.face.config.baseline.dummy:transformer",
-            "mxnet-tinyface = bob.bio.face.config.baseline.mxnet_tinyface:transformer",
-            "afffe = bob.bio.face.config.baseline.afffe:transformer",
-            "vgg16-oxford = bob.bio.face.config.baseline.vgg16_oxford:transformer",
-        ],
         # baselines
         "bob.bio.pipeline": [
             "facenet-sanderberg = bob.bio.face.config.baseline.facenet_sanderberg:pipeline",