diff --git a/bob/bio/face/annotator/__init__.py b/bob/bio/face/annotator/__init__.py
index 600daa739fc6ccb8e34cdc3c2f4e23516d6e2093..b058d897018835424f7b6c4d999c85d289df513a 100644
--- a/bob/bio/face/annotator/__init__.py
+++ b/bob/bio/face/annotator/__init__.py
@@ -58,6 +58,7 @@ from .Base import Base
 from .bobipfacedetect import BobIpFacedetect
 from .bobipflandmark import BobIpFlandmark
 from .bobipmtcnn import BobIpMTCNN
+from .bobiptinyface import BobIpTinyface
 
 
 # gets sphinx autodoc done right - don't remove it
@@ -84,6 +85,7 @@ __appropriate__(
     BobIpFacedetect,
     BobIpFlandmark,
     BobIpMTCNN,
+    BobIpTinyface,
 )
 
-__all__ = [_ for _ in dir() if not _.startswith('_')]
+__all__ = [_ for _ in dir() if not _.startswith('_')]
\ No newline at end of file
diff --git a/bob/bio/face/annotator/bobiptinyface.py b/bob/bio/face/annotator/bobiptinyface.py
new file mode 100644
index 0000000000000000000000000000000000000000..fb6fbfe252a6f091e5c1b46025af89e672fa8f4a
--- /dev/null
+++ b/bob/bio/face/annotator/bobiptinyface.py
@@ -0,0 +1,40 @@
+import bob.ip.facedetect.tinyface
+from . import Base
+
+
+class BobIpTinyface(Base):
+    """Annotator using tinyface in bob.ip.facedetect"""
+
+    def __init__(self, prob_thresh=0.5, **kwargs):
+        super(BobIpTinyface, self).__init__(**kwargs)
+        self.detector = bob.ip.facedetect.tinyface.TinyFacesDetector(
+            prob_thresh=prob_thresh
+        )
+
+    @property
+    def prob_thresh(self):
+        return self.detector.prob_thresh
+
+    def annotate(self, image, **kwargs):
+        """Annotates an image using tinyface
+
+        Parameters
+        ----------
+        image : numpy.ndarray
+            An RGB image in Bob format.
+        **kwargs
+            Ignored.
+
+        Returns
+        -------
+        dict
+            Annotations with (topleft, bottomright) keys (or None).
+        """
+
+        # return the annotations for the first/largest face
+        annotations = self.detector.detect(image)
+
+        if annotations:
+            return annotations[0]
+        else:
+            return None
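
Illustrative usage of the new annotator (a minimal sketch, not part of the patch; the image path and threshold value are placeholders):

import bob.io.base
from bob.bio.face.annotator import BobIpTinyface

annotator = BobIpTinyface(prob_thresh=0.5)
image = bob.io.base.load("face.png")      # RGB image in Bob format (channels first)
annotations = annotator.annotate(image)   # {"topleft": ..., "bottomright": ...} or None
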
diff --git a/bob/bio/face/config/annotator/tinyface.py b/bob/bio/face/config/annotator/tinyface.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf223ea9802174d27ca2b7c0a41c6da61fb56f2e
--- /dev/null
+++ b/bob/bio/face/config/annotator/tinyface.py
@@ -0,0 +1,3 @@
+from bob.bio.face.annotator import BobIpTinyface
+
+annotator = BobIpTinyface()
\ No newline at end of file
diff --git a/bob/bio/face/config/baseline/afffe.py b/bob/bio/face/config/baseline/afffe.py
new file mode 100644
index 0000000000000000000000000000000000000000..d46eb007efbc3f88b51eaf3455bb8fd23a7cf33c
--- /dev/null
+++ b/bob/bio/face/config/baseline/afffe.py
@@ -0,0 +1,15 @@
+from bob.bio.face.embeddings.pytorch import afffe_baseline
+from bob.bio.face.utils import lookup_config_from_database
+
+
+annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
+    locals().get("database")
+)
+
+
+def load(annotation_type, fixed_positions=None, memory_demanding=False):
+    return afffe_baseline(annotation_type, fixed_positions, memory_demanding)
+
+
+pipeline = load(annotation_type, fixed_positions, memory_demanding)
+
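
For reference, the same pipeline can be assembled directly in Python without the config chaining (a minimal sketch; the argument values are illustrative and mirror the positional call above):

from bob.bio.face.embeddings.pytorch import afffe_baseline

# annotation_type, fixed_positions, memory_demanding
pipeline = afffe_baseline("eyes-center", None, False)
transformer = pipeline.transformer  # face cropping + AFFFE embedding extraction
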
diff --git a/bob/bio/face/config/baseline/arcface_insightface.py b/bob/bio/face/config/baseline/arcface_insightface.py
index 3e0181219dbf40d17e32d36d85a909a102b87fd1..311672c6c7ecba11a2d1d4d9f531b42dcf8a8a11 100644
--- a/bob/bio/face/config/baseline/arcface_insightface.py
+++ b/bob/bio/face/config/baseline/arcface_insightface.py
@@ -1,19 +1,18 @@
-from bob.bio.face.embeddings.mxnet_models import ArcFaceInsightFace
+from bob.bio.face.embeddings.mxnet import arcface_insightFace_lresnet100
 from bob.bio.face.utils import lookup_config_from_database
-from bob.bio.face.config.baseline.templates import arcface_baseline
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
     locals().get("database")
 )
 
+
 def load(annotation_type, fixed_positions=None):
 
-    return arcface_baseline(
-        embedding=ArcFaceInsightFace(memory_demanding=memory_demanding),
+    return arcface_insightFace_lresnet100(
         annotation_type=annotation_type,
         fixed_positions=fixed_positions,
+        memory_demanding=memory_demanding,
     )
 
 
 pipeline = load(annotation_type, fixed_positions)
-transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/facenet_sanderberg.py b/bob/bio/face/config/baseline/facenet_sanderberg.py
index 510d9287fba4613f072482f0f57026d8fec824ba..add7ac6fa4bbf3902e3a6df6f10dc165969364de 100644
--- a/bob/bio/face/config/baseline/facenet_sanderberg.py
+++ b/bob/bio/face/config/baseline/facenet_sanderberg.py
@@ -1,21 +1,15 @@
-from bob.bio.face.embeddings.tf2_inception_resnet import (
-    FaceNetSanderberg_20170512_110547,
-)
+from bob.bio.face.embeddings.tensorflow import facenet_sanderberg_20170512_110547
 from bob.bio.face.utils import lookup_config_from_database
-from bob.bio.face.config.baseline.templates import facenet_baseline
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
     locals().get("database")
 )
 
 
-def load(annotation_type, fixed_positions=None):
-    return facenet_baseline(
-        embedding=FaceNetSanderberg_20170512_110547(memory_demanding=memory_demanding),
-        annotation_type=annotation_type,
-        fixed_positions=fixed_positions,
+def load(annotation_type, fixed_positions=None, memory_demanding=False):
+    return facenet_sanderberg_20170512_110547(
+        annotation_type, fixed_positions, memory_demanding
     )
 
 
-pipeline = load(annotation_type, fixed_positions)
-transformer = pipeline.transformer
+pipeline = load(annotation_type, fixed_positions, memory_demanding)
diff --git a/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py b/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
index 149c074270a1001513dfbeacb0cbf09325784b28..1bb1b0bf61ed5cbb35ff97f84f37a19033bc33b4 100644
--- a/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
+++ b/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
@@ -1,23 +1,15 @@
-from bob.bio.face.embeddings.tf2_inception_resnet import (
-    InceptionResnetv1_Casia_CenterLoss_2018,
-)
+from bob.bio.face.embeddings.tensorflow import inception_resnet_v1_casia_centerloss_2018
 from bob.bio.face.utils import lookup_config_from_database
-from bob.bio.face.config.baseline.templates import facenet_baseline
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
     locals().get("database")
 )
 
 
-def load(annotation_type, fixed_positions=None):
-    return facenet_baseline(
-        embedding=InceptionResnetv1_Casia_CenterLoss_2018(
-            memory_demanding=memory_demanding
-        ),
-        annotation_type=annotation_type,
-        fixed_positions=fixed_positions,
+def load(annotation_type, fixed_positions=None, memory_demanding=None):
+    return inception_resnet_v1_casia_centerloss_2018(
+        annotation_type, fixed_positions, memory_demanding
     )
 
 
-pipeline = load(annotation_type, fixed_positions)
-transformer = pipeline.transformer
+pipeline = load(annotation_type, fixed_positions, memory_demanding)
diff --git a/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py b/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
index f7ce09f7d1d2bd57eee0490b269c0b5a857c1a8d..92c329481a689b7c2d254deca5da51c8db1a68b1 100644
--- a/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
+++ b/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
@@ -1,24 +1,17 @@
-from bob.bio.face.embeddings.tf2_inception_resnet import (
-    InceptionResnetv1_MsCeleb_CenterLoss_2018,
+from bob.bio.face.embeddings.tensorflow import (
+    inception_resnet_v1_msceleb_centerloss_2018,
 )
 from bob.bio.face.utils import lookup_config_from_database
-from bob.bio.face.config.baseline.templates import facenet_baseline
-
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
     locals().get("database")
 )
 
 
-def load(annotation_type, fixed_positions=None):
-    return facenet_baseline(
-        embedding=InceptionResnetv1_MsCeleb_CenterLoss_2018(
-            memory_demanding=memory_demanding
-        ),
-        annotation_type=annotation_type,
-        fixed_positions=fixed_positions,
+def load(annotation_type, fixed_positions=None, memory_demanding=None):
+    return inception_resnet_v1_msceleb_centerloss_2018(
+        annotation_type, fixed_positions, memory_demanding
     )
 
 
-pipeline = load(annotation_type, fixed_positions)
-transformer = pipeline.transformer
+pipeline = load(annotation_type, fixed_positions, memory_demanding)
diff --git a/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py b/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
index eadd91541b85511f31e48ad97140ba6bc5dc5d61..0cc4eb79003316dee55435b383f18217046635f8 100644
--- a/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
+++ b/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
@@ -1,24 +1,15 @@
-from bob.bio.face.embeddings.tf2_inception_resnet import (
-    InceptionResnetv2_Casia_CenterLoss_2018,
-)
+from bob.bio.face.embeddings.tensorflow import inception_resnet_v2_casia_centerloss_2018
 from bob.bio.face.utils import lookup_config_from_database
-from bob.bio.face.config.baseline.templates import facenet_baseline
-
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
     locals().get("database")
 )
 
 
-def load(annotation_type, fixed_positions=None):
-    return facenet_baseline(
-        embedding=InceptionResnetv2_Casia_CenterLoss_2018(
-            memory_demanding=memory_demanding
-        ),
-        annotation_type=annotation_type,
-        fixed_positions=fixed_positions,
+def load(annotation_type, fixed_positions=None, memory_demanding=None):
+    return inception_resnet_v2_casia_centerloss_2018(
+        annotation_type, fixed_positions, memory_demanding
     )
 
 
-pipeline = load(annotation_type, fixed_positions)
-transformer = pipeline.transformer
+pipeline = load(annotation_type, fixed_positions, memory_demanding)
diff --git a/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py b/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
index 0be122d36c854becb94ef192ecc79d5134682975..01e4d6ee0d6e543b4048aa204de48c65e126e076 100644
--- a/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
+++ b/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
@@ -1,23 +1,17 @@
-from bob.bio.face.embeddings.tf2_inception_resnet import (
-    InceptionResnetv2_MsCeleb_CenterLoss_2018,
+from bob.bio.face.embeddings.tensorflow import (
+    inception_resnet_v2_msceleb_centerloss_2018,
 )
 from bob.bio.face.utils import lookup_config_from_database
-from bob.bio.face.config.baseline.templates import facenet_baseline
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
     locals().get("database")
 )
 
 
-def load(annotation_type, fixed_positions=None):
-    return facenet_baseline(
-        embedding=InceptionResnetv2_MsCeleb_CenterLoss_2018(
-            memory_demanding=memory_demanding
-        ),
-        annotation_type=annotation_type,
-        fixed_positions=fixed_positions,
+def load(annotation_type, fixed_positions=None, memory_demanding=None):
+    return inception_resnet_v2_msceleb_centerloss_2018(
+        annotation_type, fixed_positions, memory_demanding
     )
 
 
-pipeline = load(annotation_type, fixed_positions)
-transformer = pipeline.transformer
+pipeline = load(annotation_type, fixed_positions, memory_demanding)
diff --git a/bob/bio/face/config/baseline/iresnet100.py b/bob/bio/face/config/baseline/iresnet100.py
new file mode 100644
index 0000000000000000000000000000000000000000..666e6f34cb94bd306abfafe8cab4c657520da052
--- /dev/null
+++ b/bob/bio/face/config/baseline/iresnet100.py
@@ -0,0 +1,15 @@
+from bob.bio.face.embeddings.pytorch import iresnet100
+from bob.bio.face.utils import lookup_config_from_database
+
+
+annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
+    locals().get("database")
+)
+
+
+def load(annotation_type, fixed_positions=None, memory_demanding=False):
+    return iresnet100(annotation_type, fixed_positions, memory_demanding)
+
+
+pipeline = load(annotation_type, fixed_positions, memory_demanding)
+
diff --git a/bob/bio/face/config/baseline/iresnet34.py b/bob/bio/face/config/baseline/iresnet34.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8cdf0ba9f19c1186dfdd726e9db578f9586afd6
--- /dev/null
+++ b/bob/bio/face/config/baseline/iresnet34.py
@@ -0,0 +1,15 @@
+from bob.bio.face.embeddings.pytorch import iresnet34
+from bob.bio.face.utils import lookup_config_from_database
+
+
+annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
+    locals().get("database")
+)
+
+
+def load(annotation_type, fixed_positions=None, memory_demanding=False):
+    return iresnet34(annotation_type, fixed_positions, memory_demanding)
+
+
+pipeline = load(annotation_type, fixed_positions, memory_demanding)
+
diff --git a/bob/bio/face/config/baseline/iresnet50.py b/bob/bio/face/config/baseline/iresnet50.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a33a73410fb144098f945a02758f09de2678339
--- /dev/null
+++ b/bob/bio/face/config/baseline/iresnet50.py
@@ -0,0 +1,15 @@
+from bob.bio.face.embeddings.pytorch import iresnet50
+from bob.bio.face.utils import lookup_config_from_database
+
+
+annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
+    locals().get("database")
+)
+
+
+def load(annotation_type, fixed_positions=None, memory_demanding=False):
+    return iresnet50(annotation_type, fixed_positions, memory_demanding)
+
+
+pipeline = load(annotation_type, fixed_positions, memory_demanding)
+
diff --git a/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py b/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py
index 2355494888b81e8d8f7e8e49dc4ba8a3ba43daea..69423f543a4f8485e81e8cc4510d634cb2ebcb5a 100644
--- a/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py
+++ b/bob/bio/face/config/baseline/mobilenetv2_msceleb_arcface_2021.py
@@ -1,21 +1,15 @@
-from bob.bio.face.embeddings.mobilenet_v2 import MobileNetv2_MsCeleb_ArcFace_2021
+from bob.bio.face.embeddings.tensorflow import mobilenetv2_msceleb_arcface_2021
 from bob.bio.face.utils import lookup_config_from_database
-from bob.bio.face.config.baseline.templates import arcface_baseline
-
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
     locals().get("database")
 )
 
 
-def load(annotation_type, fixed_positions=None):
-
-    return arcface_baseline(
-        embedding=MobileNetv2_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
-        annotation_type=annotation_type,
-        fixed_positions=fixed_positions,
+def load(annotation_type, fixed_positions=None, memory_demanding=None):
+    return mobilenetv2_msceleb_arcface_2021(
+        annotation_type, fixed_positions, memory_demanding
     )
 
 
-pipeline = load(annotation_type, fixed_positions)
-transformer = pipeline.transformer
+pipeline = load(annotation_type, fixed_positions, memory_demanding)
diff --git a/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py b/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py
index 0560a97a62f42d33f5d8c54c3177760db61ab9fc..08f7dd62b40024c00658789cdcf5b99eb8317e5d 100644
--- a/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py
+++ b/bob/bio/face/config/baseline/resnet50_msceleb_arcface_2021.py
@@ -1,20 +1,15 @@
-from bob.bio.face.embeddings.resnet50 import Resnet50_MsCeleb_ArcFace_2021
+from bob.bio.face.embeddings.tensorflow import resnet50_msceleb_arcface_2021
 from bob.bio.face.utils import lookup_config_from_database
-from bob.bio.face.config.baseline.templates import arcface_baseline
-
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
     locals().get("database")
 )
 
 
-def load(annotation_type, fixed_positions=None):
-    return arcface_baseline(
-        embedding=Resnet50_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
-        annotation_type=annotation_type,
-        fixed_positions=fixed_positions,
+def load(annotation_type, fixed_positions=None, memory_demanding=None):
+    return resnet50_msceleb_arcface_2021(
+        annotation_type, fixed_positions, memory_demanding
     )
 
 
-pipeline = load(annotation_type, fixed_positions)
-transformer = pipeline.transformer
+pipeline = load(annotation_type, fixed_positions, memory_demanding)
diff --git a/bob/bio/face/config/baseline/resnet50_msceleb_arcface_20210521.py b/bob/bio/face/config/baseline/resnet50_msceleb_arcface_20210521.py
new file mode 100644
index 0000000000000000000000000000000000000000..b54d2cafeaea1865bc5c642eef4669db9a5d11e1
--- /dev/null
+++ b/bob/bio/face/config/baseline/resnet50_msceleb_arcface_20210521.py
@@ -0,0 +1,15 @@
+from bob.bio.face.embeddings.tensorflow import resnet50_msceleb_arcface_20210521
+from bob.bio.face.utils import lookup_config_from_database
+
+annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
+    locals().get("database")
+)
+
+
+def load(annotation_type, fixed_positions=None, memory_demanding=None):
+    return resnet50_msceleb_arcface_20210521(
+        annotation_type, fixed_positions, memory_demanding
+    )
+
+
+pipeline = load(annotation_type, fixed_positions, memory_demanding)
diff --git a/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py b/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py
index 64d6ec4c84fbb43c1ef3061c070c6a568e7d74d6..475e7efd3d21e64aae8f424ed66baca1efa47d28 100644
--- a/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py
+++ b/bob/bio/face/config/baseline/resnet50_vgg2_arcface_2021.py
@@ -1,20 +1,15 @@
-from bob.bio.face.embeddings.resnet50 import Resnet50_VGG2_ArcFace_2021
+from bob.bio.face.embeddings.tensorflow import resnet50_vgg2_arcface_2021
 from bob.bio.face.utils import lookup_config_from_database
-from bob.bio.face.config.baseline.templates import arcface_baseline
-
 
 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
     locals().get("database")
 )
 
 
-def load(annotation_type, fixed_positions=None):
-    return arcface_baseline(
-        embedding=Resnet50_VGG2_ArcFace_2021(memory_demanding=memory_demanding),
-        annotation_type=annotation_type,
-        fixed_positions=fixed_positions,
+def load(annotation_type, fixed_positions=None, memory_demanding=None):
+    return resnet50_vgg2_arcface_2021(
+        annotation_type, fixed_positions, memory_demanding
     )
 
 
-pipeline = load(annotation_type, fixed_positions)
-transformer = pipeline.transformer
+pipeline = load(annotation_type, fixed_positions, memory_demanding)
diff --git a/bob/bio/face/config/baseline/templates.py b/bob/bio/face/config/baseline/templates.py
deleted file mode 100644
index 415ecd2831432dfb96b94ff4eae814409cdc2088..0000000000000000000000000000000000000000
--- a/bob/bio/face/config/baseline/templates.py
+++ /dev/null
@@ -1,54 +0,0 @@
-from bob.bio.face.utils import (
-    dnn_default_cropping,
-    embedding_transformer,
-)
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
-
-
-def arcface_baseline(embedding, annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (112, 112)
-    if annotation_type == "eyes-center":
-        # Hard coding eye positions for backward consistency
-        cropped_positions = {
-            "leye": (55, 81),
-            "reye": (55, 42),
-        }
-    else:
-        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
-
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
-        embedding=embedding,
-        cropped_positions=cropped_positions,
-        fixed_positions=fixed_positions,
-        color_channel="rgb",
-        annotator="mtcnn",
-    )
-
-    algorithm = Distance()
-
-    return VanillaBiometricsPipeline(transformer, algorithm)
-
-
-def facenet_baseline(embedding, annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (160, 160)
-    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
-
-    # ASSEMBLE TRANSFORMER
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
-        embedding=embedding,
-        cropped_positions=cropped_positions,
-        fixed_positions=fixed_positions,
-        color_channel="rgb",
-        annotator="mtcnn",
-    )
-
-    algorithm = Distance()
-
-    return VanillaBiometricsPipeline(transformer, algorithm)
diff --git a/bob/bio/face/config/baseline/tf2_inception_resnet.py b/bob/bio/face/config/baseline/tf2_inception_resnet.py
deleted file mode 100644
index 38d78c53de52cac64e1e668f98c72635789adf99..0000000000000000000000000000000000000000
--- a/bob/bio/face/config/baseline/tf2_inception_resnet.py
+++ /dev/null
@@ -1,24 +0,0 @@
-from bob.extension import rc
-from bob.bio.face.embeddings.tf2_inception_resnet import InceptionResnetv2
-from bob.bio.face.utils import lookup_config_from_database
-from bob.bio.face.config.baseline.templates import facenet_baseline
-
-annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
-    locals().get("database")
-)
-
-
-def load(annotation_type, fixed_positions=None):
-    extractor_path = rc["bob.bio.face.tf2.casia-webface-inception-v2"]
-    embedding = InceptionResnetv2(
-        checkpoint_path=extractor_path, memory_demanding=memory_demanding
-    )
-    return facenet_baseline(
-        embedding=embedding,
-        annotation_type=annotation_type,
-        fixed_positions=fixed_positions,
-    )
-
-
-pipeline = load(annotation_type, fixed_positions)
-transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/vgg16_oxford.py b/bob/bio/face/config/baseline/vgg16_oxford.py
new file mode 100644
index 0000000000000000000000000000000000000000..23bf274acc7fe480f7846241b74a95ff1076671b
--- /dev/null
+++ b/bob/bio/face/config/baseline/vgg16_oxford.py
@@ -0,0 +1,14 @@
+from bob.bio.face.embeddings.opencv import vgg16_oxford_baseline
+from bob.bio.face.utils import lookup_config_from_database
+
+
+annotation_type, fixed_positions, _ = lookup_config_from_database(
+    locals().get("database")
+)
+
+
+def load(annotation_type, fixed_positions=None):
+    return vgg16_oxford_baseline(annotation_type, fixed_positions)
+
+
+pipeline = load(annotation_type, fixed_positions)
diff --git a/bob/bio/face/database/casia_africa.py b/bob/bio/face/database/casia_africa.py
index 833a18585cdf83f4d84c2bb22af1519cd801a697..23a696fd2d128dbb3fd51f145995f62f255f6356 100644
--- a/bob/bio/face/database/casia_africa.py
+++ b/bob/bio/face/database/casia_africa.py
@@ -97,7 +97,7 @@ class CasiaAfricaDatabase(CSVDataset):
         One of the database protocols. Options are "ID-V-All-Ep1", "ID-V-All-Ep2" and "ID-V-All-Ep3"
     """
 
-    def __init__(self, protocol):
+    def __init__(self, protocol, annotation_type="eyes-center", fixed_positions=None):
 
         # Downloading model if not exists
         urls = CasiaAfricaDatabase.urls()
@@ -107,9 +107,6 @@ class CasiaAfricaDatabase(CSVDataset):
             file_hash="324bd69b581477d30606417be8e30d2a",
         )
 
-        self.annotation_type = "eyes-center"
-        self.fixed_positions = None
-
         directory = (
             rc["bob.db.casia-africa.directory"]
             if rc["bob.db.casia-africa.directory "]
@@ -117,8 +114,9 @@ class CasiaAfricaDatabase(CSVDataset):
         )
 
         super().__init__(
-            filename,
-            protocol,
+            name="casia-africa",
+            dataset_protocol_path=filename,
+            protocol=protocol,
             csv_to_sample_loader=make_pipeline(
                 CSVToSampleLoaderBiometrics(
                     data_loader=bob.io.base.load,
@@ -128,6 +126,8 @@ class CasiaAfricaDatabase(CSVDataset):
                 ),
                 EyesAnnotations(),
             ),
+            annotation_type=annotation_type,
+            fixed_positions=fixed_positions,
         )
 
     @staticmethod
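
The same constructor pattern applies to the other CSV-based databases touched below; a minimal sketch for this one (the protocol name comes from the docstring above, the fixed eye coordinates are hypothetical placeholders, and the import path assumes the class is exported from bob.bio.face.database):

from bob.bio.face.database import CasiaAfricaDatabase

# default: per-sample eyes-center annotations read from the protocol CSV files
database = CasiaAfricaDatabase(protocol="ID-V-All-Ep1")

# hypothetical variant: bypass per-sample annotations and crop at fixed eye positions
database_fixed = CasiaAfricaDatabase(
    protocol="ID-V-All-Ep1",
    fixed_positions={"leye": (115, 142), "reye": (115, 82)},
)
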
diff --git a/bob/bio/face/database/cbsr_nir_vis_2.py b/bob/bio/face/database/cbsr_nir_vis_2.py
index 2d34dc372aa8a4bd65bf7ae47adc793594b634a2..581ca39a89d8bc08345699385c749fd703e50519 100644
--- a/bob/bio/face/database/cbsr_nir_vis_2.py
+++ b/bob/bio/face/database/cbsr_nir_vis_2.py
@@ -54,15 +54,13 @@ class CBSRNirVis2Database(CSVDataset):
         One of the database protocols.
     """
 
-    def __init__(self, protocol):
+    def __init__(self, protocol, annotation_type="eyes-center", fixed_positions=None):
 
         # Downloading model if not exists
         urls = CBSRNirVis2Database.urls()
         filename = get_file(
             "cbsr_nir_vis_2.tar.gz", urls, file_hash="116da4537c1099915cdc0f08feb651bd",
         )
-        self.annotation_type = "eyes-center"
-        self.fixed_positions = None
 
         directory = (
             rc["bob.db.cbsr-nir-vis-2.directory"]
@@ -81,8 +79,9 @@ class CBSRNirVis2Database(CSVDataset):
                 raise ValueError("File `{0}` not found".format(str(new_filename)))
 
         super().__init__(
-            filename,
-            protocol,
+            name="cbsr-nir-vis2",
+            dataset_protocol_path=filename,
+            protocol=protocol,
             csv_to_sample_loader=make_pipeline(
                 CSVToSampleLoaderBiometrics(
                     data_loader=load,
@@ -91,6 +90,8 @@ class CBSRNirVis2Database(CSVDataset):
                 ),
                 EyesAnnotations(),
             ),
+            annotation_type=annotation_type,
+            fixed_positions=fixed_positions,
         )
 
     @staticmethod
diff --git a/bob/bio/face/database/frgc.py b/bob/bio/face/database/frgc.py
index 12964a310a3b7f33976b84b99d0063bbf1a53549..8403ac9190400225ecc90ad3680aab311bcc00c5 100644
--- a/bob/bio/face/database/frgc.py
+++ b/bob/bio/face/database/frgc.py
@@ -20,7 +20,7 @@ class FRGCDatabase(CSVDataset):
     Face Recognition Grand Challenge dataset
     """
 
-    def __init__(self, protocol):
+    def __init__(self, protocol, annotation_type="eyes-center", fixed_positions=None):
 
         # Downloading model if not exists
         urls = FRGCDatabase.urls()
@@ -28,12 +28,10 @@ class FRGCDatabase(CSVDataset):
             "frgc.tar.gz", urls, file_hash="328d2c71ae19a41679defa9585b3140f"
         )
 
-        self.annotation_type = "eyes-center"
-        self.fixed_positions = None
-
         super().__init__(
-            filename,
-            protocol,
+            name="frgc",
+            dataset_protocol_path=filename,
+            protocol=protocol,
             csv_to_sample_loader=make_pipeline(
                 CSVToSampleLoaderBiometrics(
                     data_loader=bob.io.base.load,
@@ -44,6 +42,8 @@ class FRGCDatabase(CSVDataset):
                 ),
                 EyesAnnotations(),
             ),
+            annotation_type=annotation_type,
+            fixed_positions=fixed_positions,
         )
 
     @staticmethod
diff --git a/bob/bio/face/database/meds.py b/bob/bio/face/database/meds.py
index 2181f82bcf844ee429cdd990c5b6c1fa6e1bfda6..5286de567ed5053843b68935801fcf6d911af12f 100644
--- a/bob/bio/face/database/meds.py
+++ b/bob/bio/face/database/meds.py
@@ -93,7 +93,7 @@ class MEDSDatabase(CSVDatasetZTNorm):
 
     """
 
-    def __init__(self, protocol):
+    def __init__(self, protocol, annotation_type="eyes-center", fixed_positions=None):
 
         # Downloading model if not exists
         urls = MEDSDatabase.urls()
@@ -101,12 +101,10 @@ class MEDSDatabase(CSVDatasetZTNorm):
             "meds.tar.gz", urls, file_hash="3b01354d4c170672ac14120b80dace75"
         )
 
-        self.annotation_type = "eyes-center"
-        self.fixed_positions = None
-
-        database = CSVDataset(
-            filename,
-            protocol,
+        super().__init__(
+            name="meds",
+            dataset_protocol_path=filename,
+            protocol=protocol,
             csv_to_sample_loader=make_pipeline(
                 CSVToSampleLoaderBiometrics(
                     data_loader=bob.io.base.load,
@@ -117,10 +115,10 @@ class MEDSDatabase(CSVDatasetZTNorm):
                 ),
                 EyesAnnotations(),
             ),
+            annotation_type=annotation_type,
+            fixed_positions=fixed_positions,
         )
 
-        super().__init__(database)
-
     @staticmethod
     def urls():
         return [
diff --git a/bob/bio/face/database/mobio.py b/bob/bio/face/database/mobio.py
index 7a06b2a6b4384e6bc57d50c58f50dcaaafd6ba13..94dbe5d7b5d805a84da5e668207c77a0502d2f5c 100644
--- a/bob/bio/face/database/mobio.py
+++ b/bob/bio/face/database/mobio.py
@@ -48,7 +48,7 @@ class MobioDatabase(CSVDatasetZTNorm):
 
     """
 
-    def __init__(self, protocol):
+    def __init__(self, protocol, annotation_type="eyes-center", fixed_positions=None):
 
         # Downloading model if not exists
         urls = MobioDatabase.urls()
@@ -56,12 +56,10 @@ class MobioDatabase(CSVDatasetZTNorm):
             "mobio.tar.gz", urls, file_hash="42cee778c17a34762d5fc5dd13ce3ee6"
         )
 
-        self.annotation_type = "eyes-center"
-        self.fixed_positions = None
-
-        database = CSVDataset(
-            filename,
-            protocol,
+        super().__init__(
+            name="mobio",
+            dataset_protocol_path=filename,
+            protocol=protocol,
             csv_to_sample_loader=make_pipeline(
                 CSVToSampleLoaderBiometrics(
                     data_loader=bob.io.base.load,
@@ -72,10 +70,10 @@ class MobioDatabase(CSVDatasetZTNorm):
                 ),
                 EyesAnnotations(),
             ),
+            annotation_type=annotation_type,
+            fixed_positions=fixed_positions,
         )
 
-        super().__init__(database)
-
     @staticmethod
     def protocols():
         # TODO: Until we have (if we have) a function that dumps the protocols, let's use this one.
diff --git a/bob/bio/face/database/morph.py b/bob/bio/face/database/morph.py
index a1e37074a455d7b0c7ea679608d2432f0362382d..b922f49b4fde078fb58fa140d863486297ddad91 100644
--- a/bob/bio/face/database/morph.py
+++ b/bob/bio/face/database/morph.py
@@ -57,7 +57,7 @@ class MorphDatabase(CSVDatasetZTNorm):
 
     """
 
-    def __init__(self, protocol):
+    def __init__(self, protocol, annotation_type="eyes-center", fixed_positions=None):
 
         # Downloading model if not exists
         urls = MorphDatabase.urls()
@@ -65,12 +65,10 @@ class MorphDatabase(CSVDatasetZTNorm):
             "morph.tar.gz", urls, file_hash="9efa1ff13ef6984ebfcf86f1b1f58873"
         )
 
-        self.annotation_type = "eyes-center"
-        self.fixed_positions = None
-
-        database = CSVDataset(
-            filename,
-            protocol,
+        super().__init__(
+            name="morph",
+            dataset_protocol_path=filename,
+            protocol=protocol,
             csv_to_sample_loader=make_pipeline(
                 CSVToSampleLoaderBiometrics(
                     data_loader=bob.io.base.load,
@@ -81,10 +79,10 @@ class MorphDatabase(CSVDatasetZTNorm):
                 ),
                 EyesAnnotations(),
             ),
+            annotation_type=annotation_type,
+            fixed_positions=fixed_positions,
         )
 
-        super().__init__(database)
-
     @staticmethod
     def urls():
         return [
diff --git a/bob/bio/face/database/multipie.py b/bob/bio/face/database/multipie.py
index 6a257c046af5a165e2a376a5e12fa2c80999d839..888e1d7de79a16e2a36790172dc675e54b7b9302 100644
--- a/bob/bio/face/database/multipie.py
+++ b/bob/bio/face/database/multipie.py
@@ -86,7 +86,7 @@ class MultipieDatabase(CSVDataset):
 
     """
 
-    def __init__(self, protocol):
+    def __init__(self, protocol, annotation_type="eyes-center", fixed_positions=None):
 
         # Downloading model if not exists
         urls = MultipieDatabase.urls()
@@ -94,12 +94,10 @@ class MultipieDatabase(CSVDataset):
             "multipie.tar.gz", urls, file_hash="6c27c9616c2d0373c5f052b061d80178"
         )
 
-        self.annotation_type = ["eyes-center", "left-profile", "right-profile"]
-        self.fixed_positions = None
-
         super().__init__(
-            filename,
-            protocol,
+            name="multipie",
+            dataset_protocol_path=filename,
+            protocol=protocol,
             csv_to_sample_loader=make_pipeline(
                 CSVToSampleLoaderBiometrics(
                     data_loader=bob.io.base.load,
@@ -110,6 +108,8 @@ class MultipieDatabase(CSVDataset):
                 ),
                 MultiposeAnnotations(),
             ),
+            annotation_type=["eyes-center", "left-profile", "right-profile"],
+            fixed_positions=None,
         )
 
     @staticmethod
diff --git a/bob/bio/face/database/pola_thermal.py b/bob/bio/face/database/pola_thermal.py
index 5a2e6d5c489fb81b20d7afd3d788bf51966ec011..5b4063e7ab33b032d72f9662adce995c3ca23e27 100644
--- a/bob/bio/face/database/pola_thermal.py
+++ b/bob/bio/face/database/pola_thermal.py
@@ -79,7 +79,7 @@ class PolaThermalDatabase(CSVDataset):
         One of the database protocols.
     """
 
-    def __init__(self, protocol):
+    def __init__(self, protocol, annotation_type="eyes-center", fixed_positions=None):
 
         # Downloading model if not exists
         urls = PolaThermalDatabase.urls()
@@ -87,9 +87,6 @@ class PolaThermalDatabase(CSVDataset):
             "pola_thermal.tar.gz", urls, file_hash="cfbd7362773c6d49292fe1998e3c3825",
         )
 
-        self.annotation_type = "eyes-center"
-        self.fixed_positions = None
-
         directory = (
             rc["bob.db.pola-thermal.directory"]
             if rc["bob.db.pola-thermal.directory"]
@@ -103,8 +100,9 @@ class PolaThermalDatabase(CSVDataset):
             return bob.io.base.load(path) / 255
 
         super().__init__(
-            filename,
-            protocol,
+            name="polathermal",
+            protocol=protocol,
+            dataset_protocol_path=filename,
             csv_to_sample_loader=make_pipeline(
                 CSVToSampleLoaderBiometrics(
                     data_loader=load,
@@ -113,6 +111,8 @@ class PolaThermalDatabase(CSVDataset):
                 ),
                 EyesAnnotations(),
             ),
+            annotation_type=annotation_type,
+            fixed_positions=fixed_positions,
         )
 
     @staticmethod
diff --git a/bob/bio/face/embeddings/__init__.py b/bob/bio/face/embeddings/__init__.py
index aea7ea8f3dbaf257cfc78cfecc2a017c979812a6..0dfddd48ad7fabfe266912392fe9b069005d8c48 100644
--- a/bob/bio/face/embeddings/__init__.py
+++ b/bob/bio/face/embeddings/__init__.py
@@ -1,31 +1,3 @@
-import os
-import bob.extension.download
-
-
-def download_model(model_path, urls, zip_file="model.tar.gz"):
-    """
-    Download and unzip a model from some URL.
-
-    Parameters
-    ----------
-
-    model_path: str
-        Path where the model is supposed to be stored
-
-    urls: list
-        List of paths where the model is stored
-
-    zip_file: str
-        File name after the download
-
-    """
-
-    if not os.path.exists(model_path):
-        os.makedirs(model_path, exist_ok=True)
-        zip_file = os.path.join(model_path, zip_file)
-        bob.extension.download.download_and_unzip(urls, zip_file)
-
-
 # gets sphinx autodoc done right - don't remove it
 def __appropriate__(*args):
     """Says object was actually declared here, and not in the import module.
diff --git a/bob/bio/face/embeddings/mobilenet_v2.py b/bob/bio/face/embeddings/mobilenet_v2.py
deleted file mode 100644
index 4966583ff6388b44706eec199fc3e6dcb8768359..0000000000000000000000000000000000000000
--- a/bob/bio/face/embeddings/mobilenet_v2.py
+++ /dev/null
@@ -1,79 +0,0 @@
-from bob.bio.face.embeddings import download_model
-
-
-from .tf2_inception_resnet import TransformTensorflow
-import pkg_resources
-import os
-from bob.extension import rc
-import tensorflow as tf
-
-
-class MobileNetv2_MsCeleb_ArcFace_2021(TransformTensorflow):
-    """
-    MobileNet Backbone trained with the MSCeleb 1M database.
-
-    The bottleneck layer (a.k.a embedding) has 512d.
-
-    The configuration file used to trained is:
-
-    ```yaml
-    batch-size: 128
-    face-size: 112
-    face-output_size: 112
-    n-classes: 85742
-
-
-    ## Backbone
-    backbone: 'mobilenet-v2'
-    head: 'arcface'
-    s: 10
-    bottleneck: 512
-    m: 0.5
-
-    # Training parameters
-    solver: "sgd"
-    lr: 0.01
-    dropout-rate: 0.5
-    epochs: 500
-
-
-    train-tf-record-path: "<PATH>"
-    validation-tf-record-path: "<PATH>"
-
-    ```
-
-
-    """
-
-    def __init__(self, memory_demanding=False):
-        internal_path = pkg_resources.resource_filename(
-            __name__, os.path.join("data", "mobilenet-v2-msceleb-arcface-2021"),
-        )
-
-        checkpoint_path = (
-            internal_path
-            if rc["bob.bio.face.models.mobilenet-v2-msceleb-arcface-2021"] is None
-            else rc["bob.bio.face.models.mobilenet-v2-msceleb-arcface-2021"]
-        )
-
-        urls = [
-            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/mobilenet-v2-msceleb-arcface-2021.tar.gz",
-            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/mobilenet-v2-msceleb-arcface-2021.tar.gz",
-        ]
-
-        download_model(checkpoint_path, urls, "mobilenet-v2-msceleb-arcface-2021.tar.gz")
-
-        super(MobileNetv2_MsCeleb_ArcFace_2021, self).__init__(
-            checkpoint_path,
-            preprocessor=lambda X: X / 255.0,
-            memory_demanding=memory_demanding,
-        )
-
-    def inference(self, X):
-        if self.preprocessor is not None:
-            X = self.preprocessor(tf.cast(X, "float32"))
-
-        prelogits = self.model.predict_on_batch(X)[0]
-        embeddings = tf.math.l2_normalize(prelogits, axis=-1)
-        return embeddings
-
diff --git a/bob/bio/face/embeddings/mxnet.py b/bob/bio/face/embeddings/mxnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..8bb1bde60f9634cfd2603fe8191704ca15893faa
--- /dev/null
+++ b/bob/bio/face/embeddings/mxnet.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch>
+# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+
+from sklearn.base import TransformerMixin, BaseEstimator
+from sklearn.utils import check_array
+from bob.extension.download import get_file
+import numpy as np
+import os
+
+
+class MxNetTransformer(TransformerMixin, BaseEstimator):
+
+    """
+    Base Transformer for MxNet architectures.
+
+    Parameters
+    ----------
+
+      checkpoint_path : str
+         Path containing the checkpoint
+
+      config : str
+         Path to the JSON file containing the DNN specification
+
+      preprocessor :
+         A function applied to the data right before the forward pass. The default is the identity (`lambda x: x`)
+
+      use_gpu : bool
+    """
+
+    def __init__(
+        self,
+        checkpoint_path=None,
+        config=None,
+        use_gpu=False,
+        memory_demanding=False,
+        preprocessor=lambda x: x,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+        self.checkpoint_path = checkpoint_path
+        self.config = config
+        self.use_gpu = use_gpu
+        self.model = None
+        self.memory_demanding = memory_demanding
+        self.preprocessor = preprocessor
+
+    def _load_model(self):
+        import mxnet as mx
+        from mxnet import gluon
+        import warnings
+
+        ctx = mx.gpu() if self.use_gpu else mx.cpu()
+
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore")
+            deserialized_net = gluon.nn.SymbolBlock.imports(
+                self.config, ["data"], self.checkpoint_path, ctx=ctx
+            )
+
+        self.model = deserialized_net
+
+    def transform(self, X):
+
+        import mxnet as mx
+
+        if self.model is None:
+            self._load_model()
+
+        X = check_array(X, allow_nd=True)
+        X = self.preprocessor(X)
+
+        def _transform(X):
+            X = mx.nd.array(X)
+            db = mx.io.DataBatch(data=(X,))
+            self.model.forward(db, is_train=False)
+            return self.model.get_outputs()[0].asnumpy()
+
+        if self.memory_demanding:
+            return np.array([_transform(x[None, ...]) for x in X])
+        else:
+            return _transform(X)
+
+    def __getstate__(self):
+        # Handling unpicklable objects
+
+        d = self.__dict__.copy()
+        d["model"] = None
+        return d
+
+    def _more_tags(self):
+        return {"stateless": True, "requires_fit": False}
+
+
+class ArcFaceInsightFace_LResNet100(MxNetTransformer):
+    """
+    Extracts features using deep face recognition models through the MxNet interface.
+
+    The pretrained model (`LResNet100E-IR,ArcFace@ms1m-refine-v2
+    <https://github.com/deepinsight/insightface>`_) is downloaded automatically the
+    first time this class is instantiated.
+
+    The extracted features can be combined with different matching algorithms.
+
+    """
+
+    def __init__(self, memory_demanding=False, use_gpu=False):
+        urls = [
+            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/mxnet/arcface_r100_v1_mxnet.tar.gz",
+            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/mxnet/arcface_r100_v1_mxnet.tar.gz",
+        ]
+        filename = get_file(
+            "arcface_r100_v1_mxnet.tar.gz",
+            urls,
+            cache_subdir="data/mxnet/arcface_r100_v1_mxnet",
+            file_hash="050ce7d6e731e560127c705f61391f48",
+            extract=True,
+        )
+        path = os.path.dirname(filename)
+        checkpoint_path = os.path.join(path, "model-symbol.json")
+        config = os.path.join(path, "model-0000.params")
+
+        super(ArcFaceInsightFace_LResNet100, self).__init__(
+            checkpoint_path=checkpoint_path,
+            config=config,
+            use_gpu=use_gpu,
+            memory_demanding=memory_demanding,
+        )
+
+    def _load_model(self):
+        import mxnet as mx
+
+        sym, arg_params, aux_params = mx.model.load_checkpoint(
+            os.path.join(os.path.dirname(self.checkpoint_path), "model"), 0
+        )
+
+        all_layers = sym.get_internals()
+        sym = all_layers["fc1_output"]
+
+        # LOADING CHECKPOINT
+        ctx = mx.gpu() if self.use_gpu else mx.cpu()
+        model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
+        data_shape = (1, 3, 112, 112)
+        model.bind(data_shapes=[("data", data_shape)])
+        model.set_params(arg_params, aux_params)
+
+        self.model = model
+
+
+from bob.bio.face.utils import (
+    dnn_default_cropping,
+    embedding_transformer,
+)
+from bob.bio.base.pipelines.vanilla_biometrics import (
+    Distance,
+    VanillaBiometricsPipeline,
+)
+
+
+def arcface_template(embedding, annotation_type, fixed_positions=None):
+    # DEFINE CROPPING
+    cropped_image_size = (112, 112)
+    if annotation_type == "eyes-center":
+        # Hard coding eye positions for backward consistency
+        cropped_positions = {
+            "leye": (55, 81),
+            "reye": (55, 42),
+        }
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=embedding,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
+    )
+
+    algorithm = Distance()
+
+    return VanillaBiometricsPipeline(transformer, algorithm)
+
+
+def arcface_insightFace_lresnet100(
+    annotation_type, fixed_positions=None, memory_demanding=False
+):
+    return arcface_template(
+        ArcFaceInsightFace_LResNet100(memory_demanding=memory_demanding),
+        annotation_type,
+        fixed_positions,
+    )
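
A minimal sketch of using the transformer on its own (the input here is random placeholder data; a real input is a batch of 112x112 RGB faces already cropped as in arcface_template above):

import numpy as np
from bob.bio.face.embeddings.mxnet import ArcFaceInsightFace_LResNet100

extractor = ArcFaceInsightFace_LResNet100(memory_demanding=False)
faces = np.random.rand(1, 3, 112, 112).astype("float32")  # placeholder batch (N, C, H, W)
embeddings = extractor.transform(faces)                    # (1, 512) ArcFace embeddings
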
diff --git a/bob/bio/face/embeddings/mxnet_models.py b/bob/bio/face/embeddings/mxnet_models.py
deleted file mode 100644
index d8eb786491356ca1bfc0b6893d895bb49200125a..0000000000000000000000000000000000000000
--- a/bob/bio/face/embeddings/mxnet_models.py
+++ /dev/null
@@ -1,99 +0,0 @@
-"""
-Load and predict using checkpoints based on mxnet
-"""
-
-from sklearn.base import TransformerMixin, BaseEstimator
-from sklearn.utils import check_array
-import numpy as np
-from bob.bio.face.embeddings import download_model
-import pkg_resources
-import os
-from bob.extension import rc
-
-
-class ArcFaceInsightFace(TransformerMixin, BaseEstimator):
-    """
-    ArcFace from Insight Face.
-
-    Model and source code taken from the repository
-
-    https://github.com/deepinsight/insightface/blob/master/python-package/insightface/model_zoo/face_recognition.py
-
-    """
-
-    def __init__(self, use_gpu=False, memory_demanding=False, **kwargs):
-        super().__init__(**kwargs)
-        self.model = None
-        self.use_gpu = use_gpu
-        self.memory_demanding = memory_demanding
-
-        internal_path = pkg_resources.resource_filename(
-            __name__, os.path.join("data", "arcface_insightface"),
-        )
-
-        checkpoint_path = (
-            internal_path
-            if rc["bob.bio.face.models.ArcFaceInsightFace"] is None
-            else rc["bob.bio.face.models.ArcFaceInsightFace"]
-        )
-
-        urls = [
-            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/mxnet/arcface_r100_v1_mxnet.tar.gz",
-            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/mxnet/arcface_r100_v1_mxnet.tar.gz"
-        ]
-
-        download_model(checkpoint_path, urls, "arcface_r100_v1_mxnet.tar.gz")
-
-        self.checkpoint_path = checkpoint_path
-
-    def load_model(self):
-        import mxnet as mx
-
-        sym, arg_params, aux_params = mx.model.load_checkpoint(
-            os.path.join(self.checkpoint_path, "model"), 0
-        )
-
-        all_layers = sym.get_internals()
-        sym = all_layers["fc1_output"]
-
-        # LOADING CHECKPOINT
-        ctx = mx.gpu() if self.use_gpu else mx.cpu()
-        model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
-        data_shape = (1, 3, 112, 112)
-        model.bind(data_shapes=[("data", data_shape)])
-        model.set_params(arg_params, aux_params)
-
-        # warmup
-        data = mx.nd.zeros(shape=data_shape)
-        db = mx.io.DataBatch(data=(data,))
-        model.forward(db, is_train=False)
-        embedding = model.get_outputs()[0].asnumpy()
-        self.model = model
-
-    def transform(self, X):
-        import mxnet as mx
-
-        if self.model is None:
-            self.load_model()
-
-        X = check_array(X, allow_nd=True)
-
-        def _transform(X):
-            X = mx.nd.array(X)
-            db = mx.io.DataBatch(data=(X,))
-            self.model.forward(db, is_train=False)
-            return self.model.get_outputs()[0].asnumpy()
-
-        if self.memory_demanding:
-            return np.array([_transform(x[None, ...]) for x in X])
-        else:
-            return _transform(X)
-
-    def __getstate__(self):
-        # Handling unpicklable objects
-        d = self.__dict__.copy()
-        d["model"] = None
-        return d
-
-    def _more_tags(self):
-        return {"stateless": True, "requires_fit": False}
diff --git a/bob/bio/face/embeddings/opencv.py b/bob/bio/face/embeddings/opencv.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a5d55f8e47be1b7cb126a0f07607cd78690ab64
--- /dev/null
+++ b/bob/bio/face/embeddings/opencv.py
@@ -0,0 +1,207 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch>
+# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+
+import bob.bio.base
+import numpy as np
+from sklearn.base import TransformerMixin, BaseEstimator
+from sklearn.utils import check_array
+import os
+from bob.extension.download import get_file
+from bob.bio.face.utils import (
+    dnn_default_cropping,
+    embedding_transformer,
+)
+
+from bob.bio.base.pipelines.vanilla_biometrics import (
+    Distance,
+    VanillaBiometricsPipeline,
+)
+
+
+class OpenCVTransformer(TransformerMixin, BaseEstimator):
+    """
+    Base Transformer using the OpenCV DNN interface (https://docs.opencv.org/master/d2/d58/tutorial_table_of_content_dnn.html).
+
+
+    .. note::
+       This class supports Caffe ``.caffemodel``, Tensorflow ``.pb``, Torch ``.t7`` ``.net``, Darknet ``.weights``, DLDT ``.bin``, and ONNX ``.onnx``
+
+
+    Parameters
+    ----------
+
+    checkpoint_path: str
+       Path containing the checkpoint
+
+    config:
+        Path containing some configuration file (e.g. .json, .prototxt)
+
+    preprocessor:
+        A function that will transform the data right before forward. The default transformation is `X/255`
+
+    """
+
+    def __init__(
+        self,
+        checkpoint_path=None,
+        config=None,
+        preprocessor=lambda x: x / 255,
+        **kwargs,
+    ):
+        super().__init__(**kwargs)
+        self.checkpoint_path = checkpoint_path
+        self.config = config
+        self.model = None
+        self.preprocessor = preprocessor
+
+    def _load_model(self):
+        import cv2
+
+        net = cv2.dnn.readNet(self.checkpoint_path, self.config)
+        self.model = net
+
+    def transform(self, X):
+        """__call__(image) -> feature
+
+        Extracts the features from the given image.
+
+        **Parameters:**
+
+        X : 2D :py:class:`numpy.ndarray` (floats)
+          The image to extract the features from.
+
+        **Returns:**
+
+        feature : 2D or 3D :py:class:`numpy.ndarray` (floats)
+          The list of features extracted from the image.
+        """
+
+        import cv2
+
+        if self.model is None:
+            self._load_model()
+
+        X = check_array(X, allow_nd=True)
+
+        X = self.preprocessor(X)
+
+        self.model.setInput(X)
+
+        return self.model.forward()
+
+    def __getstate__(self):
+        # Handling unpicklable objects
+
+        d = self.__dict__.copy()
+        d["model"] = None
+        return d
+
+    def _more_tags(self):
+        return {"stateless": True, "requires_fit": False}
+
+
+class VGG16_Oxford(OpenCVTransformer):
+    """
+    Original VGG16 model from the paper: https://www.robots.ox.ac.uk/~vgg/publications/2015/Parkhi15/parkhi15.pdf
+
+    """
+
+    def __init__(self, embedding_layer="fc7"):
+        urls = [
+            "https://www.robots.ox.ac.uk/~vgg/software/vgg_face/src/vgg_face_caffe.tar.gz",
+            "http://bobconda.lab.idiap.ch/public-upload/data/bob/bob.bio.face/master/caffe/vgg_face_caffe.tar.gz",
+        ]
+
+        filename = get_file(
+            "vgg_face_caffe.tar.gz",
+            urls,
+            cache_subdir="data/caffe/vgg_face_caffe",
+            file_hash="ee707ac6e890bc148cb155adeaad12be",
+            extract=True,
+        )
+        path = os.path.dirname(filename)
+        config = os.path.join(path, "vgg_face_caffe", "VGG_FACE_deploy.prototxt")
+        checkpoint_path = os.path.join(path, "vgg_face_caffe", "VGG_FACE.caffemodel")
+
+        caffe_average_img = [129.1863, 104.7624, 93.5940]
+        self.embedding_layer = embedding_layer
+
+        def preprocessor(X):
+            """
+            Normalize using the Caffe average image
+
+            Caffe expects inputs of shape `C x H x W` with the channels in BGR order.
+
+            """
+
+            # Subtracting
+            X[:, :, :, 0] -= caffe_average_img[0]
+            X[:, :, :, 1] -= caffe_average_img[1]
+            X[:, :, :, 2] -= caffe_average_img[2]
+
+            # To BGR
+            X = X[:, ::-1, :, :].astype("float32")
+
+            return X
+
+        super(VGG16_Oxford, self).__init__(checkpoint_path, config, preprocessor)
+
+    def _load_model(self):
+        import cv2
+
+        net = cv2.dnn.readNet(self.checkpoint_path, self.config)
+        self.model = net
+
+    def transform(self, X):
+        import cv2
+
+        if self.model is None:
+            self._load_model()
+
+        X = check_array(X, allow_nd=True)
+
+        X = self.preprocessor(X)
+
+        self.model.setInput(X)
+
+        return self.model.forward(self.embedding_layer)
+
+
+def vgg16_oxford_baseline(annotation_type, fixed_positions=None):
+    """
+    Get the VGG16 pipeline which crops the face to :math:`224 \\times 224` and
+    extracts features with :py:class:`VGG16_Oxford`
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. ``eyes-center``)
+
+      fixed_positions: dict
+         Set this if the faces in your images are already registered to fixed positions
+    """
+
+    # DEFINE CROPPING
+    cropped_image_size = (224, 224)
+
+    if annotation_type == "eyes-center":
+        # Hard coding eye positions for backward consistency
+        cropped_positions = {"reye": (112, 82), "leye": (112, 142)}
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=VGG16_Oxford(),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
+    )
+
+    algorithm = Distance()
+
+    return VanillaBiometricsPipeline(transformer, algorithm)
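
A minimal sketch of assembling the new VGG16 pipeline directly (the annotation type here is illustrative):

from bob.bio.face.embeddings.opencv import vgg16_oxford_baseline

pipeline = vgg16_oxford_baseline("eyes-center", fixed_positions=None)
transformer = pipeline.transformer  # cropper + VGG16_Oxford feature extraction
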
diff --git a/bob/bio/face/embeddings/pytorch.py b/bob/bio/face/embeddings/pytorch.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d4c02eddd4d4173389b55ad97a181ef4e64ee99
--- /dev/null
+++ b/bob/bio/face/embeddings/pytorch.py
@@ -0,0 +1,383 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch>
+# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+
+from sklearn.base import TransformerMixin, BaseEstimator
+from sklearn.utils import check_array
+import numpy as np
+import imp
+import os
+from bob.extension.download import get_file
+from bob.bio.face.utils import (
+    dnn_default_cropping,
+    embedding_transformer,
+)
+
+from bob.bio.base.pipelines.vanilla_biometrics import (
+    Distance,
+    VanillaBiometricsPipeline,
+)
+
+
+class PyTorchModel(TransformerMixin, BaseEstimator):
+    """
+    Base Transformer using pytorch models
+
+
+    Parameters
+    ----------
+
+    checkpoint_path: str
+       Path containing the checkpoint
+
+    config:
+        Path containing some configuration file (e.g. .json, .prototxt)
+
+    preprocessor:
+        A function that will transform the data right before forward. The default transformation is `X/255`
+
+    """
+
+    def __init__(
+        self,
+        checkpoint_path=None,
+        config=None,
+        preprocessor=lambda x: x / 255,
+        memory_demanding=False,
+        **kwargs
+    ):
+
+        super().__init__(**kwargs)
+        self.checkpoint_path = checkpoint_path
+        self.config = config
+        self.model = None
+        self.preprocessor = preprocessor
+        self.memory_demanding = memory_demanding
+
+    def transform(self, X):
+        """__call__(image) -> feature
+
+        Extracts the features from the given image.
+
+        **Parameters:**
+
+        image : 2D :py:class:`numpy.ndarray` (floats)
+        The image to extract the features from.
+
+        **Returns:**
+
+        feature : 2D or 3D :py:class:`numpy.ndarray` (floats)
+        The list of features extracted from the image.
+        """
+        import torch
+
+        if self.model is None:
+            self._load_model()
+        X = check_array(X, allow_nd=True)
+        X = torch.Tensor(X)
+        X = self.preprocessor(X)
+
+        def _transform(X):
+            return self.model(X).detach().numpy()
+
+        if self.memory_demanding:
+            return np.array([_transform(x[None, ...]) for x in X])
+        else:
+            return _transform(X)
+
+    def __getstate__(self):
+        # Handling unpicklable objects
+
+        d = self.__dict__.copy()
+        d["model"] = None
+        return d
+
+    def _more_tags(self):
+        return {"stateless": True, "requires_fit": False}
+
+
+class AFFFE_2021(PyTorchModel):
+    """
+    AFFFE PyTorch network that extracts 1000-dimensional features, trained by Manuel Gunther, as described in [LGB18]_
+
+    """
+
+    def __init__(self, memory_demanding=False):
+
+        urls = [
+            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/pytorch/AFFFE-42a53f19.tar.gz",
+            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/pytorch/AFFFE-42a53f19.tar.gz",
+        ]
+
+        filename = get_file(
+            "AFFFE-42a53f19.tar.gz",
+            urls,
+            cache_subdir="data/pytorch/AFFFE-42a53f19.tar.gz",
+            file_hash="1358bbcda62cb59b85b2418ef1f81e9b",
+            extract=True,
+        )
+        path = os.path.dirname(filename)
+        config = os.path.join(path, "AFFFE.py")
+        checkpoint_path = os.path.join(path, "AFFFE.pth")
+
+        super(AFFFE_2021, self).__init__(
+            checkpoint_path, config, memory_demanding=memory_demanding
+        )
+
+    def _load_model(self):
+
+        import torch
+
+        MainModel = imp.load_source("MainModel", self.config)
+        network = torch.load(self.checkpoint_path)
+        network.eval()
+
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+        network.to(device)
+
+        self.model = network
+
+
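+# The archive below bundles the ``iresnet.py`` model definition together with the
+# iresnet34/50/100 checkpoints used by the classes that follow.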
+def _get_iresnet_file():
+    urls = [
+        "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/pytorch/iresnet-91a5de61.tar.gz",
+        "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/pytorch/iresnet-91a5de61.tar.gz",
+    ]
+
+    return get_file(
+        "iresnet-91a5de61.tar.gz",
+        urls,
+        cache_subdir="data/pytorch/iresnet-91a5de61/",
+        file_hash="3976c0a539811d888ef5b6217e5de425",
+        extract=True,
+    )
+
+
+class IResnet34(PyTorchModel):
+    """
+    ArcFace model (RESNET 34) from Insightface ported to pytorch
+    """
+
+    def __init__(self, memory_demanding=False):
+
+        filename = _get_iresnet_file()
+
+        path = os.path.dirname(filename)
+        config = os.path.join(path, "iresnet.py")
+        checkpoint_path = os.path.join(path, "iresnet34-5b0d0e90.pth")
+
+        super(IResnet34, self).__init__(
+            checkpoint_path, config, memory_demanding=memory_demanding
+        )
+
+    def _load_model(self):
+
+        model = imp.load_source("module", self.config).iresnet34(self.checkpoint_path)
+        self.model = model
+
+
+class IResnet50(PyTorchModel):
+    """
+    ArcFace model (RESNET 50) from Insightface ported to pytorch
+    """
+
+    def __init__(self, memory_demanding=False):
+
+        filename = _get_iresnet_file()
+
+        path = os.path.dirname(filename)
+        config = os.path.join(path, "iresnet.py")
+        checkpoint_path = os.path.join(path, "iresnet50-7f187506.pth")
+
+        super(IResnet50, self).__init__(
+            checkpoint_path, config, memory_demanding=memory_demanding
+        )
+
+    def _load_model(self):
+
+        model = imp.load_source("module", self.config).iresnet50(self.checkpoint_path)
+        self.model = model
+
+
+class IResnet100(PyTorchModel):
+    """
+    ArcFace model (RESNET 100) from Insightface ported to pytorch
+    """
+
+    def __init__(self, memory_demanding=False):
+
+        filename = _get_iresnet_file()
+
+        path = os.path.dirname(filename)
+        config = os.path.join(path, "iresnet.py")
+        checkpoint_path = os.path.join(path, "iresnet100-73e07ba7.pth")
+
+        super(IResnet100, self).__init__(
+            checkpoint_path, config, memory_demanding=memory_demanding
+        )
+
+    def _load_model(self):
+
+        model = imp.load_source("module", self.config).iresnet100(self.checkpoint_path)
+        self.model = model
+
+
+def iresnet_template(embedding, annotation_type, fixed_positions=None):
+    # DEFINE CROPPING
+    cropped_image_size = (112, 112)
+    if annotation_type == "eyes-center":
+        # Hard coding eye positions for backward consistency
+        cropped_positions = {
+            "leye": (55, 81),
+            "reye": (55, 42),
+        }
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=embedding,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
+    )
+
+    algorithm = Distance()
+
+    return VanillaBiometricsPipeline(transformer, algorithm)
+
+
+def iresnet34(annotation_type, fixed_positions=None, memory_demanding=False):
+    """
+    Get the Resnet34 pipeline, which crops the face to :math:`112 \times 112` and
+    uses :py:class:`IResnet34` to extract the features
+
+
+    Code reference: https://raw.githubusercontent.com/nizhib/pytorch-insightface/master/insightface/iresnet.py
+    and https://github.com/nizhib/pytorch-insightface
+
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. ``eyes-center``)
+
+      fixed_positions: dict
+         Set it if the faces in your images are registered to a fixed position in the image
+
+      memory_demanding: bool
+         If ``True``, the extractor processes one sample at a time (useful when memory is limited)
+
+    """
+
+    return iresnet_template(
+        embedding=IResnet34(memory_demanding=memory_demanding),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
+
+
+def iresnet50(annotation_type, fixed_positions=None, memory_demanding=False):
+    """
+    Get the Resnet50 pipeline, which crops the face to :math:`112 \times 112` and
+    uses :py:class:`IResnet50` to extract the features
+
+
+    Code reference: https://raw.githubusercontent.com/nizhib/pytorch-insightface/master/insightface/iresnet.py
+    and https://github.com/nizhib/pytorch-insightface
+
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. ``eyes-center``)
+
+      fixed_positions: dict
+         Set it if the faces in your images are registered to a fixed position in the image
+
+      memory_demanding: bool
+         If ``True``, the extractor processes one sample at a time (useful when memory is limited)
+
+    """
+
+    return iresnet_template(
+        embedding=IResnet50(memory_demanding=memory_demanding),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
+
+
+def iresnet100(annotation_type, fixed_positions=None, memory_demanding=False):
+    """
+    Get the Resnet100 pipeline, which crops the face to :math:`112 \times 112` and
+    uses :py:class:`IResnet100` to extract the features
+
+
+    Code reference: https://raw.githubusercontent.com/nizhib/pytorch-insightface/master/insightface/iresnet.py
+    and https://github.com/nizhib/pytorch-insightface
+
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. ``eyes-center``)
+
+      fixed_positions: dict
+         Set it if the faces in your images are registered to a fixed position in the image
+
+      memory_demanding: bool
+         If ``True``, the extractor processes one sample at a time (useful when memory is limited)
+
+    """
+
+    return iresnet_template(
+        embedding=IResnet100(memory_demanding=memory_demanding),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
+
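+# A minimal usage sketch for the pipeline getters above ("eyes-center" is only an
+# illustrative annotation type; it normally comes from the database configuration):
+#
+#     pipeline = iresnet100(annotation_type="eyes-center")
+#     # ``pipeline`` is a VanillaBiometricsPipeline combining face cropping,
+#     # the IResnet100 embedding and a Distance algorithm.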
+
+def afffe_baseline(annotation_type, fixed_positions=None, memory_demanding=False):
+    """
+    Get the AFFFE pipeline, which crops the face to :math:`224 \times 224` and
+    uses :py:class:`AFFFE_2021` to extract the features
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. ``eyes-center``)
+
+      fixed_positions: dict
+         Set it if the faces in your images are registered to a fixed position in the image
+
+      memory_demanding: bool
+         If ``True``, the extractor processes one sample at a time (useful when memory is limited)
+    """
+
+    # DEFINE CROPPING
+    cropped_image_size = (224, 224)
+
+    if annotation_type == "eyes-center":
+        # Hard coding eye positions for backward consistency
+        cropped_positions = {"leye": (110, 144), "reye": (110, 96)}
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=AFFFE_2021(memory_demanding=memory_demanding),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
+    )
+
+    algorithm = Distance()
+
+    return VanillaBiometricsPipeline(transformer, algorithm)
diff --git a/bob/bio/face/embeddings/resnet50.py b/bob/bio/face/embeddings/resnet50.py
deleted file mode 100644
index c542c39d734e7ec7d402e3834359977f406fe4a8..0000000000000000000000000000000000000000
--- a/bob/bio/face/embeddings/resnet50.py
+++ /dev/null
@@ -1,148 +0,0 @@
-from bob.bio.face.embeddings import download_model
-
-
-from .tf2_inception_resnet import TransformTensorflow
-import pkg_resources
-import os
-from bob.extension import rc
-import tensorflow as tf
-
-
-class Resnet50_MsCeleb_ArcFace_2021(TransformTensorflow):
-    """
-    Resnet50 Backbone trained with the MSCeleb 1M database.
-
-    The bottleneck layer (a.k.a embedding) has 512d.
-
-    The configuration file used to trained is:
-
-    ```yaml
-    batch-size: 128
-    face-size: 112
-    face-output_size: 112
-    n-classes: 85742
-
-
-    ## Backbone
-    backbone: 'resnet50'
-    head: 'arcface'
-    s: 10
-    bottleneck: 512
-    m: 0.5
-
-    # Training parameters
-    solver: "sgd"
-    lr: 0.01
-    dropout-rate: 0.5
-    epochs: 500
-
-
-    train-tf-record-path: "<PATH>"
-    validation-tf-record-path: "<PATH>"
-
-    ```
-
-
-    """
-
-    def __init__(self, memory_demanding=False):
-        internal_path = pkg_resources.resource_filename(
-            __name__, os.path.join("data", "resnet50_msceleb_arcface_2021"),
-        )
-
-        checkpoint_path = (
-            internal_path
-            if rc["bob.bio.face.models.resnet50_msceleb_arcface_2021"] is None
-            else rc["bob.bio.face.models.resnet50_msceleb_arcface_2021"]
-        )
-
-        urls = [
-            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet50_msceleb_arcface_2021.tar.gz",
-            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet50_msceleb_arcface_2021.tar.gz",
-        ]
-
-        download_model(checkpoint_path, urls, "resnet50_msceleb_arcface_2021.tar.gz")
-
-        super(Resnet50_MsCeleb_ArcFace_2021, self).__init__(
-            checkpoint_path,
-            preprocessor=lambda X: X / 255.0,
-            memory_demanding=memory_demanding,
-        )
-
-    def inference(self, X):
-        if self.preprocessor is not None:
-            X = self.preprocessor(tf.cast(X, "float32"))
-
-        prelogits = self.model.predict_on_batch(X)[0]
-        embeddings = tf.math.l2_normalize(prelogits, axis=-1)
-        return embeddings
-
-
-class Resnet50_VGG2_ArcFace_2021(TransformTensorflow):
-    """
-    Resnet50 Backbone trained with the VGG2 database.
-
-    The bottleneck layer (a.k.a embedding) has 512d.
-
-    The configuration file used to trained is:
-
-    ```yaml
-    batch-size: 128
-    face-size: 112
-    face-output_size: 112
-    n-classes: 8631
-
-
-    ## Backbone
-    backbone: 'resnet50'
-    head: 'arcface'
-    s: 64
-    bottleneck: 512
-    m: 0.5
-
-    # Training parameters
-    solver: "sgd"
-    lr: 0.1
-    dropout-rate: 0.5
-    epochs: 1047
-
-
-    train-tf-record-path: "<PATH>"
-    validation-tf-record-path: "<PATH>"
-
-    ```
-
-
-    """
-
-    def __init__(self, memory_demanding=False):
-        internal_path = pkg_resources.resource_filename(
-            __name__, os.path.join("data", "resnet50_vgg2_arcface_2021"),
-        )
-
-        checkpoint_path = (
-            internal_path
-            if rc["bob.bio.face.models.resnet50_vgg2_arcface_2021"] is None
-            else rc["bob.bio.face.models.resnet50_vgg2_arcface_2021"]
-        )
-
-        urls = [
-            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet50_vgg2_arcface_2021.tar.gz",
-            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet50_vgg2_arcface_2021.tar.gz",
-        ]
-
-        download_model(checkpoint_path, urls, "resnet50_vgg2_arcface_2021.tar.gz")
-
-        super(Resnet50_VGG2_ArcFace_2021, self).__init__(
-            checkpoint_path,
-            preprocessor=lambda X: X / 255.0,
-            memory_demanding=memory_demanding,
-        )
-
-    def inference(self, X):
-        if self.preprocessor is not None:
-            X = self.preprocessor(tf.cast(X, "float32"))
-
-        prelogits = self.model.predict_on_batch(X)
-        embeddings = tf.math.l2_normalize(prelogits, axis=-1)
-        return embeddings
diff --git a/bob/bio/face/embeddings/tensorflow.py b/bob/bio/face/embeddings/tensorflow.py
new file mode 100644
index 0000000000000000000000000000000000000000..0398cba609c1ce577acdbc82281a42a756cfd02a
--- /dev/null
+++ b/bob/bio/face/embeddings/tensorflow.py
@@ -0,0 +1,846 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+
+# Transformers based on tensorflow
+
+
+import os
+import pkg_resources
+from bob.learn.tensorflow.utils.image import to_channels_last
+from sklearn.base import TransformerMixin, BaseEstimator
+from bob.extension.download import get_file
+from sklearn.utils import check_array
+import numpy as np
+import tensorflow as tf
+
+from bob.bio.face.utils import (
+    dnn_default_cropping,
+    embedding_transformer,
+)
+
+from bob.bio.base.pipelines.vanilla_biometrics import (
+    Distance,
+    VanillaBiometricsPipeline,
+)
+
+
+def sanderberg_rescaling():
+    # FIXED_STANDARDIZATION from https://github.com/davidsandberg/facenet
+    # [-0.99609375, 0.99609375]
+    from tensorflow.keras.layers.experimental import preprocessing
+
+    preprocessor = preprocessing.Rescaling(scale=1 / 128, offset=-127.5 / 128)
+    return preprocessor
+
+
+class TensorflowTransformer(TransformerMixin, BaseEstimator):
+    """
+    Base Transformer for Tensorflow architectures.
+
+    Parameters
+    ----------
+
+    checkpoint_path: str
+       Path containing the checkpoint
+
+    preprocessor:
+        A function that will transform the data right before forward
+
+    memory_demanding: bool
+        If ``True``, the ``transform`` method will run one sample at a time.
+        This is useful when there is not enough memory available to forward big chunks of data.
+    """
+
+    def __init__(
+        self, checkpoint_path, preprocessor=None, memory_demanding=False, **kwargs
+    ):
+        super().__init__(**kwargs)
+        self.checkpoint_path = checkpoint_path
+        self.model = None
+        self.preprocessor = preprocessor
+        self.memory_demanding = memory_demanding
+
+    def load_model(self):
+        self.model = tf.keras.models.load_model(self.checkpoint_path)
+
+    def transform(self, X):
+        def _transform(X):
+            X = tf.convert_to_tensor(X)
+            X = to_channels_last(X)
+
+            if X.shape[-3:] != self.model.input_shape[-3:]:
+                raise ValueError(
+                    f"Image shape {X.shape} not supported. Expected {self.model.input_shape}"
+                )
+
+            return self.inference(X).numpy()
+
+        if self.model is None:
+            self.load_model()
+
+        X = check_array(X, allow_nd=True)
+
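+        # In memory-demanding mode, forward one sample at a time and stack the
+        # results; otherwise forward the whole batch in a single call.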
+        if self.memory_demanding:
+            return np.array([_transform(x[None, ...]) for x in X])
+        else:
+            return _transform(X)
+
+    def __getstate__(self):
+        # Handling unpicklable objects
+        d = self.__dict__.copy()
+        d["model"] = None
+        return d
+
+    def inference(self, X):
+        if self.preprocessor is not None:
+            X = self.preprocessor(tf.cast(X, "float32"))
+
+        prelogits = self.model.predict_on_batch(X)
+        embeddings = tf.math.l2_normalize(prelogits, axis=-1)
+        return embeddings
+
+    def _more_tags(self):
+        return {"stateless": True, "requires_fit": False}
+
+    def __del__(self):
+        self.model = None
+
+
+class InceptionResnetv2_MsCeleb_CenterLoss_2018(TensorflowTransformer):
+    """
+    InceptionResnet v2 model trained in 2018 using the MSCeleb dataset in the context of the work:
+
+    Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
+
+    """
+
+    def __init__(self, memory_demanding=False):
+
+        urls = [
+            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv2_msceleb_centerloss_2018.tar.gz",
+            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv2_msceleb_centerloss_2018.tar.gz",
+        ]
+
+        filename = get_file(
+            "inceptionresnetv2_msceleb_centerloss_2018.tar.gz",
+            urls,
+            cache_subdir="data/tensorflow/inceptionresnetv2_msceleb_centerloss_2018",
+            file_hash="7c0aa46bba16c01768a38594a3b4c14d",
+            extract=True,
+        )
+        checkpoint_path = os.path.dirname(filename)
+
+        super(InceptionResnetv2_MsCeleb_CenterLoss_2018, self).__init__(
+            checkpoint_path,
+            preprocessor=tf.image.per_image_standardization,
+            memory_demanding=memory_demanding,
+        )
+
+
+class InceptionResnetv2_Casia_CenterLoss_2018(TensorflowTransformer):
+    """
+    InceptionResnet v2 model trained in 2018 using the CasiaWebFace dataset in the context of the work:
+
+    Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
+
+    """
+
+    def __init__(self, memory_demanding=False):
+
+        urls = [
+            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv2_casia_centerloss_2018.tar.gz",
+            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv2_casia_centerloss_2018.tar.gz",
+        ]
+
+        filename = get_file(
+            "inceptionresnetv2_casia_centerloss_2018.tar.gz",
+            urls,
+            cache_subdir="data/tensorflow/inceptionresnetv2_casia_centerloss_2018",
+            file_hash="1e0b62e45430a8d7516d7a6101a24c40",
+            extract=True,
+        )
+        checkpoint_path = os.path.dirname(filename)
+
+        super(InceptionResnetv2_Casia_CenterLoss_2018, self).__init__(
+            checkpoint_path,
+            preprocessor=tf.image.per_image_standardization,
+            memory_demanding=memory_demanding,
+        )
+
+
+class InceptionResnetv1_Casia_CenterLoss_2018(TensorflowTransformer):
+    """
+    InceptionResnet v1 model trained in 2018 using the CasiaWebFace dataset in the context of the work:
+
+    Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
+
+    """
+
+    def __init__(self, memory_demanding=False):
+
+        urls = [
+            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv1_casia_centerloss_2018.tar.gz",
+            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv1_casia_centerloss_2018.tar.gz",
+        ]
+
+        filename = get_file(
+            "inceptionresnetv1_casia_centerloss_2018.tar.gz",
+            urls,
+            cache_subdir="data/tensorflow/inceptionresnetv1_casia_centerloss_2018",
+            file_hash="6601e6f6840ae863c7daf31a7c6b9a27",
+            extract=True,
+        )
+        checkpoint_path = os.path.dirname(filename)
+
+        super(InceptionResnetv1_Casia_CenterLoss_2018, self).__init__(
+            checkpoint_path,
+            preprocessor=tf.image.per_image_standardization,
+            memory_demanding=memory_demanding,
+        )
+
+
+class InceptionResnetv1_MsCeleb_CenterLoss_2018(TensorflowTransformer):
+    """
+    InceptionResnet v1 model trained in 2018 using the MsCeleb dataset in the context of the work:
+
+    Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
+
+    """
+
+    def __init__(self, memory_demanding=False):
+
+        urls = [
+            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv1_msceleb_centerloss_2018.tar.gz",
+            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv1_msceleb_centerloss_2018.tar.gz",
+        ]
+
+        filename = get_file(
+            "inceptionresnetv1_msceleb_centerloss_2018.tar.gz",
+            urls,
+            cache_subdir="data/tensorflow/inceptionresnetv1_msceleb_centerloss_2018",
+            file_hash="1ca0149619e4e9320a927ea65b2b5521",
+            extract=True,
+        )
+        checkpoint_path = os.path.dirname(filename)
+
+        super(InceptionResnetv1_MsCeleb_CenterLoss_2018, self).__init__(
+            checkpoint_path,
+            preprocessor=tf.image.per_image_standardization,
+            memory_demanding=memory_demanding,
+        )
+
+
+class FaceNetSanderberg_20170512_110547(TensorflowTransformer):
+    """
+    Wrapper for the free FaceNet from David Sanderberg model 20170512_110547:
+    https://github.com/davidsandberg/facenet
+
+    And for a preprocessor you can use::
+
+        from bob.bio.face.preprocessor import FaceCrop
+        # This is the size of the image that this model expects
+        CROPPED_IMAGE_HEIGHT = 160
+        CROPPED_IMAGE_WIDTH = 160
+        # eye positions for frontal images
+        RIGHT_EYE_POS = (46, 53)
+        LEFT_EYE_POS = (46, 107)
+        # Crops the face using eye annotations
+        preprocessor = FaceCrop(
+            cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
+            cropped_positions={'leye': LEFT_EYE_POS, 'reye': RIGHT_EYE_POS},
+            color_channel='rgb'
+        )
+    """
+
+    def __init__(self, memory_demanding=False):
+        urls = [
+            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/facenet_sanderberg_20170512_110547.tar.gz"
+        ]
+
+        filename = get_file(
+            "facenet_sanderberg_20170512_110547.tar.gz",
+            urls,
+            cache_subdir="data/tensorflow/facenet_sanderberg_20170512_110547",
+            file_hash="734d1c997c10acdcdffc79fb51a2e715",
+            extract=True,
+        )
+        checkpoint_path = os.path.dirname(filename)
+
+        super(FaceNetSanderberg_20170512_110547, self).__init__(
+            checkpoint_path,
+            tf.image.per_image_standardization,
+            memory_demanding=memory_demanding,
+        )
+
+
+class Resnet50_MsCeleb_ArcFace_2021(TensorflowTransformer):
+    """
+    Resnet50 Backbone trained with the MSCeleb 1M database.
+
+    The bottleneck layer (a.k.a embedding) has 512d.
+
+    The configuration file used to train it is:
+
+    .. warning::
+        This configuration file might change in future releases
+
+    ```yaml
+    batch-size: 128
+    face-size: 112
+    face-output_size: 112
+    n-classes: 85742
+
+
+    ## Backbone
+    backbone: 'resnet50'
+    head: 'arcface'
+    s: 10
+    bottleneck: 512
+    m: 0.5
+
+    # Training parameters
+    solver: "sgd"
+    lr: 0.1
+    dropout-rate: 0.5
+    epochs: 500
+
+
+    train-tf-record-path: "<PATH>"
+    validation-tf-record-path: "<PATH>"
+
+    ```
+
+
+    """
+
+    def __init__(self, memory_demanding=False):
+        urls = [
+            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet50-msceleb-arcface_2021-48ec5cb8.tar.gz",
+            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet50-msceleb-arcface_2021-48ec5cb8.tar.gz",
+        ]
+
+        filename = get_file(
+            "resnet50-msceleb-arcface_2021-48ec5cb8.tar.gz",
+            urls,
+            cache_subdir="data/tensorflow/resnet50-msceleb-arcface_2021-48ec5cb8",
+            file_hash="17946f121af5ddd18c637c4620e54da6",
+            extract=True,
+        )
+        checkpoint_path = os.path.dirname(filename)
+
+        super(Resnet50_MsCeleb_ArcFace_2021, self).__init__(
+            checkpoint_path,
+            preprocessor=lambda X: X / 255.0,
+            memory_demanding=memory_demanding,
+        )
+
+
+class Resnet50_MsCeleb_ArcFace_20210521(TensorflowTransformer):
+    """
+    Resnet50 Backbone trained with the MSCeleb 1M database. The bottleneck layer (a.k.a embedding) has 512d.
+
+    The difference between this one and :any:`Resnet50_MsCeleb_ArcFace_2021` is the MSCeleb version used to train it.
+    This one uses 100% of the data pruned from annotators.
+
+
+    The configuration file used to train it is:
+
+    .. warning::
+        This configuration file might change in future releases
+
+
+    ```yaml
+    batch-size: 128
+    face-size: 112
+    face-output_size: 112
+    n-classes: 83009
+
+
+    ## Backbone
+    backbone: 'resnet50'
+    head: 'arcface'
+    s: 30
+    bottleneck: 512
+    m: 0.5
+
+    # Training parameters
+    solver: "sgd"
+    lr: 0.1
+    dropout-rate: 0.5
+    epochs: 300
+
+
+    train-tf-record-path: "<PATH>"
+    validation-tf-record-path: "<PATH>"
+
+    ```
+
+
+    """
+
+    def __init__(self, memory_demanding=False):
+
+        urls = [
+            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet50-msceleb-arcface_20210521-e9bc085c.tar.gz",
+            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet50-msceleb-arcface_20210521-e9bc085c.tar.gz",
+        ]
+
+        filename = get_file(
+            "resnet50-msceleb-arcface_20210521-e9bc085c.tar.gz",
+            urls,
+            cache_subdir="data/tensorflow/resnet50-msceleb-arcface_20210521-801991f0",
+            file_hash="e33090eea4951ce80be4620a0dac680d",
+            extract=True,
+        )
+        checkpoint_path = os.path.dirname(filename)
+
+        super(Resnet50_MsCeleb_ArcFace_20210521, self).__init__(
+            checkpoint_path,
+            preprocessor=lambda X: X / 255.0,
+            memory_demanding=memory_demanding,
+        )
+
+
+class Resnet50_VGG2_ArcFace_2021(TensorflowTransformer):
+    """
+    Resnet50 Backbone trained with the VGG2 database.
+
+    The bottleneck layer (a.k.a embedding) has 512d.
+
+    The configuration file used to train it is:
+
+    .. warning::
+        This configuration file might change in future releases
+
+    ```yaml
+    batch-size: 128
+    face-size: 112
+    face-output_size: 112
+    n-classes: 8631
+
+
+    ## Backbone
+    backbone: 'resnet50'
+    head: 'arcface'
+    s: 64
+    bottleneck: 512
+    m: 0.5
+
+    # Training parameters
+    solver: "sgd"
+    lr: 0.1
+    dropout-rate: 0.5
+    epochs: 1047
+
+
+    train-tf-record-path: "<PATH>"
+    validation-tf-record-path: "<PATH>"
+
+    ```
+
+
+    """
+
+    def __init__(self, memory_demanding=False):
+        urls = [
+            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet50_vgg2_arcface_2021.tar.gz",
+            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/resnet50_vgg2_arcface_2021.tar.gz",
+        ]
+
+        filename = get_file(
+            "resnet50_vgg2_arcface_2021.tar.gz",
+            urls,
+            cache_subdir="data/tensorflow/resnet50_vgg2_arcface_2021",
+            file_hash="64f89c8cb55e7a0d9c7e13ff412b6a13",
+            extract=True,
+        )
+        checkpoint_path = os.path.dirname(filename)
+
+        super(Resnet50_VGG2_ArcFace_2021, self).__init__(
+            checkpoint_path,
+            preprocessor=lambda X: X / 255.0,
+            memory_demanding=memory_demanding,
+        )
+
+
+class MobileNetv2_MsCeleb_ArcFace_2021(TensorflowTransformer):
+    """
+    MobileNet Backbone trained with the MSCeleb 1M database.
+
+    The bottleneck layer (a.k.a embedding) has 512d.
+
+    The configuration file used to train it is:
+
+    .. warning::
+        This configuration file might change in future releases
+
+    ```yaml
+    batch-size: 128
+    face-size: 112
+    face-output_size: 112
+    n-classes: 85742
+
+
+    ## Backbone
+    backbone: 'mobilenet-v2'
+    head: 'arcface'
+    s: 10
+    bottleneck: 512
+    m: 0.5
+
+    # Training parameters
+    solver: "sgd"
+    lr: 0.01
+    dropout-rate: 0.5
+    epochs: 500
+
+
+    train-tf-record-path: "<PATH>"
+    validation-tf-record-path: "<PATH>"
+
+    ```
+
+
+    """
+
+    def __init__(self, memory_demanding=False):
+
+        urls = [
+            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/mobilenet-v2-msceleb-arcface-2021-e012cb66.tar.gz",
+            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/mobilenet-v2-msceleb-arcface-2021-e012cb66.tar.gz",
+        ]
+
+        filename = get_file(
+            "mobilenet-v2-msceleb-arcface-2021-e012cb66.tar.gz",
+            urls,
+            cache_subdir="data/tensorflow/mobilenet-v2-msceleb-arcface-2021-e012cb66",
+            file_hash="dd1399b86f01725c7b07b480b703e02a",
+            extract=True,
+        )
+        checkpoint_path = os.path.dirname(filename)
+
+        super(MobileNetv2_MsCeleb_ArcFace_2021, self).__init__(
+            checkpoint_path,
+            preprocessor=lambda X: X / 255.0,
+            memory_demanding=memory_demanding,
+        )
+
+
+def facenet_template(embedding, annotation_type, fixed_positions=None):
+    """
+    Facenet baseline template.
+    This one crops the face to :math:`160 \times 160`
+
+    Parameters
+    ----------
+
+      embedding: obj
+         Transformer that takes a cropped face and extracts the embeddings
+
+      annotation_type: str
+         Type of the annotations (e.g. ``eyes-center``)
+
+      fixed_positions: dict
+         Set it if the faces in your images are registered to a fixed position in the image
+    """
+    # DEFINE CROPPING
+    cropped_image_size = (160, 160)
+    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    # ASSEMBLE TRANSFORMER
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=embedding,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
+    )
+
+    algorithm = Distance()
+
+    return VanillaBiometricsPipeline(transformer, algorithm)
+
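+# ``facenet_template`` (above) and ``resnet_template`` (below) are thin helpers:
+# the public getters further down call them with a concrete embedding instance,
+# for example (illustrative only):
+#
+#     pipeline = facenet_template(
+#         embedding=FaceNetSanderberg_20170512_110547(),
+#         annotation_type="eyes-center",
+#     )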
+
+def resnet_template(embedding, annotation_type, fixed_positions=None):
+    # DEFINE CROPPING
+    cropped_image_size = (112, 112)
+    if annotation_type == "eyes-center":
+        # Hard coding eye positions for backward consistency
+        cropped_positions = {
+            "leye": (55, 81),
+            "reye": (55, 42),
+        }
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=embedding,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
+    )
+
+    algorithm = Distance()
+
+    return VanillaBiometricsPipeline(transformer, algorithm)
+
+
+def resnet50_msceleb_arcface_2021(
+    annotation_type, fixed_positions=None, memory_demanding=False
+):
+    """
+    Get the Resnet50 pipeline, which crops the face to :math:`112 \times 112` and
+    uses :py:class:`Resnet50_MsCeleb_ArcFace_2021` to extract the features
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. ``eyes-center``)
+
+      fixed_positions: dict
+         Set it if the faces in your images are registered to a fixed position in the image
+
+      memory_demanding: bool
+         If ``True``, the extractor processes one sample at a time (useful when memory is limited)
+
+    """
+
+    return resnet_template(
+        embedding=Resnet50_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
+
+
+def resnet50_msceleb_arcface_20210521(
+    annotation_type, fixed_positions=None, memory_demanding=False
+):
+    """
+    Get the Resnet50 pipeline, which crops the face to :math:`112 \times 112` and
+    uses :py:class:`Resnet50_MsCeleb_ArcFace_20210521` to extract the features
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. ``eyes-center``)
+
+      fixed_positions: dict
+         Set it if the faces in your images are registered to a fixed position in the image
+
+      memory_demanding: bool
+         If ``True``, the extractor processes one sample at a time (useful when memory is limited)
+
+    """
+
+    return resnet_template(
+        embedding=Resnet50_MsCeleb_ArcFace_20210521(memory_demanding=memory_demanding),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
+
+
+def resnet50_vgg2_arcface_2021(
+    annotation_type, fixed_positions=None, memory_demanding=False
+):
+    """
+    Get the Resnet50 pipeline, which crops the face to :math:`112 \times 112` and
+    uses :py:class:`Resnet50_VGG2_ArcFace_2021` to extract the features
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. ``eyes-center``)
+
+      fixed_positions: dict
+         Set it if the faces in your images are registered to a fixed position in the image
+
+      memory_demanding: bool
+         If ``True``, the extractor processes one sample at a time (useful when memory is limited)
+
+    """
+
+    return resnet_template(
+        embedding=Resnet50_VGG2_ArcFace_2021(memory_demanding=memory_demanding),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
+
+
+def mobilenetv2_msceleb_arcface_2021(
+    annotation_type, fixed_positions=None, memory_demanding=False
+):
+    """
+    Get the MobileNet pipeline, which crops the face to :math:`112 \times 112` and
+    uses :py:class:`MobileNetv2_MsCeleb_ArcFace_2021` to extract the features
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. ``eyes-center``)
+
+      fixed_positions: dict
+         Set it if the faces in your images are registered to a fixed position in the image
+
+      memory_demanding: bool
+         If ``True``, the extractor processes one sample at a time (useful when memory is limited)
+
+    """
+
+    return resnet_template(
+        embedding=MobileNetv2_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
+
+
+def facenet_sanderberg_20170512_110547(
+    annotation_type, fixed_positions=None, memory_demanding=False
+):
+    """
+    Get the Facenet pipeline, which crops the face to :math:`160 \times 160` and
+    uses :py:class:`FaceNetSanderberg_20170512_110547` to extract the features
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. ``eyes-center``)
+
+      fixed_positions: dict
+         Set it if the faces in your images are registered to a fixed position in the image
+
+      memory_demanding: bool
+         If ``True``, the extractor processes one sample at a time (useful when memory is limited)
+
+    """
+
+    return facenet_template(
+        embedding=FaceNetSanderberg_20170512_110547(memory_demanding=memory_demanding),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
+
+
+def inception_resnet_v1_casia_centerloss_2018(
+    annotation_type, fixed_positions=None, memory_demanding=False
+):
+    """
+    Get the Inception Resnet v1 pipeline, which crops the face to :math:`160 \times 160` and
+    uses :py:class:`InceptionResnetv1_Casia_CenterLoss_2018` to extract the features
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. ``eyes-center``)
+
+      fixed_positions: dict
+         Set it if the faces in your images are registered to a fixed position in the image
+
+      memory_demanding: bool
+         If ``True``, the extractor processes one sample at a time (useful when memory is limited)
+
+    """
+
+    return facenet_template(
+        embedding=InceptionResnetv1_Casia_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
+
+
+def inception_resnet_v2_casia_centerloss_2018(
+    annotation_type, fixed_positions=None, memory_demanding=False
+):
+    """
+    Get the Inception Resnet v2 pipeline, which crops the face to :math:`160 \times 160` and
+    uses :py:class:`InceptionResnetv2_Casia_CenterLoss_2018` to extract the features
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. ``eyes-center``)
+
+      fixed_positions: dict
+         Set it if the faces in your images are registered to a fixed position in the image
+
+      memory_demanding: bool
+         If ``True``, the extractor processes one sample at a time (useful when memory is limited)
+
+    """
+
+    return facenet_template(
+        embedding=InceptionResnetv2_Casia_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
+
+
+def inception_resnet_v1_msceleb_centerloss_2018(
+    annotation_type, fixed_positions=None, memory_demanding=False
+):
+    """
+    Get the Inception Resnet v1 pipeline, which crops the face to :math:`160 \times 160` and
+    uses :py:class:`InceptionResnetv1_MsCeleb_CenterLoss_2018` to extract the features
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. ``eyes-center``)
+
+      fixed_positions: dict
+         Set it if the faces in your images are registered to a fixed position in the image
+
+      memory_demanding: bool
+         If ``True``, the extractor processes one sample at a time (useful when memory is limited)
+
+    """
+
+    return facenet_template(
+        embedding=InceptionResnetv1_MsCeleb_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
+
+
+def inception_resnet_v2_msceleb_centerloss_2018(
+    annotation_type, fixed_positions=None, memory_demanding=False
+):
+    """
+    Get the Inception Resnet v2 pipeline, which crops the face to :math:`160 \times 160` and
+    uses :py:class:`InceptionResnetv2_MsCeleb_CenterLoss_2018` to extract the features
+
+    Parameters
+    ----------
+
+      annotation_type: str
+         Type of the annotations (e.g. ``eyes-center``)
+
+      fixed_positions: dict
+         Set it if the faces in your images are registered to a fixed position in the image
+
+      memory_demanding: bool
+         If ``True``, the extractor processes one sample at a time (useful when memory is limited)
+
+    """
+
+    return facenet_template(
+        embedding=InceptionResnetv2_MsCeleb_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        annotation_type=annotation_type,
+        fixed_positions=fixed_positions,
+    )
diff --git a/bob/bio/face/embeddings/tf2_inception_resnet.py b/bob/bio/face/embeddings/tf2_inception_resnet.py
deleted file mode 100644
index b462f3a885c67153b15a4e5f930637e9ebd14cee..0000000000000000000000000000000000000000
--- a/bob/bio/face/embeddings/tf2_inception_resnet.py
+++ /dev/null
@@ -1,286 +0,0 @@
-import tensorflow as tf
-from bob.learn.tensorflow.utils.image import to_channels_last
-from sklearn.base import TransformerMixin, BaseEstimator
-from sklearn.utils import check_array
-
-from tensorflow.keras import Sequential
-from tensorflow.keras.layers.experimental import preprocessing
-from bob.extension import rc
-from functools import partial
-import pkg_resources
-import os
-from bob.bio.face.embeddings import download_model
-import numpy as np
-
-
-def sanderberg_rescaling():
-    # FIXED_STANDARDIZATION from https://github.com/davidsandberg/facenet
-    # [-0.99609375, 0.99609375]
-    preprocessor = preprocessing.Rescaling(scale=1 / 128, offset=-127.5 / 128)
-    return preprocessor
-
-
-class TransformTensorflow(TransformerMixin, BaseEstimator):
-    """
-    Base Transformer for Tensorflow architectures.
-
-    Szegedy, Christian, et al. "Inception-v4, inception-resnet and the impact of residual connections on learning." arXiv preprint arXiv:1602.07261 (2016).
-
-    Parameters
-    ----------
-
-    checkpoint_path: str
-       Path containing the checkpoint
-
-    preprocessor:
-        Preprocessor function
-
-    memory_demanding bool
-        If `True`, the `transform` method will run one sample at the time.
-        This is useful when there is not enough memory available to forward big chucks of data.
-    """
-
-    def __init__(
-        self, checkpoint_path, preprocessor=None, memory_demanding=False, **kwargs
-    ):
-        super().__init__(**kwargs)
-        self.checkpoint_path = checkpoint_path
-        self.model = None
-        self.preprocessor = preprocessor
-        self.memory_demanding = memory_demanding
-
-    def load_model(self):
-        self.model = tf.keras.models.load_model(self.checkpoint_path)
-
-    def transform(self, X):
-        def _transform(X):
-            X = tf.convert_to_tensor(X)
-            X = to_channels_last(X)
-
-            if X.shape[-3:] != self.model.input_shape[-3:]:
-                raise ValueError(
-                    f"Image shape {X.shape} not supported. Expected {self.model.input_shape}"
-                )
-
-            return self.inference(X).numpy()
-
-        if self.model is None:
-            self.load_model()
-
-        X = check_array(X, allow_nd=True)
-
-        if self.memory_demanding:
-            return np.array([_transform(x[None, ...]) for x in X])
-        else:
-            return _transform(X)
-
-    def __getstate__(self):
-        # Handling unpicklable objects
-        d = self.__dict__.copy()
-        d["model"] = None
-        return d
-
-    def inference(self, X):
-        if self.preprocessor is not None:
-            X = self.preprocessor(tf.cast(X, "float32"))
-
-        prelogits = self.model.predict_on_batch(X)
-        embeddings = tf.math.l2_normalize(prelogits, axis=-1)
-        return embeddings
-
-    def _more_tags(self):
-        return {"stateless": True, "requires_fit": False}
-
-    def __del__(self):
-        self.model = None
-
-
-class InceptionResnetv2_MsCeleb_CenterLoss_2018(TransformTensorflow):
-    """
-    InceptionResnet v2 model trained in 2018 using the MSCeleb dataset in the context of the work:
-
-    Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
-
-    """
-
-    def __init__(self, memory_demanding=False):
-        internal_path = pkg_resources.resource_filename(
-            __name__, os.path.join("data", "inceptionresnetv2_msceleb_centerloss_2018"),
-        )
-
-        checkpoint_path = (
-            internal_path
-            if rc["bob.bio.face.models.InceptionResnetv2_MsCeleb_CenterLoss_2018"]
-            is None
-            else rc["bob.bio.face.models.InceptionResnetv2_MsCeleb_CenterLoss_2018"]
-        )
-
-        urls = [
-            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv2_msceleb_centerloss_2018.tar.gz",
-            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv2_msceleb_centerloss_2018.tar.gz",
-        ]
-
-        download_model(
-            checkpoint_path, urls, "inceptionresnetv2_msceleb_centerloss_2018.tar.gz"
-        )
-
-        super(InceptionResnetv2_MsCeleb_CenterLoss_2018, self).__init__(
-            checkpoint_path,
-            preprocessor=tf.image.per_image_standardization,
-            memory_demanding=memory_demanding,
-        )
-
-
-class InceptionResnetv2_Casia_CenterLoss_2018(TransformTensorflow):
-    """
-    InceptionResnet v2 model trained in 2018 using the CasiaWebFace dataset in the context of the work:
-
-    Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
-
-    """
-
-    def __init__(self, memory_demanding=False):
-        internal_path = pkg_resources.resource_filename(
-            __name__, os.path.join("data", "inceptionresnetv2_casia_centerloss_2018"),
-        )
-
-        checkpoint_path = (
-            internal_path
-            if rc["bob.bio.face.models.InceptionResnetv2_Casia_CenterLoss_2018"] is None
-            else rc["bob.bio.face.models.InceptionResnetv2_Casia_CenterLoss_2018"]
-        )
-
-        urls = [
-            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv2_casia_centerloss_2018.tar.gz",
-            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv2_casia_centerloss_2018.tar.gz",
-        ]
-
-        download_model(
-            checkpoint_path, urls, "inceptionresnetv2_casia_centerloss_2018.tar.gz"
-        )
-
-        super(InceptionResnetv2_Casia_CenterLoss_2018, self).__init__(
-            checkpoint_path,
-            preprocessor=tf.image.per_image_standardization,
-            memory_demanding=memory_demanding,
-        )
-
-
-class InceptionResnetv1_Casia_CenterLoss_2018(TransformTensorflow):
-    """
-    InceptionResnet v1 model trained in 2018 using the CasiaWebFace dataset in the context of the work:
-
-    Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
-
-    """
-
-    def __init__(self, memory_demanding=False):
-        internal_path = pkg_resources.resource_filename(
-            __name__, os.path.join("data", "inceptionresnetv1_casia_centerloss_2018"),
-        )
-
-        checkpoint_path = (
-            internal_path
-            if rc["bob.bio.face.models.InceptionResnetv1_Casia_CenterLoss_2018"] is None
-            else rc["bob.bio.face.models.InceptionResnetv1_Casia_CenterLoss_2018"]
-        )
-
-        urls = [
-            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv1_casia_centerloss_2018.tar.gz",
-            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv1_casia_centerloss_2018.tar.gz",
-        ]
-
-        download_model(
-            checkpoint_path, urls, "inceptionresnetv1_casia_centerloss_2018.tar.gz"
-        )
-
-        super(InceptionResnetv1_Casia_CenterLoss_2018, self).__init__(
-            checkpoint_path,
-            preprocessor=tf.image.per_image_standardization,
-            memory_demanding=memory_demanding,
-        )
-
-
-class InceptionResnetv1_MsCeleb_CenterLoss_2018(TransformTensorflow):
-    """
-    InceptionResnet v1 model trained in 2018 using the MsCeleb dataset in the context of the work:
-
-    Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
-
-    """
-
-    def __init__(self, memory_demanding=False):
-        internal_path = pkg_resources.resource_filename(
-            __name__, os.path.join("data", "inceptionresnetv1_msceleb_centerloss_2018"),
-        )
-
-        checkpoint_path = (
-            internal_path
-            if rc["bob.bio.face.models.InceptionResnetv1_MsCeleb_CenterLoss_2018"]
-            is None
-            else rc["bob.bio.face.models.InceptionResnetv1_MsCeleb_CenterLoss_2018"]
-        )
-
-        urls = [
-            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv1_msceleb_centerloss_2018.tar.gz",
-            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv1_msceleb_centerloss_2018.tar.gz",
-        ]
-
-        download_model(
-            checkpoint_path, urls, "inceptionresnetv1_msceleb_centerloss_2018.tar.gz"
-        )
-
-        super(InceptionResnetv1_MsCeleb_CenterLoss_2018, self).__init__(
-            checkpoint_path,
-            preprocessor=tf.image.per_image_standardization,
-            memory_demanding=memory_demanding,
-        )
-
-
-class FaceNetSanderberg_20170512_110547(TransformTensorflow):
-    """
-    Wrapper for the free FaceNet from David Sanderberg model 20170512_110547:
-    https://github.com/davidsandberg/facenet
-
-    And for a preprocessor you can use::
-
-        from bob.bio.face.preprocessor import FaceCrop
-        # This is the size of the image that this model expects
-        CROPPED_IMAGE_HEIGHT = 160
-        CROPPED_IMAGE_WIDTH = 160
-        # eye positions for frontal images
-        RIGHT_EYE_POS = (46, 53)
-        LEFT_EYE_POS = (46, 107)
-        # Crops the face using eye annotations
-        preprocessor = FaceCrop(
-            cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
-            cropped_positions={'leye': LEFT_EYE_POS, 'reye': RIGHT_EYE_POS},
-            color_channel='rgb'
-        )
-    """
-
-    def __init__(self, memory_demanding=False):
-        internal_path = pkg_resources.resource_filename(
-            __name__, os.path.join("data", "facenet_sanderberg_20170512_110547"),
-        )
-
-        checkpoint_path = (
-            internal_path
-            if rc["bob.bio.face.models.facenet_sanderberg_20170512_110547"] is None
-            else rc["bob.bio.face.models.facenet_sanderberg_20170512_110547"]
-        )
-
-        urls = [
-            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/facenet_sanderberg_20170512_110547.tar.gz"
-        ]
-
-        download_model(
-            checkpoint_path, urls, "facenet_sanderberg_20170512_110547.tar.gz"
-        )
-
-        super(FaceNetSanderberg_20170512_110547, self).__init__(
-            checkpoint_path,
-            tf.image.per_image_standardization,
-            memory_demanding=memory_demanding,
-        )
-
diff --git a/bob/bio/face/extractor/__init__.py b/bob/bio/face/extractor/__init__.py
index 45727cb29b6d443ce490e94ba148c57f7165f152..851963904bc693be732108999d1eeea29af03f11 100644
--- a/bob/bio/face/extractor/__init__.py
+++ b/bob/bio/face/extractor/__init__.py
@@ -4,7 +4,7 @@ from .LGBPHS import LGBPHS
 
 # gets sphinx autodoc done right - don't remove it
 def __appropriate__(*args):
-  """Says object was actually declared here, and not in the import module.
+    """Says object was actually declared here, and not in the import module.
   Fixing sphinx warnings of not being able to find classes, when path is shortened.
   Parameters:
 
@@ -14,11 +14,9 @@ def __appropriate__(*args):
   <https://github.com/sphinx-doc/sphinx/issues/3048>`
   """
 
-  for obj in args: obj.__module__ = __name__
+    for obj in args:
+        obj.__module__ = __name__
 
-__appropriate__(
-    DCTBlocks,
-    GridGraph,
-    LGBPHS,
-    )
-__all__ = [_ for _ in dir() if not _.startswith('_')]
+
+__appropriate__(DCTBlocks, GridGraph, LGBPHS)
+__all__ = [_ for _ in dir() if not _.startswith("_")]
diff --git a/bob/bio/face/test/test_annotators.py b/bob/bio/face/test/test_annotators.py
index f70d22b8b37138502583729692a01e4f93ea50c3..668c3ccc3c203886a4a2bb376ca7730364062e54 100644
--- a/bob/bio/face/test/test_annotators.py
+++ b/bob/bio/face/test/test_annotators.py
@@ -4,34 +4,49 @@ import bob.io.image
 from bob.bio.face.annotator import (
     BobIpFacedetect,
     BobIpFlandmark,
-    min_face_size_validator)
+    min_face_size_validator,
+)
 from bob.bio.base.annotator import FailSafe
 from bob.bio.face.annotator import BobIpMTCNN
+from bob.bio.face.annotator import BobIpTinyface
 import numpy
 
 from bob.bio.base.test.utils import is_library_available
 
 face_image = bob.io.base.load(
-    bob.io.base.test_utils.datafile(
-        'testimage.jpg', 'bob.ip.facedetect'
-    )
+    bob.io.base.test_utils.datafile("testimage.jpg", "bob.ip.facedetect")
 )
 
+
 def _assert_mtcnn(annot):
     """
     Verifies that the MTCNN annotations are correct for ``faceimage.jpg``
     """
     assert type(annot) is dict, annot
-    assert [int(x) for x in annot['topleft']] == [68, 76], annot
-    assert [int(x) for x in annot['bottomright']] == [344, 274], annot
-    assert [int(x) for x in annot['reye']] == [180, 129], annot
-    assert [int(x) for x in annot['leye']] == [175, 220], annot
-    assert numpy.allclose(annot['quality'], 0.9998975), annot
+    assert [int(x) for x in annot["topleft"]] == [68, 76], annot
+    assert [int(x) for x in annot["bottomright"]] == [344, 274], annot
+    assert [int(x) for x in annot["reye"]] == [180, 129], annot
+    assert [int(x) for x in annot["leye"]] == [175, 220], annot
+    assert numpy.allclose(annot["quality"], 0.9998975), annot
+
+
+def _assert_tinyface(annot):
+    """
+    Verifies that the Tinyface annotations are correct for ``testimage.jpg``
+    """
+
+    assert type(annot) is dict, annot
+    assert [int(x) for x in annot["topleft"]] == [59, 57], annot
+    assert [int(x) for x in annot["bottomright"]] == [338, 284], annot
+    assert [int(x) for x in annot["reye"]] == [162, 125], annot
+    assert [int(x) for x in annot["leye"]] == [162, 216], annot
+
 
 def _assert_bob_ip_facedetect(annot):
-    assert annot['topleft'] == (110, 82), annot
-    assert annot['bottomright'] == (334, 268), annot
-    assert numpy.allclose(annot['quality'], 39.209601948013685), annot
+    assert annot["topleft"] == (110, 82), annot
+    assert annot["bottomright"] == (334, 268), annot
+    assert numpy.allclose(annot["quality"], 39.209601948013685), annot
+
 
 @is_library_available("tensorflow")
 def test_mtcnn_annotator():
@@ -43,57 +58,71 @@ def test_mtcnn_annotator():
     annot_batch = mtcnn_annotator(batch)
     _assert_mtcnn(annot_batch[0])
 
+
+@is_library_available("mxnet")
+def test_tinyface_annotator():
+    """
+    The Tiny face annotator should return the correct annotations.
+    """
+    tinyface_annotator = BobIpTinyface()
+    batch = [face_image]
+    annot_batch = tinyface_annotator(batch)
+    _assert_tinyface(annot_batch[0])
+
+
 def test_bob_ip_facedetect():
     batch = [face_image]
     annot = BobIpFacedetect()(batch)
     _assert_bob_ip_facedetect(annot[0])
 
+
 def test_bob_ip_facedetect_eyes():
     batch = [face_image]
     annot = BobIpFacedetect(eye_estimate=True)(batch)
     _assert_bob_ip_facedetect(annot[0])
-    assert [int(x) for x in annot[0]['reye']] == [175, 128], annot
-    assert [int(x) for x in annot[0]['leye']] == [175, 221], annot
+    assert [int(x) for x in annot[0]["reye"]] == [175, 128], annot
+    assert [int(x) for x in annot[0]["leye"]] == [175, 221], annot
+
 
 def test_fail_safe():
     annotator = FailSafe(
-        [BobIpFacedetect(eye_estimate=True)],
-        required_keys=('reye', 'leye'),
+        [BobIpFacedetect(eye_estimate=True)], required_keys=("reye", "leye"),
     )
     batch = [face_image]
     annot = annotator(batch)
     _assert_bob_ip_facedetect(annot[0])
-    assert [int(x) for x in annot[0]['reye']] == [175, 128], annot
-    assert [int(x) for x in annot[0]['leye']] == [175, 221], annot
+    assert [int(x) for x in annot[0]["reye"]] == [175, 128], annot
+    assert [int(x) for x in annot[0]["leye"]] == [175, 221], annot
+
 
 def test_bob_ip_flandmark():
     annotator = FailSafe(
-        [BobIpFacedetect(), BobIpFlandmark()],
-        required_keys=('reye', 'leye'),
+        [BobIpFacedetect(), BobIpFlandmark()], required_keys=("reye", "leye"),
     )
     batch = [face_image]
     annot = annotator(batch)
     print(annot)
     _assert_bob_ip_facedetect(annot[0])
-    assert [int(x) for x in annot[0]['reye']] == [183, 127], annot
-    assert [int(x) for x in annot[0]['leye']] == [174, 223], annot
+    assert [int(x) for x in annot[0]["reye"]] == [183, 127], annot
+    assert [int(x) for x in annot[0]["leye"]] == [174, 223], annot
+
 
 def test_min_face_size_validator():
     valid = {
-        'topleft': (0, 0),
-        'bottomright': (32, 32),
+        "topleft": (0, 0),
+        "bottomright": (32, 32),
     }
     assert min_face_size_validator(valid)
 
     not_valid = {
-        'topleft': (0, 0),
-        'bottomright': (28, 33),
+        "topleft": (0, 0),
+        "bottomright": (28, 33),
     }
     assert not min_face_size_validator(not_valid)
 
     not_valid = {
-        'topleft': (0, 0),
-        'bottomright': (33, 28),
+        "topleft": (0, 0),
+        "bottomright": (33, 28),
     }
     assert not min_face_size_validator(not_valid)
 
diff --git a/bob/bio/face/test/test_baselines.py b/bob/bio/face/test/test_baselines.py
index bbe3c4c9afeaa468fd789496e05f6ad88c81885a..3eeee13e281868872a9255b436e403ec57045fa9 100644
--- a/bob/bio/face/test/test_baselines.py
+++ b/bob/bio/face/test/test_baselines.py
@@ -18,11 +18,11 @@ from bob.bio.base.test.utils import is_library_available
 images = dict()
 images["bioref"] = (
     pkg_resources.resource_filename("bob.bio.face.test", "data/testimage.jpg"),
-    {"reye": (131, 176), "leye": (222, 170)},
+    {"reye": (176, 131), "leye": (170, 222)},
 )
 images["probe"] = (
     pkg_resources.resource_filename("bob.bio.face.test", "data/ada.png"),
-    {"reye": (440, 207), "leye": (546, 207)},
+    {"reye": (207, 440), "leye": (207, 546)},
 )
 
 
@@ -80,7 +80,7 @@ def run_baseline(baseline, samples_for_training=[], target_scores=None):
         assert len(checkpoint_scores[0]) == 1
 
         if target_scores is not None:
-            np.allclose(target_scores, scores[0][0].data, atol=10e-3, rtol=10e-3)
+            assert np.allclose(target_scores, scores[0][0].data, atol=10e-5, rtol=10e-5)
 
         assert np.isclose(scores[0][0].data, checkpoint_scores[0][0].data)
 
@@ -113,42 +113,108 @@ def run_baseline(baseline, samples_for_training=[], target_scores=None):
 @pytest.mark.slow
 @is_library_available("tensorflow")
 def test_facenet_baseline():
-    run_baseline("facenet-sanderberg", target_scores=[-0.9220775737526933])
+    run_baseline("facenet-sanderberg", target_scores=-0.9220775737526933)
 
 
 @pytest.mark.slow
 @is_library_available("tensorflow")
 def test_inception_resnetv2_msceleb():
-    run_baseline("inception-resnetv2-msceleb", target_scores=[-0.43447269718504244])
+    run_baseline("inception-resnetv2-msceleb", target_scores=-0.43447269718504244)
 
 
 @pytest.mark.slow
 @is_library_available("tensorflow")
 def test_inception_resnetv2_casiawebface():
-    run_baseline("inception-resnetv2-casiawebface", target_scores=[-0.634583944368043])
+    run_baseline("inception-resnetv2-casiawebface", target_scores=-0.634583944368043)
 
 
 @pytest.mark.slow
 @is_library_available("tensorflow")
 def test_inception_resnetv1_msceleb():
-    run_baseline("inception-resnetv1-msceleb", target_scores=[-0.44497649298306907])
+    run_baseline("inception-resnetv1-msceleb", target_scores=-0.44497649298306907)
 
 
 @pytest.mark.slow
 @is_library_available("tensorflow")
 def test_inception_resnetv1_casiawebface():
-    run_baseline("inception-resnetv1-casiawebface", target_scores=[-0.6411599976437636])
+    run_baseline("inception-resnetv1-casiawebface", target_scores=-0.6411599976437636)
 
 
 @pytest.mark.slow
 @is_library_available("mxnet")
 def test_arcface_insightface():
-    run_baseline("arcface-insightface", target_scores=[-0.0005965275677296544])
+    run_baseline("arcface-insightface", target_scores=-0.0005965275677296544)
+
+
+@pytest.mark.slow
+@is_library_available("tensorflow")
+def test_arcface_resnet50_msceleb_v1():
+    run_baseline("resnet50-msceleb-arcface-2021", target_scores=-0.0008105830382632018)
+
+
+@pytest.mark.slow
+@is_library_available("tensorflow")
+def test_arcface_resnet50_vgg2_v1():
+    run_baseline("resnet50-vgg2-arcface-2021", target_scores=-0.0035127080413503986)
+
+
+@pytest.mark.slow
+@is_library_available("tensorflow")
+def test_arcface_mobilenet_msceleb():
+    run_baseline(
+        "mobilenetv2-msceleb-arcface-2021", target_scores=-9.430960384781972e-05
+    )
+
+
+@pytest.mark.slow
+@is_library_available("tensorflow")
+def test_arcface_resnet50_msceleb_20210521():
+    run_baseline(
+        "resnet50-msceleb-arcface-20210521", target_scores=-0.001238845659379595
+    )
 
 
 def test_gabor_graph():
-    run_baseline("gabor_graph", target_scores=[0.4385451147418939])
+    run_baseline("gabor_graph", target_scores=0.4385451147418939)
 
 
 # def test_lda():
 #    run_baseline("lda", get_fake_samples_for_training())
+
+
+@pytest.mark.slow
+@is_library_available("torch")
+def test_afffe():
+    run_baseline(
+        "afffe", target_scores=-0.27480835869298026,
+    )
+
+
+@pytest.mark.slow
+@is_library_available("torch")
+def test_iresnet34():
+    run_baseline(
+        "iresnet34", target_scores=-0.0003085132478504171,
+    )
+
+
+@pytest.mark.slow
+@is_library_available("torch")
+def test_iresnet50():
+    run_baseline(
+        "iresnet50", target_scores=-0.0013965432856760662,
+    )
+
+
+@pytest.mark.slow
+@is_library_available("torch")
+def test_iresnet100():
+    run_baseline(
+        "iresnet100", target_scores=-0.0002386926047015514,
+    )
+
+
+@pytest.mark.slow
+@is_library_available("cv2")
+def test_vgg16_oxford():
+    run_baseline("vgg16-oxford", target_scores=-0.0019032474437553626)
diff --git a/bob/bio/face/test/test_extractors.py b/bob/bio/face/test/test_extractors.py
index c935beb2848b067fa87a51c9db0ee688f8c961d5..2f205baf90b06ea62ce2611516a6fcb5747bd45a 100644
--- a/bob/bio/face/test/test_extractors.py
+++ b/bob/bio/face/test/test_extractors.py
@@ -26,6 +26,10 @@ import math
 import bob.io.base.test_utils
 
 import pkg_resources
+from bob.db.base import read_annotation_file
+
+import pytest
+from bob.bio.base.test.utils import is_library_available
 
 regenerate_refs = False
 
@@ -178,3 +182,35 @@ def test_lgbphs():
         "bob.bio.face.test", "data/lgbphs_with_phase.hdf5"
     )
     _compare(feature, reference)
+
+
+def test_face_crop(height=112, width=112):
+    # read input
+    image, annotation = _image(), _annotation()
+    CROPPED_IMAGE_HEIGHT = height
+    CROPPED_IMAGE_WIDTH = width
+
+    # preprocessor with fixed eye positions (which correspond to the ones in the annotation file)
+    fixed_cropper = bob.bio.face.preprocessor.FaceCrop(
+        cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
+        color_channel="rgb",
+        cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
+        fixed_positions={"reye": annotation["reye"], "leye": annotation["leye"]},
+    )
+
+    cropped = fixed_cropper.transform([image])
+    return cropped
+
+
+def _image():
+    return bob.io.base.load(
+        pkg_resources.resource_filename("bob.bio.face.test", "data/testimage.jpg")
+    )
+
+
+def _annotation():
+
+    return read_annotation_file(
+        pkg_resources.resource_filename("bob.bio.face.test", "data/testimage.pos"),
+        "named",
+    )
diff --git a/bob/bio/face/test/test_transformers.py b/bob/bio/face/test/test_transformers.py
deleted file mode 100644
index 805a31485df14337a58f1f6d855fd32e66e00811..0000000000000000000000000000000000000000
--- a/bob/bio/face/test/test_transformers.py
+++ /dev/null
@@ -1,21 +0,0 @@
-import numpy as np
-from bob.pipelines import Sample
-from bob.bio.base import load_resource
-from bob.bio.base.test.utils import is_library_available
-import pytest
-
-
-def get_fake_sample(face_size=(160, 160), eyes={"leye": (46, 107), "reye": (46, 53)}):
-    np.random.seed(10)
-    data = np.random.rand(3, 400, 400)
-    annotations = {"leye": (115, 267), "reye": (115, 132)}
-    return Sample(data, key="1", annotations=annotations)
-
-
-def test_lgbphs():
-    transformer = load_resource("lgbphs", "transformer")
-
-    fake_sample = get_fake_sample()
-    transformed_sample = transformer.transform([fake_sample])[0]
-    transformed_data = transformed_sample.data
-    assert transformed_sample.data.shape == (2, 44014)
diff --git a/conda/meta.yaml b/conda/meta.yaml
index d9814e84589733f5c302db0c083a3576da669eba..2f0f5cbf5b41fbf1f36348589057b28a88cae4c3 100644
--- a/conda/meta.yaml
+++ b/conda/meta.yaml
@@ -62,8 +62,8 @@ test:
     # runs tests for package only, report only what is in the package
     # creates html and xml reports and place them in specific directories
     - pytest --verbose --cov {{ name }} --cov-report term-missing --cov-report html:{{ project_dir }}/sphinx/coverage --cov-report xml:{{ project_dir }}/coverage.xml --pyargs {{ name }}
-    - sphinx-build -aEW {{ project_dir }}/doc {{ project_dir }}/sphinx
-    - sphinx-build -aEb doctest {{ project_dir }}/doc sphinx
+    - sphinx-build -aEW {{ project_dir }}/doc {{ project_dir }}/sphinx # [linux]
+    - sphinx-build -aEb doctest {{ project_dir }}/doc sphinx # [linux]
     - conda inspect linkages -p $PREFIX {{ name }}  # [not win]
     - conda inspect objects -p $PREFIX {{ name }}  # [osx]
   requires:
diff --git a/develop.cfg b/develop.cfg
index 4ba1def3457b120f4a4b42274acf4491f2037f55..075ac8a3d707b6646edc9aa1f25961f090a47ab6 100644
--- a/develop.cfg
+++ b/develop.cfg
@@ -40,4 +40,3 @@ recipe = bob.buildout:scripts
 dependent-scripts = true
 
 
-
diff --git a/doc/baselines.rst b/doc/baselines.rst
index 2217e5af8bf96bb90dc7b6964dee14f5fbdca275..bd6b5eb94f51a3a0a9cc66bae1cd01b6ebb8dd3b 100644
--- a/doc/baselines.rst
+++ b/doc/baselines.rst
@@ -55,4 +55,20 @@ Deep learning baselines
 
 * ``inception-resnetv1-casiawebface``: Inception Resnet v1 model trained using the Casia Web dataset in the context of the work published by [TFP18]_
 
-* ``arcface-insightface``: Arcface model from `Insightface <https://github.com/deepinsight/insightface>`_
+* ``arcface-insightface``: Arcface model (Resnet100 backbone) from `Insightface <https://github.com/deepinsight/insightface>`_
+
+* ``resnet50-msceleb-arcface-2021``: Resnet Arcface model trained with the MSCeleb dataset (dataset partially pruned)
+
+* ``resnet50-msceleb-arcface-20210521``: Arcface model trained with the MSCeleb dataset (dataset pruned)
+
+* ``resnet50-vgg2-arcface-2021``: Arcface model trained with the VGG2 dataset
+
+* ``iresnet34``: Arcface model (Resnet 34 backbone) from `Pytorch InsightFace <https://github.com/nizhib/pytorch-insightface>`_
+  
+* ``iresnet50``: Arcface model (Resnet 50 backbone) from `Pytorch InsightFace <https://github.com/nizhib/pytorch-insightface>`_
+  
+* ``iresnet100``: Arcface model (Resnet 100 backbone) from `Pytorch InsightFace <https://github.com/nizhib/pytorch-insightface>`_
+
+* ``vgg16-oxford``: VGG16 Face model from `Oxford <https://www.robots.ox.ac.uk/~vgg/publications/2015/Parkhi15/>`_
+
+* ``afffe``: Pytorch network that extracts 1000-dimensional features, trained by Manuel Gunther, as described in [LGB18]_
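+
+All of these baselines are registered as pipeline resources, so besides the command line they
+can also be loaded programmatically. A minimal sketch (assuming the names above are registered
+under the ``pipeline`` entry-point group of ``bob.bio.base``):
+
+.. code-block:: python
+
+   from bob.bio.base import load_resource
+
+   # load one of the baselines listed above as a vanilla-biometrics pipeline;
+   # the "pipeline" group name is an assumption, see the note above
+   pipeline = load_resource("iresnet100", "pipeline")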
diff --git a/doc/conf.py b/doc/conf.py
index cc6627616e420fec5db36ff5807f2808f668accb..0da76cc48cb324514096368bce4f31784c14cd61 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -10,24 +10,24 @@ import pkg_resources
 # -- General configuration -----------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
-needs_sphinx = '1.3'
+needs_sphinx = "1.3"
 
 # Add any Sphinx extension module names here, as strings. They can be extensions
 # coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
 extensions = [
-    'sphinx.ext.todo',
-    'sphinx.ext.coverage',
-    'sphinx.ext.ifconfig',
-    'sphinx.ext.autodoc',
-    'sphinx.ext.autosummary',
-    'sphinx.ext.doctest',
-    'sphinx.ext.graphviz',
-    'sphinx.ext.intersphinx',
-    'sphinx.ext.napoleon',
-    'sphinx.ext.viewcode',
-    'sphinx.ext.mathjax',
-    'matplotlib.sphinxext.plot_directive'
-    ]
+    "sphinx.ext.todo",
+    "sphinx.ext.coverage",
+    "sphinx.ext.ifconfig",
+    "sphinx.ext.autodoc",
+    "sphinx.ext.autosummary",
+    "sphinx.ext.doctest",
+    "sphinx.ext.graphviz",
+    "sphinx.ext.intersphinx",
+    "sphinx.ext.napoleon",
+    "sphinx.ext.viewcode",
+    "sphinx.ext.mathjax",
+    "matplotlib.sphinxext.plot_directive",
+]
 
 # Be picky about warnings
 nitpicky = False
@@ -36,13 +36,13 @@ nitpicky = False
 nitpick_ignore = []
 
 # Allows the user to override warnings from a separate file
-if os.path.exists('nitpick-exceptions.txt'):
-    for line in open('nitpick-exceptions.txt'):
+if os.path.exists("nitpick-exceptions.txt"):
+    for line in open("nitpick-exceptions.txt"):
         if line.strip() == "" or line.startswith("#"):
             continue
         dtype, target = line.split(None, 1)
         target = target.strip()
-        try: # python 2.x
+        try:  # python 2.x
             target = unicode(target)
         except NameError:
             pass
@@ -58,25 +58,27 @@ autosummary_generate = True
 numfig = True
 
 # If we are on OSX, the 'dvipng' path maybe different
-dvipng_osx = '/opt/local/libexec/texlive/binaries/dvipng'
-if os.path.exists(dvipng_osx): pngmath_dvipng = dvipng_osx
+dvipng_osx = "/opt/local/libexec/texlive/binaries/dvipng"
+if os.path.exists(dvipng_osx):
+    pngmath_dvipng = dvipng_osx
 
 # Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
+templates_path = ["_templates"]
 
 # The suffix of source filenames.
-source_suffix = '.rst'
+source_suffix = ".rst"
 
 # The encoding of source files.
-#source_encoding = 'utf-8-sig'
+# source_encoding = 'utf-8-sig'
 
 # The master toctree document.
-master_doc = 'index'
+master_doc = "index"
 
 # General information about the project.
-project = u'bob.bio.face'
+project = u"bob.bio.face"
 import time
-copyright = u'%s, Idiap Research Institute' % time.strftime('%Y')
+
+copyright = u"%s, Idiap Research Institute" % time.strftime("%Y")
 
 # Grab the setup entry
 distribution = pkg_resources.require(project)[0]
@@ -92,42 +94,42 @@ release = distribution.version
 
 # The language for content autogenerated by Sphinx. Refer to documentation
 # for a list of supported languages.
-#language = None
+# language = None
 
 # There are two options for replacing |today|: either, you set today to some
 # non-false value, then it is used:
-#today = ''
+# today = ''
 # Else, today_fmt is used as the format for a strftime call.
-#today_fmt = '%B %d, %Y'
+# today_fmt = '%B %d, %Y'
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-exclude_patterns = ['links.rst']
+exclude_patterns = ["links.rst"]
 
 # The reST default role (used for this markup: `text`) to use for all documents.
-#default_role = None
+# default_role = None
 
 # If true, '()' will be appended to :func: etc. cross-reference text.
-#add_function_parentheses = True
+# add_function_parentheses = True
 
 # If true, the current module name will be prepended to all description
 # unit titles (such as .. function::).
-#add_module_names = True
+# add_module_names = True
 
 # If true, sectionauthor and moduleauthor directives will be shown in the
 # output. They are ignored by default.
-#show_authors = False
+# show_authors = False
 
 # The name of the Pygments (syntax highlighting) style to use.
-pygments_style = 'sphinx'
+pygments_style = "sphinx"
 
 # A list of ignored prefixes for module index sorting.
-#modindex_common_prefix = []
+# modindex_common_prefix = []
 
 # Some variables which are useful for generated material
-project_variable = project.replace('.', '_')
-short_description = u'Tools for running face recognition experiments'
-owner = [u'Idiap Research Institute']
+project_variable = project.replace(".", "_")
+short_description = u"Tools for running face recognition experiments"
+owner = [u"Idiap Research Institute"]
 
 
 # -- Options for HTML output ---------------------------------------------------
@@ -135,80 +137,81 @@ owner = [u'Idiap Research Institute']
 # The theme to use for HTML and HTML Help pages.  See the documentation for
 # a list of builtin themes.
 import sphinx_rtd_theme
-html_theme = 'sphinx_rtd_theme'
+
+html_theme = "sphinx_rtd_theme"
 
 # Theme options are theme-specific and customize the look and feel of a theme
 # further.  For a list of options available for each theme, see the
 # documentation.
-#html_theme_options = {}
+# html_theme_options = {}
 
 # Add any paths that contain custom themes here, relative to this directory.
 html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
 
 # The name for this set of Sphinx documents.  If None, it defaults to
 # "<project> v<release> documentation".
-#html_title = None
+# html_title = None
 
 # A shorter title for the navigation bar.  Default is the same as html_title.
-#html_short_title = project_variable
+# html_short_title = project_variable
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-html_logo = 'img/logo.png'
+html_logo = "img/logo.png"
 
 # The name of an image file (within the static path) to use as favicon of the
 # docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-html_favicon = 'img/favicon.ico'
+html_favicon = "img/favicon.ico"
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
 # so a file named "default.css" will overwrite the builtin "default.css".
-#html_static_path = ['_static']
+# html_static_path = ['_static']
 
 # If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
 # using the given strftime format.
-#html_last_updated_fmt = '%b %d, %Y'
+# html_last_updated_fmt = '%b %d, %Y'
 
 # If true, SmartyPants will be used to convert quotes and dashes to
 # typographically correct entities.
-#html_use_smartypants = True
+# html_use_smartypants = True
 
 # Custom sidebar templates, maps document names to template names.
-#html_sidebars = {}
+# html_sidebars = {}
 
 # Additional templates that should be rendered to pages, maps page names to
 # template names.
-#html_additional_pages = {}
+# html_additional_pages = {}
 
 # If false, no module index is generated.
-#html_domain_indices = True
+# html_domain_indices = True
 
 # If false, no index is generated.
-#html_use_index = True
+# html_use_index = True
 
 # If true, the index is split into individual pages for each letter.
-#html_split_index = False
+# html_split_index = False
 
 # If true, links to the reST sources are added to the pages.
-#html_show_sourcelink = True
+# html_show_sourcelink = True
 
 # If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
-#html_show_sphinx = True
+# html_show_sphinx = True
 
 # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
-#html_show_copyright = True
+# html_show_copyright = True
 
 # If true, an OpenSearch description file will be output, and all pages will
 # contain a <link> tag referring to it.  The value of this option must be the
 # base URL from which the finished HTML is served.
-#html_use_opensearch = ''
+# html_use_opensearch = ''
 
 # This is the file name suffix for HTML files (e.g. ".xhtml").
-#html_file_suffix = None
+# html_file_suffix = None
 
 # Output file base name for HTML help builder.
-htmlhelp_basename = project_variable + u'_doc'
+htmlhelp_basename = project_variable + u"_doc"
 
 
 # -- Post configuration --------------------------------------------------------
@@ -218,26 +221,26 @@ rst_epilog = """
 .. |project| replace:: Bob
 .. |version| replace:: %s
 .. |current-year| date:: %%Y
-""" % (version,)
+""" % (
+    version,
+)
 
 # Default processing flags for sphinx
-autoclass_content = 'class'
-autodoc_member_order = 'bysource'
+autoclass_content = "class"
+autodoc_member_order = "bysource"
 autodoc_default_options = {
-  "members": True,
-  "undoc-members": True,
-  "show-inheritance": True,
+    "members": True,
+    "undoc-members": True,
+    "show-inheritance": True,
 }
 
 # For inter-documentation mapping:
 from bob.extension.utils import link_documentation, load_requirements
+
 sphinx_requirements = "extra-intersphinx.txt"
 if os.path.exists(sphinx_requirements):
-  intersphinx_mapping = link_documentation(
-      additional_packages=['python','numpy'] + \
-          load_requirements(sphinx_requirements)
-          )
+    intersphinx_mapping = link_documentation(
+        additional_packages=["python", "numpy"] + load_requirements(sphinx_requirements)
+    )
 else:
-  intersphinx_mapping = link_documentation()
-
-
+    intersphinx_mapping = link_documentation()
diff --git a/doc/img/mxnet_lfw_pipe.png b/doc/img/mxnet_lfw_pipe.png
new file mode 100644
index 0000000000000000000000000000000000000000..802d11f72d753bb832a9d65760e03e3338115c6f
Binary files /dev/null and b/doc/img/mxnet_lfw_pipe.png differ
diff --git a/doc/img/opencv_lfw_pipe.png b/doc/img/opencv_lfw_pipe.png
new file mode 100644
index 0000000000000000000000000000000000000000..4d67b3c175e954919118fbd76a36c897ed7c0bba
Binary files /dev/null and b/doc/img/opencv_lfw_pipe.png differ
diff --git a/doc/img/pytorch_lfw_pipe.png b/doc/img/pytorch_lfw_pipe.png
new file mode 100644
index 0000000000000000000000000000000000000000..c68d7b53e6306d0d15ed78cdb421df6db2f14085
Binary files /dev/null and b/doc/img/pytorch_lfw_pipe.png differ
diff --git a/doc/img/pytorch_library_lfw_pipe.png b/doc/img/pytorch_library_lfw_pipe.png
new file mode 100644
index 0000000000000000000000000000000000000000..162a148c0ceaa545b1f8654e28a6f5fbb91b3394
Binary files /dev/null and b/doc/img/pytorch_library_lfw_pipe.png differ
diff --git a/doc/img/tensorflow_lfw_pipe.png b/doc/img/tensorflow_lfw_pipe.png
new file mode 100644
index 0000000000000000000000000000000000000000..29dbc0fabfe9c0e9d5a47cb5b67fa6921dfca0de
Binary files /dev/null and b/doc/img/tensorflow_lfw_pipe.png differ
diff --git a/doc/implemented.rst b/doc/implemented.rst
index a53938b64b5b8527bb7be9edaafc01a9a6ebe0ee..4e3d967d5822412c9f8266d2c95af6b4aebf02d7 100644
--- a/doc/implemented.rst
+++ b/doc/implemented.rst
@@ -9,7 +9,6 @@ Summary
 
 Databases
 ~~~~~~~~~
-
 .. autosummary::
    bob.bio.face.database.ARFaceBioDatabase
    bob.bio.face.database.AtntBioDatabase
@@ -27,6 +26,49 @@ Databases
    bob.bio.face.database.CBSRNirVis2Database
 
 
+Deep Learning Extractors
+~~~~~~~~~~~~~~~~~~~~~~~~
+   
+
+Tensorflow models
+=================
+
+.. autosummary::
+   bob.bio.face.embeddings.tensorflow.facenet_sanderberg_20170512_110547
+   bob.bio.face.embeddings.tensorflow.resnet50_msceleb_arcface_2021
+   bob.bio.face.embeddings.tensorflow.resnet50_msceleb_arcface_20210521
+   bob.bio.face.embeddings.tensorflow.resnet50_vgg2_arcface_2021
+   bob.bio.face.embeddings.tensorflow.mobilenetv2_msceleb_arcface_2021
+   bob.bio.face.embeddings.tensorflow.inception_resnet_v1_msceleb_centerloss_2018
+   bob.bio.face.embeddings.tensorflow.inception_resnet_v2_msceleb_centerloss_2018
+   bob.bio.face.embeddings.tensorflow.inception_resnet_v1_casia_centerloss_2018
+   bob.bio.face.embeddings.tensorflow.inception_resnet_v2_casia_centerloss_2018
+   
+   
+PyTorch models
+==============
+   
+.. autosummary::
+   bob.bio.face.embeddings.pytorch.afffe_baseline
+   bob.bio.face.embeddings.pytorch.iresnet34
+   bob.bio.face.embeddings.pytorch.iresnet50
+   bob.bio.face.embeddings.pytorch.iresnet100
+
+MXNet models
+============
+   
+.. autosummary::
+   bob.bio.face.embeddings.mxnet.arcface_insightFace_lresnet100
+
+Caffe models
+============
+
+.. autosummary::
+   bob.bio.face.embeddings.opencv.vgg16_oxford_baseline
+
+   
+
+
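+Each of the functions above builds a vanilla-biometrics pipeline around the corresponding model.
+A minimal sketch of how one of them can be instantiated (the keyword arguments below follow the
+pattern used in the notebooks of this repository and are given as an example only):
+
+.. code-block:: python
+
+   from bob.bio.face.embeddings.pytorch import iresnet100
+
+   # build a vanilla-biometrics pipeline for images annotated with eye centers
+   pipeline = iresnet100(
+       annotation_type="eyes-center",
+       fixed_positions=None,
+       memory_demanding=False,
+   )
+
+   # the transformer part of the pipeline can be used on its own to extract embeddings
+   transformer = pipeline.transformer
+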
 Face Image Annotators
 ~~~~~~~~~~~~~~~~~~~~~
 
@@ -35,6 +77,7 @@ Face Image Annotators
    bob.bio.face.annotator.BobIpFacedetect
    bob.bio.face.annotator.BobIpFlandmark
    bob.bio.face.annotator.BobIpMTCNN
+   bob.bio.face.annotator.BobIpTinyface
 
 
 Image Preprocessors
@@ -58,6 +101,9 @@ Image Feature Extractors
    bob.bio.face.extractor.GridGraph
    bob.bio.face.extractor.LGBPHS
 
+   
+
+
 
 Face Recognition Algorithms
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/index.rst b/doc/index.rst
index 776d35c6c8d8eaf706e1ba713f3fc8f1c75eee69..c7c1733f3800998ed2485379a10c775018c444f3 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -48,7 +48,7 @@ Users Guide
    :maxdepth: 2
 
    baselines
-   leaderboard/leaderboard
+   leaderboard/leaderboard   
    references
    annotators
    faq
diff --git a/doc/leaderboard/mobio.rst b/doc/leaderboard/mobio.rst
index 708566505b5b666b590f415f0b2f2545b25048b4..0d92490d2a8e14143cebf9ebf07f852c83597b4e 100644
--- a/doc/leaderboard/mobio.rst
+++ b/doc/leaderboard/mobio.rst
@@ -7,7 +7,52 @@ Mobio Dataset
 =============
 
 
-.. todo::
-   Benchmarks on Mobio Database
+The MOBIO dataset is a video database containing bimodal data (face/speaker).
+It is composed of 152 people (split into two genders, male and female), mostly Europeans, recorded over 5 sessions (with a few weeks between sessions).
+The database was recorded using two types of mobile devices: mobile phones (NOKIA N93i) and laptop
+computers (standard 2008 MacBook).
+
+For face recognition, images are used instead of videos.
+One image was extracted from each video by choosing the video frame after 10 seconds.
+The eye positions were manually labelled and distributed with the database.
+
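+The images and the labelled eye positions can be accessed through the database interface of this
+package. A minimal sketch (following the MOBIO notebooks in this repository; the ``annotations``
+attribute of the samples is assumed to hold the labelled eye positions):
+
+.. code-block:: python
+
+   from bob.bio.face.config.database.mobio_male import database
+
+   # take a few samples; each sample is expected to carry the manually
+   # labelled eye positions in its ``annotations`` attribute
+   samples = database.all_samples()[:10]
+   for sample in samples:
+       print(sample.key, sample.annotations)
+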
+For more information about the dataset, check:
+
+.. code-block:: bibtex
+
+    @article{McCool_IET_BMT_2013,
+        title = {Session variability modelling for face authentication},
+        author = {McCool, Chris and Wallace, Roy and McLaren, Mitchell and El Shafey, Laurent and Marcel, S{\'{e}}bastien},
+        month = sep,
+        journal = {IET Biometrics},
+        volume = {2},
+        number = {3},
+        year = {2013},
+        pages = {117-129},
+        issn = {2047-4938},
+        doi = {10.1049/iet-bmt.2012.0059},
+    }
+
+
+Benchmarks
+==========
+    
+You can run the MOBIO baselines with a simple command such as:
+
+.. code-block:: bash
+
+   bob bio pipeline vanilla-biometrics mobio-male arcface-insightface
+
+
+Scores from some of our baselines can be found `here <https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/scores/mobio-male.tar.gz>`_.
+A DET curve can be generated from these scores by running the following commands:
+
+.. code-block:: bash
+
+   wget https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/scores/mobio-male.tar.gz   
+   tar -xzvf mobio-male.tar.gz
+   bob bio det ./mobio-male/{arcface_insightFace_lresnet100,inception_resnet_v2_msceleb_centerloss_2018,iresnet50,iresnet100,mobilenetv2_msceleb_arcface_2021,resnet50_msceleb_arcface_20210521,vgg16_oxford_baseline,afffe_baseline}/scores-{dev,eval} --legends arcface_insightFace_lresnet100,inception_resnet_v2_msceleb_centerloss_2018,iresnet50,iresnet100,mobilenetv2_msceleb_arcface_2021,resnet50_msceleb_arcface_20210521,vgg16_oxford_baseline,afffe -S -e --figsize 16,8
+
+and get the following :download:`plot <./plots/det-mobio-male.pdf>`.
+
 
-   Probably for Manuel's students
\ No newline at end of file
diff --git a/doc/leaderboard/plots/det-mobio-male.pdf b/doc/leaderboard/plots/det-mobio-male.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..f2794c3965c31ae300e02c3381e91d30e2a2f583
Binary files /dev/null and b/doc/leaderboard/plots/det-mobio-male.pdf differ
diff --git a/doc/references.rst b/doc/references.rst
index ea60e1ad5e565618d66c19fe260dabb1b4b4df45..cffb6440d82d908a3869dc61bc66240b5b1ec38b 100644
--- a/doc/references.rst
+++ b/doc/references.rst
@@ -17,3 +17,4 @@ References
 .. [ZSQ09]  *W. Zhang, S. Shan, L. Qing, X. Chen and W. Gao*. **Are Gabor phases really useless for face recognition?** Pattern Analysis & Applications, 12:301-307, 2009.
 .. [TFP18] de Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
 .. [HRM06]   *G. Heusch, Y. Rodriguez, and S. Marcel*. **Local Binary Patterns as an Image Preprocessing for Face Authentication**. In IEEE International Conference on Automatic Face and Gesture Recognition (AFGR), 2006.
+.. [LGB18]    *C. Li, M. Gunther and T. E. Boult*. **ECLIPSE: Ensembles of Centroids Leveraging Iteratively Processed Spatial Eclipse Clustering**. IEEE Winter Conference on Applications of Computer Vision (WACV), 2018.
\ No newline at end of file
diff --git a/notebooks/50-shades-of-face.ipynb b/notebooks/50-shades-of-face.ipynb
index 0e9877223ac462c852f83152c04994a1f537c34b..2a99754a02b22caf0b8e7879aa28f7bc137bc2ae 100644
--- a/notebooks/50-shades-of-face.ipynb
+++ b/notebooks/50-shades-of-face.ipynb
@@ -21,11 +21,10 @@
     "from bob.bio.base.pipelines.vanilla_biometrics import execute_vanilla_biometrics\n",
     "from bob.bio.base.pipelines.vanilla_biometrics import Distance\n",
     "from bob.bio.base.pipelines.vanilla_biometrics import VanillaBiometricsPipeline\n",
-    "\n",
-    "from bob.bio.face.database import MobioDatabase\n",
+    "from bob.pipelines import wrap\n",
     "from bob.bio.face.preprocessor import FaceCrop\n",
+    "from bob.bio.face.database import MobioDatabase\n",
     "from bob.extension import rc\n",
-    "from bob.pipelines import wrap\n",
     "import os\n",
     "import scipy.spatial\n",
     "import bob.measure\n",
@@ -47,8 +46,8 @@
     "\n",
     "\n",
     "######## CHANGE YOUR FEATURE EXTRACTOR HERE\n",
-    "from bob.bio.face.embeddings.mxnet_models import ArcFaceInsightFace\n",
-    "extractor_transformer = wrap([\"sample\"],ArcFaceInsightFace())\n",
+    "from bob.bio.face.embeddings.mxnet import ArcFaceInsightFace_LResNet100\n",
+    "extractor_transformer = wrap([\"sample\"],ArcFaceInsightFace_LResNet100())\n",
     "\n",
     "### CHANGE YOUR MATCHER HERE\n",
     "algorithm = Distance(distance_function = scipy.spatial.distance.cosine,is_distance_function = True)\n",
@@ -106,7 +105,7 @@
     {
      "data": {
       "application/vnd.jupyter.widget-view+json": {
-       "model_id": "365e408f16e04de7ba0f709639b4ee8d",
+       "model_id": "4f9fcc28865e4adaa6c989831f773b0c",
        "version_major": 2,
        "version_minor": 0
       },
@@ -220,32 +219,6 @@
      "name": "stderr",
      "output_type": "stream",
      "text": [
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
-      "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
       "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
       "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
       "There's no data to train background model.For the rest of the execution it will be assumed that the pipeline is stateless.\n",
@@ -255,7 +228,7 @@
     {
      "data": {
       "application/vnd.jupyter.widget-view+json": {
-       "model_id": "6556949a548c4f1da86073f0a9351109",
+       "model_id": "3d36c081e56c4d93b3351b82209a8fb7",
        "version_major": 2,
        "version_minor": 0
       },
diff --git a/notebooks/Extract_ArcFace_from_MOBIO.ipynb b/notebooks/Extract_ArcFace_from_MOBIO.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..826403f77664cb399b3e1fdb21012016daf70d4a
--- /dev/null
+++ b/notebooks/Extract_ArcFace_from_MOBIO.ipynb
@@ -0,0 +1,269 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Extracting embedding features from face data\n",
+    "In this notebook, we aim to extract embedding features from images using face recogntion extractors.\n",
+    "As an example, we use MOBIO dataset, and extract Arcface features from the face images:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "##### CHANGE YOUR DATABASE HERE\n",
+    "from bob.bio.face.config.database.mobio_male import database\n",
+    "annotation_type = database.annotation_type\n",
+    "fixed_positions = database.fixed_positions\n",
+    "memory_demanding = True\n",
+    "dask_client = None"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from bob.bio.face.embeddings.mxnet import arcface_insightFace_lresnet100\n",
+    "pipeline = arcface_insightFace_lresnet100(annotation_type=annotation_type,\n",
+    "                                          fixed_positions=fixed_positions,\n",
+    "                                          memory_demanding=memory_demanding)\n",
+    "transformer = pipeline.transformer"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Pipeline(steps=[('ToDaskBag', ToDaskBag()),\n",
+      "                ('samplewrapper-1',\n",
+      "                 DaskWrapper(estimator=CheckpointWrapper(estimator=SampleWrapper(estimator=FaceCrop(annotator=BobIpMTCNN(),\n",
+      "                                                                                                    cropped_image_size=(112,\n",
+      "                                                                                                                        112),\n",
+      "                                                                                                    cropped_positions={'leye': (55,\n",
+      "                                                                                                                                81),\n",
+      "                                                                                                                       'reye': (55,\n",
+      "                                                                                                                                42)}),\n",
+      "                                                                                 fit_extra_arguments=(),\n",
+      "                                                                                 transform_extra_arguments=(('annotations',\n",
+      "                                                                                                             'annotations'),)),\n",
+      "                                                         features_dir='featur...\n",
+      "                                                         save_func=<function save at 0x7f345a1224d0>))),\n",
+      "                ('samplewrapper-2',\n",
+      "                 DaskWrapper(estimator=CheckpointWrapper(estimator=SampleWrapper(estimator=ArcFaceInsightFace_LResNet100(memory_demanding=True),\n",
+      "                                                                                 fit_extra_arguments=(),\n",
+      "                                                                                 transform_extra_arguments=()),\n",
+      "                                                         features_dir='features/samplewrapper-2',\n",
+      "                                                         load_func=<function load at 0x7f345a122320>,\n",
+      "                                                         save_func=<function save at 0x7f345a1224d0>)))])\n"
+     ]
+    }
+   ],
+   "source": [
+    "from bob.pipelines import wrap\n",
+    "\n",
+    "\n",
+    "features_dir = \"features\" #Path to store extracted features\n",
+    "# Wrapping with CHECKPOINT and DASK\n",
+    "transformer = wrap([\"checkpoint\",\"dask\"],\n",
+    "                   transformer,\n",
+    "                   features_dir=features_dir)\n",
+    "\n",
+    "# Printing the setup of the transformer\n",
+    "print(transformer)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As an example, we consider 10 samples from this database and extract features for these samples:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# get 10 samples from database\n",
+    "samples = database.all_samples()[:10]"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 15,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Setting the DASK client\n",
+    "# HERE MAKE ABSOLUTELLY SURE THAT YOU DO `SETSHELL grid` \n",
+    "# BEFORE STARTING THE NOTEBOOK\n",
+    "\n",
+    "from dask.distributed import Client\n",
+    "from bob.pipelines.distributed.sge import SGEMultipleQueuesCluster\n",
+    "\n",
+    "cluster = SGEMultipleQueuesCluster(min_jobs=1)\n",
+    "dask_client = Client(cluster)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 16,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "\n",
+    "features = transformer.transform(samples)\n",
+    "if dask_client is not None:\n",
+    "    features = features.compute(scheduler=dask_client)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In the following cells, we convert the extracted features to `numpy.array` and check the size of features."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 17,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "from bob.pipelines import SampleBatch\n",
+    "\n",
+    "np_features = np.array(SampleBatch(features))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 18,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "array([[[ 0.5345935 , -1.0668839 , -0.62798595, ..., -0.78859204,\n",
+       "         -0.5147211 ,  2.1415784 ]],\n",
+       "\n",
+       "       [[ 0.24587776, -1.1436105 , -0.21513344, ..., -0.4950465 ,\n",
+       "         -0.7586405 ,  1.9262394 ]],\n",
+       "\n",
+       "       [[-0.01235329, -1.0903177 , -0.7307515 , ..., -1.5341333 ,\n",
+       "         -0.9396954 ,  1.8103021 ]],\n",
+       "\n",
+       "       ...,\n",
+       "\n",
+       "       [[ 0.46007535, -0.9715014 , -0.52703196, ..., -0.29170716,\n",
+       "         -0.74297565,  1.8094344 ]],\n",
+       "\n",
+       "       [[ 0.6113469 , -1.1828535 , -0.19491309, ..., -0.22889124,\n",
+       "         -0.58382076,  2.185493  ]],\n",
+       "\n",
+       "       [[ 0.71980965, -0.4669612 , -0.49327967, ...,  0.0910981 ,\n",
+       "         -0.65268064,  0.93472594]]], dtype=float32)"
+      ]
+     },
+     "execution_count": 18,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "np_features"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 19,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "tornado.application - ERROR - Exception in callback functools.partial(<function TCPServer._handle_connection.<locals>.<lambda> at 0x7f3470e7c8c0>, <Task finished coro=<BaseTCPListener._handle_stream() done, defined at /idiap/user/tpereira/conda/envs/bob.nightlies/lib/python3.7/site-packages/distributed/comm/tcp.py:445> exception=ValueError('invalid operation on non-started TCPListener')>)\n",
+      "Traceback (most recent call last):\n",
+      "  File \"/idiap/user/tpereira/conda/envs/bob.nightlies/lib/python3.7/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n",
+      "    ret = callback()\n",
+      "  File \"/idiap/user/tpereira/conda/envs/bob.nightlies/lib/python3.7/site-packages/tornado/tcpserver.py\", line 327, in <lambda>\n",
+      "    gen.convert_yielded(future), lambda f: f.result()\n",
+      "  File \"/idiap/user/tpereira/conda/envs/bob.nightlies/lib/python3.7/site-packages/distributed/comm/tcp.py\", line 451, in _handle_stream\n",
+      "    logger.debug(\"Incoming connection from %r to %r\", address, self.contact_address)\n",
+      "  File \"/idiap/user/tpereira/conda/envs/bob.nightlies/lib/python3.7/site-packages/distributed/comm/tcp.py\", line 486, in contact_address\n",
+      "    host, port = self.get_host_port()\n",
+      "  File \"/idiap/user/tpereira/conda/envs/bob.nightlies/lib/python3.7/site-packages/distributed/comm/tcp.py\", line 467, in get_host_port\n",
+      "    self._check_started()\n",
+      "  File \"/idiap/user/tpereira/conda/envs/bob.nightlies/lib/python3.7/site-packages/distributed/comm/tcp.py\", line 443, in _check_started\n",
+      "    raise ValueError(\"invalid operation on non-started TCPListener\")\n",
+      "ValueError: invalid operation on non-started TCPListener\n",
+      "tornado.application - ERROR - Exception in callback functools.partial(<function TCPServer._handle_connection.<locals>.<lambda> at 0x7f3470e7ce60>, <Task finished coro=<BaseTCPListener._handle_stream() done, defined at /idiap/user/tpereira/conda/envs/bob.nightlies/lib/python3.7/site-packages/distributed/comm/tcp.py:445> exception=ValueError('invalid operation on non-started TCPListener')>)\n",
+      "Traceback (most recent call last):\n",
+      "  File \"/idiap/user/tpereira/conda/envs/bob.nightlies/lib/python3.7/site-packages/tornado/ioloop.py\", line 743, in _run_callback\n",
+      "    ret = callback()\n",
+      "  File \"/idiap/user/tpereira/conda/envs/bob.nightlies/lib/python3.7/site-packages/tornado/tcpserver.py\", line 327, in <lambda>\n",
+      "    gen.convert_yielded(future), lambda f: f.result()\n",
+      "  File \"/idiap/user/tpereira/conda/envs/bob.nightlies/lib/python3.7/site-packages/distributed/comm/tcp.py\", line 451, in _handle_stream\n",
+      "    logger.debug(\"Incoming connection from %r to %r\", address, self.contact_address)\n",
+      "  File \"/idiap/user/tpereira/conda/envs/bob.nightlies/lib/python3.7/site-packages/distributed/comm/tcp.py\", line 486, in contact_address\n",
+      "    host, port = self.get_host_port()\n",
+      "  File \"/idiap/user/tpereira/conda/envs/bob.nightlies/lib/python3.7/site-packages/distributed/comm/tcp.py\", line 467, in get_host_port\n",
+      "    self._check_started()\n",
+      "  File \"/idiap/user/tpereira/conda/envs/bob.nightlies/lib/python3.7/site-packages/distributed/comm/tcp.py\", line 443, in _check_started\n",
+      "    raise ValueError(\"invalid operation on non-started TCPListener\")\n",
+      "ValueError: invalid operation on non-started TCPListener\n",
+      "distributed.client - ERROR - Failed to reconnect to scheduler after 10.00 seconds, closing client\n",
+      "_GatheringFuture exception was never retrieved\n",
+      "future: <_GatheringFuture finished exception=CancelledError()>\n",
+      "concurrent.futures._base.CancelledError\n"
+     ]
+    }
+   ],
+   "source": [
+    "# KILL THE SGE WORKERS\n",
+    "dask_client.shutdown()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/notebooks/compare_two_samples.ipynb b/notebooks/compare_two_samples.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..299ceffecdb441ea5ee819125f186cdcad2a21fc
--- /dev/null
+++ b/notebooks/compare_two_samples.ipynb
@@ -0,0 +1,294 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Comparing 2 samples\n",
+    "\n",
+    "In this example we show how to take a baseline and compare two samples with it"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 10,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "WARNING:tensorflow:SavedModel saved prior to TF 2.4 detected when loading Keras model. Please ensure that you are saving the model with model.save() or tf.keras.models.save_model(), *NOT* tf.saved_model.save(). To confirm, there should be a file named \"keras_metadata.pb\" in the SavedModel directory.\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-0._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-1._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-2._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer-3._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-3._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-4._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer-6._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-5._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-6._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-7._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-8._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-9._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-10._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-11._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-12._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-13._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-14._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-15._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-16._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-17._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-18._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-19._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-20._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-21._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-22._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-23._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-24._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-25._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-26._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-27._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-28._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-29._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-30._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-31._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-32._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-33._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-34._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-35._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-36._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-37._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-38._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-39._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-40._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-41._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-42._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-43._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-44._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-45._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-46._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-47._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-48._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer-51._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer-52._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-49._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-50._inbound_nodes\n",
+      "WARNING:tensorflow:A checkpoint was restored (e.g. tf.train.Checkpoint.restore or tf.keras.Model.load_weights) but not all checkpointed values were used. See above for specific issues. Use expect_partial() on the load status object, e.g. tf.train.Checkpoint.restore(...).expect_partial(), to silence these warnings, or use assert_consumed() to make the check explicit. See https://www.tensorflow.org/guide/checkpoint#loading_mechanics for details.\n",
+      "WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.\n",
+      "-0.40941279115199114\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Add the path of sample A with sample B\n",
+    "SAMPLE_A = \"\"\n",
+    "SAMPLE_B = \"\"\n",
+    "\n",
+    "from bob.pipelines import SampleSet, DelayedSample\n",
+    "import bob.io.base\n",
+    "import functools\n",
+    "\n",
+    "# Picking a baseline\n",
+    "from bob.bio.face.embeddings.tensorflow import inception_resnet_v2_casia_centerloss_2018\n",
+    "\n",
+    "# Setting where the eyes are for each sample (here the two samples have the same eyes position)\n",
+    "fixed_positions = {\"leye\":(60,123), \"reye\":(60,63)}\n",
+    "\n",
+    "pipeline = inception_resnet_v2_casia_centerloss_2018(\n",
+    "    annotation_type=\"eyes-center\", fixed_positions=fixed_positions\n",
+    ")\n",
+    "\n",
+    "def create_sample(path, key):\n",
+    "    return SampleSet(\n",
+    "    [DelayedSample(functools.partial(bob.io.base.load, path), key=str(key))],\n",
+    "    key=str(key),\n",
+    "    biometric_id=str(key),)\n",
+    "\n",
+    "sample_A = create_sample(SAMPLE_A, 0)\n",
+    "sample_B = create_sample(SAMPLE_B, 1)\n",
+    "\n",
+    "\n",
+    "biometric_references = pipeline.create_biometric_reference([sample_A])\n",
+    "scores, _ = pipeline.compute_scores([sample_B], biometric_references)\n",
+    "\n",
+    "# Printing the score from the first sample\n",
+    "print(scores[0].samples[0].data)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Extracting embeddings from two samples"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 14,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "WARNING:tensorflow:SavedModel saved prior to TF 2.4 detected when loading Keras model. Please ensure that you are saving the model with model.save() or tf.keras.models.save_model(), *NOT* tf.saved_model.save(). To confirm, there should be a file named \"keras_metadata.pb\" in the SavedModel directory.\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-0._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-1._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-2._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer-3._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-3._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-4._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer-6._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-5._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-6._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-7._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-8._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-9._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-10._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-11._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-12._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-13._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-14._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-15._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-16._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-17._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-18._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-19._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-20._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-21._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-22._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-23._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-24._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-25._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-26._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-27._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-28._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-29._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-30._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-31._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-32._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-33._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-34._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-35._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-36._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-37._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-38._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-39._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-40._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-41._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-42._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-43._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-44._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-45._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-46._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-47._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-48._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer-51._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer-52._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-49._inbound_nodes\n",
+      "WARNING:tensorflow:Unresolved object in checkpoint: (root).layer_with_weights-50._inbound_nodes\n",
+      "WARNING:tensorflow:A checkpoint was restored (e.g. tf.train.Checkpoint.restore or tf.keras.Model.load_weights) but not all checkpointed values were used. See above for specific issues. Use expect_partial() on the load status object, e.g. tf.train.Checkpoint.restore(...).expect_partial(), to silence these warnings, or use assert_consumed() to make the check explicit. See https://www.tensorflow.org/guide/checkpoint#loading_mechanics for details.\n",
+      "WARNING:tensorflow:No training configuration found in save file, so the model was *not* compiled. Compile it manually.\n",
+      "WARNING:tensorflow:5 out of the last 8 calls to <function Model.make_predict_function.<locals>.predict_function at 0x7fe46e4b54d0> triggered tf.function retracing. Tracing is expensive and the excessive number of tracings could be due to (1) creating @tf.function repeatedly in a loop, (2) passing tensors with different shapes, (3) passing Python objects instead of tensors. For (1), please define your @tf.function outside of the loop. For (2), @tf.function has experimental_relax_shapes=True option that relaxes argument shapes that can avoid unnecessary retracing. For (3), please refer to https://www.tensorflow.org/guide/function#controlling_retracing and https://www.tensorflow.org/api_docs/python/tf/function for  more details.\n",
+      "[array([ 0.09172679,  0.0324957 ,  0.03359955, -0.00483591,  0.02353592,\n",
+      "        0.04886897, -0.00094087,  0.06978928,  0.10372686, -0.10392421,\n",
+      "        0.13330463,  0.07076985,  0.08029327, -0.04671128, -0.02518994,\n",
+      "        0.12051351,  0.06940517, -0.038332  , -0.02331832,  0.02664019,\n",
+      "        0.04434336, -0.02471577, -0.0102796 , -0.085636  ,  0.09365616,\n",
+      "        0.01478034, -0.01528178,  0.03954509,  0.02043953,  0.0704874 ,\n",
+      "       -0.03089085,  0.0312874 ,  0.10803461, -0.05972857, -0.16184652,\n",
+      "       -0.03643044,  0.09576611, -0.08911359, -0.13417085, -0.03623924,\n",
+      "        0.16680372,  0.06437656, -0.12195335,  0.15070474, -0.00109789,\n",
+      "       -0.01936167,  0.12604736, -0.02172692, -0.16884778, -0.03880082,\n",
+      "        0.03399109, -0.05475918,  0.0058937 , -0.24445663, -0.0814684 ,\n",
+      "        0.06544494,  0.09598684,  0.18717638,  0.02309373, -0.1937956 ,\n",
+      "       -0.11580025, -0.03947522,  0.02225816, -0.17796317,  0.0286997 ,\n",
+      "        0.05637549,  0.13862395,  0.07888459, -0.0746187 , -0.06245924,\n",
+      "       -0.05905129, -0.06469788, -0.13217713, -0.00836692, -0.03132226,\n",
+      "        0.02515431,  0.10684554,  0.03239268, -0.08999073,  0.09488151,\n",
+      "        0.07205841, -0.05697955, -0.01520361, -0.10826666, -0.11681797,\n",
+      "       -0.06068925,  0.08056987,  0.11805228, -0.01560696, -0.05713017,\n",
+      "        0.00941482, -0.08339076,  0.10243417,  0.1385035 , -0.08926324,\n",
+      "       -0.08198499,  0.09009691,  0.01847403, -0.04287886,  0.01292743,\n",
+      "       -0.02277992, -0.14719044,  0.10625618,  0.00111244,  0.19167267,\n",
+      "       -0.01272508,  0.08188605, -0.12089421, -0.05653883, -0.05532645,\n",
+      "       -0.05844725, -0.11874794,  0.12547068, -0.06742927, -0.07416619,\n",
+      "       -0.01912123,  0.04917778, -0.10811004,  0.04180698,  0.18301588,\n",
+      "       -0.00808303,  0.00328449, -0.12431399, -0.0101751 , -0.10478832,\n",
+      "        0.12978972,  0.09860662,  0.12227818], dtype=float32)]\n"
+     ]
+    }
+   ],
+   "source": [
+    "# Add the path of sample A with sample B\n",
+    "SAMPLE_A = \"\"\n",
+    "SAMPLE_B = \"\"\n",
+    "\n",
+    "from bob.pipelines import SampleSet, DelayedSample\n",
+    "import bob.io.base\n",
+    "import functools\n",
+    "\n",
+    "# Picking a baseline\n",
+    "from bob.bio.face.embeddings.tensorflow import inception_resnet_v2_casia_centerloss_2018\n",
+    "\n",
+    "# Setting where the eyes are for each sample (here the two samples have the same eyes position)\n",
+    "fixed_positions = {\"leye\":(60,123), \"reye\":(60,63)}\n",
+    "\n",
+    "pipeline = inception_resnet_v2_casia_centerloss_2018(\n",
+    "    annotation_type=\"eyes-center\", fixed_positions=fixed_positions\n",
+    ")\n",
+    "transformer = pipeline.transformer\n",
+    "\n",
+    "def create_sample(path, key):\n",
+    "    return SampleSet(\n",
+    "    [DelayedSample(functools.partial(bob.io.base.load, path), key=str(key))],\n",
+    "    key=str(key),\n",
+    "    biometric_id=str(key),)\n",
+    "\n",
+    "sample_A = create_sample(SAMPLE_A, 0)\n",
+    "sample_B = create_sample(SAMPLE_B, 1)\n",
+    "\n",
+    "\n",
+    "transformed_samples = transformer.transform([sample_A, sample_B])\n",
+    "\n",
+    "print([x.data for x in transformed_samples[0].samples])\n",
+    "\n",
+    "\n",
+    "#biometric_references = pipeline.create_biometric_reference([sample_A])\n",
+    "#scores, _ = pipeline.compute_scores([sample_B], biometric_references)\n",
+    "\n",
+    "# Printing the score from the first sample\n",
+    "#print(scores[0].samples[0].data)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 4
+}
diff --git a/notebooks/inject_samples.ipynb b/notebooks/inject_samples.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..7ca01a40819a618a5ac6dba6f08a95fd0e26e8f1
--- /dev/null
+++ b/notebooks/inject_samples.ipynb
@@ -0,0 +1,394 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Injecting extra samples in vanilla biometrics protocols\n",
+    "\n",
+    "Sometimes our experiments go beyond \"simple\" database protocols.\n",
+    "Sometimes we just want to analyze the impact of some extra samples in our experiments without writing a whole dataset intergace for that.\n",
+    "\n",
+    "This notebook shows how to \"inject\" samples that doesn't belong to any protocol to some existing protocol.\n",
+    "We'll show case how to inject samples to perform score normalization.\n",
+    "\n",
+    "## Preparing the database\n",
+    "\n",
+    "We'll show case how to perform this injection using the MEDS dataset."
+   ]
+  },
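+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# NOTE: illustrative sketch only, not part of the original experiment.\n",
+    "# It shows, with made-up numbers, what the ZTNormPipeline computes from the\n",
+    "# injected cohorts: z-norm rescales a raw score by the statistics of the model\n",
+    "# against the z-probes, t-norm by the statistics of the probe against the\n",
+    "# t-references, and s-norm averages the two.\n",
+    "import numpy as np\n",
+    "\n",
+    "raw_score = 0.72                               # hypothetical raw similarity score\n",
+    "z_cohort = np.array([0.10, 0.25, 0.05, 0.18])  # model scored against injected z-probes\n",
+    "t_cohort = np.array([0.20, 0.12, 0.30, 0.08])  # probe scored against injected t-references\n",
+    "\n",
+    "z_normed = (raw_score - z_cohort.mean()) / z_cohort.std()\n",
+    "t_normed = (raw_score - t_cohort.mean()) / t_cohort.std()\n",
+    "s_normed = 0.5 * (z_normed + t_normed)\n",
+    "print(z_normed, t_normed, s_normed)"
+   ]
+  },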
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "dask_client = None\n",
+    "\n",
+    "OUTPUT_PATH = \"YOUR_TEMP\"\n",
+    "PATH_INJECTED_DATA = \"/idiap/temp/parzul/db_gen_output/database_neutralized/image/00000/\"\n",
+    "\n",
+    "\n",
+    "##### CHANGE YOUR DATABASE HERE\n",
+    "from bob.bio.face.database import MEDSDatabase\n",
+    "\n",
+    "database = MEDSDatabase(protocol=\"verification_fold1\")\n",
+    "\n",
+    "# Fetching the keys\n",
+    "#references = database.zprobes()[0].references\n",
+    "references = database.probes(group=\"eval\")[0].references + database.probes(group=\"dev\")[0].references\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Loading samples that will be injected\n",
+    "\n",
+    "Here we'll inject samples for znorm and tnorm"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# PATH\n",
+    "import os\n",
+    "import functools\n",
+    "import bob.io.base\n",
+    "# Fetching real data\n",
+    "#treferences = database.treferences()\n",
+    "#zprobes = database.zprobes()\n",
+    "\n",
+    "eyes_annotations={'leye': (61, 120),\n",
+    "                  'reye': (61, 63)}\n",
+    "\n",
+    "\n",
+    "treferences_lst = [\"0/0_ethnicity_0.png\",\n",
+    "                   \"0/0_ethnicity_7.png\"]\n",
+    "\n",
+    "zprobes_lst = [\"1/1_ethnicity_0.png\",\n",
+    "               \"1/1_ethnicity_7.png\"]\n",
+    "\n",
+    "from bob.pipelines import Sample, DelayedSample, SampleSet\n",
+    "\n",
+    "# Converting every element in a list in a sample set\n",
+    "def list_to_sampleset(lst, base_path, eyes_annotations, references):\n",
+    "    sample_sets = []\n",
+    "    for i,l in enumerate(lst):\n",
+    "        sample = DelayedSample(functools.partial(bob.io.base.load,os.path.join(base_path,l)),\n",
+    "                               key=l,\n",
+    "                               reference_id=str(i),\n",
+    "                               annotations=eyes_annotations\n",
+    "                                )\n",
+    "        sset = SampleSet(samples=[sample],\n",
+    "                         key=l,\n",
+    "                         reference_id=str(i),\n",
+    "                         references=references)\n",
+    "\n",
+    "        sample_sets.append(sset)\n",
+    "    return sample_sets\n",
+    "\n",
+    "\n",
+    "treferences = list_to_sampleset(treferences_lst, PATH_INJECTED_DATA,eyes_annotations, references=None)\n",
+    "zprobes = list_to_sampleset(zprobes_lst, PATH_INJECTED_DATA, eyes_annotations, references=references)\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "\n",
+    "## Preparing the pipeline\n",
+    "\n",
+    "Here we are using the arcface from insight face (https://github.com/deepinsight/insightface).\n",
+    "Feel free to change it by looking at (`bob.bio.face.embeddings`)."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Pipeline(steps=[('ToDaskBag', ToDaskBag(partition_size=200)),\n",
+      "                ('samplewrapper-1',\n",
+      "                 DaskWrapper(estimator=CheckpointWrapper(estimator=SampleWrapper(estimator=FaceCrop(annotator=BobIpMTCNN(),\n",
+      "                                                                                                    cropped_image_size=(112,\n",
+      "                                                                                                                        112),\n",
+      "                                                                                                    cropped_positions={'leye': (55,\n",
+      "                                                                                                                                81),\n",
+      "                                                                                                                       'reye': (55,\n",
+      "                                                                                                                                42)}),\n",
+      "                                                                                 fit_extra_arguments=(),\n",
+      "                                                                                 transform_extra_arguments=(('annotations',\n",
+      "                                                                                                             'annotations'),)),\n",
+      "                                                         fe...\n",
+      "                                                         save_func=<function save at 0x7fccf501c560>))),\n",
+      "                ('samplewrapper-2',\n",
+      "                 DaskWrapper(estimator=CheckpointWrapper(estimator=SampleWrapper(estimator=ArcFaceInsightFace_LResNet100(),\n",
+      "                                                                                 fit_extra_arguments=(),\n",
+      "                                                                                 transform_extra_arguments=()),\n",
+      "                                                         features_dir='/idiap/temp/tpereira/inject-example/samplewrapper-2',\n",
+      "                                                         load_func=<function load at 0x7fccf501c3b0>,\n",
+      "                                                         save_func=<function save at 0x7fccf501c560>)))])\n"
+     ]
+    }
+   ],
+   "source": [
+    "import os\n",
+    "from bob.bio.base.pipelines.vanilla_biometrics import checkpoint_vanilla_biometrics\n",
+    "from bob.bio.base.pipelines.vanilla_biometrics import dask_vanilla_biometrics\n",
+    "from bob.bio.base.pipelines.vanilla_biometrics import ZTNormPipeline, ZTNormCheckpointWrapper\n",
+    "from bob.bio.base.pipelines.vanilla_biometrics import CSVScoreWriter\n",
+    "\n",
+    "from bob.bio.face.embeddings.mxnet import arcface_insightFace_lresnet100\n",
+    "pipeline = arcface_insightFace_lresnet100(annotation_type=database.annotation_type,\n",
+    "                                          fixed_positions=None,\n",
+    "                                          memory_demanding=False)\n",
+    "\n",
+    "\n",
+    "## SCORE WRITER\n",
+    "# Here we want the pipeline to write using METADATA\n",
+    "pipeline.score_writer = CSVScoreWriter(os.path.join(OUTPUT_PATH, \"./tmp\"))\n",
+    "\n",
+    "\n",
+    "# Agregating with checkpoint\n",
+    "pipeline = checkpoint_vanilla_biometrics(pipeline, OUTPUT_PATH)\n",
+    "\n",
+    "\n",
+    "#pipeline = dask_vanilla_biometrics(ZTNormCheckpointWrapper(ZTNormPipeline(pipeline), OUTPUT_PATH))\n",
+    "# AGGREGATING WITH ZTNORM\n",
+    "pipeline = ZTNormPipeline(pipeline)\n",
+    "pipeline.ztnorm_solver = ZTNormCheckpointWrapper(\n",
+    "    pipeline.ztnorm_solver, os.path.join(OUTPUT_PATH, \"normed-scores\")\n",
+    ")\n",
+    "pipeline = dask_vanilla_biometrics(pipeline, partition_size=200)\n",
+    "\n",
+    "print(pipeline.transformer)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Setting the DASK client (optional step; do it if you want to use the grid)\n",
+    "\n",
+    "**HERE MAKE ABSOLUTELLY SURE THAT YOU DO `SETSHELL grid`  BEFORE STARTING THE NOTEBOOK**\n",
+    "\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from dask.distributed import Client\n",
+    "from bob.pipelines.distributed.sge import SGEMultipleQueuesCluster\n",
+    "\n",
+    "cluster = SGEMultipleQueuesCluster(min_jobs=1)\n",
+    "dask_client = Client(cluster)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "As an example, we consider 10 samples from this database and extract features for these samples:"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Running the vanilla Biometrics"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "def post_process_scores(pipeline, scores, path):\n",
+    "    written_scores = pipeline.write_scores(scores)\n",
+    "    return pipeline.post_process(written_scores, path)    \n",
+    "\n",
+    "def _build_filename(score_file_name, suffix):\n",
+    "    return os.path.join(score_file_name, suffix)\n",
+    "\n",
+    "from dask.delayed import Delayed\n",
+    "import dask.bag\n",
+    "def compute_scores(result, dask_client):\n",
+    "    if isinstance(result, Delayed) or isinstance(result, dask.bag.Bag):\n",
+    "        if dask_client is not None:\n",
+    "            result = result.compute(scheduler=dask_client)\n",
+    "        else:\n",
+    "            print(\"`dask_client` not set. Your pipeline will run locally\")\n",
+    "            result = result.compute(scheduler=\"single-threaded\")\n",
+    "    return result\n",
+    "\n",
+    "background_model_samples = database.background_model_samples()\n",
+    "for group in [\"dev\",\"eval\"]:    \n",
+    "\n",
+    "    score_file_name = os.path.join(OUTPUT_PATH, f\"scores-{group}\")\n",
+    "    biometric_references = database.references(group=group)\n",
+    "    probes = database.probes(group=group)\n",
+    "    \n",
+    "    (\n",
+    "        raw_scores,\n",
+    "        z_normed_scores,\n",
+    "        t_normed_scores,\n",
+    "        zt_normed_scores,\n",
+    "        s_normed_scores,\n",
+    "    ) = pipeline(\n",
+    "        background_model_samples,\n",
+    "        biometric_references,\n",
+    "        probes,\n",
+    "        zprobes,\n",
+    "        treferences,\n",
+    "        allow_scoring_with_all_biometric_references=True,\n",
+    "    )        \n",
+    "    \n",
+    "    \n",
+    "    \n",
+    "\n",
+    "    # Running RAW_SCORES\n",
+    "\n",
+    "    raw_scores = post_process_scores(\n",
+    "        pipeline, raw_scores, _build_filename(score_file_name, \"raw_scores\")\n",
+    "    )\n",
+    "    _ = compute_scores(raw_scores, dask_client)\n",
+    "\n",
+    "    # Z-SCORES\n",
+    "    z_normed_scores = post_process_scores(\n",
+    "        pipeline,\n",
+    "        z_normed_scores,\n",
+    "        _build_filename(score_file_name, \"z_normed_scores\"),\n",
+    "    )\n",
+    "    _ = compute_scores(z_normed_scores, dask_client)\n",
+    "\n",
+    "    # T-SCORES\n",
+    "    t_normed_scores = post_process_scores(\n",
+    "        pipeline,\n",
+    "        t_normed_scores,\n",
+    "        _build_filename(score_file_name, \"t_normed_scores\"),\n",
+    "    )\n",
+    "    _ = compute_scores(t_normed_scores, dask_client)\n",
+    "\n",
+    "    # S-SCORES\n",
+    "    s_normed_scores = post_process_scores(\n",
+    "        pipeline,\n",
+    "        s_normed_scores,\n",
+    "        _build_filename(score_file_name, \"s_normed_scores\"),\n",
+    "    )\n",
+    "    _ = compute_scores(s_normed_scores, dask_client)\n",
+    "\n",
+    "    # ZT-SCORES\n",
+    "    zt_normed_scores = post_process_scores(\n",
+    "        pipeline,\n",
+    "        zt_normed_scores,\n",
+    "        _build_filename(score_file_name, \"zt_normed_scores\"),\n",
+    "    )\n",
+    "    _ = compute_scores(zt_normed_scores, dask_client)\n",
+    "\n"
+   ]
+  },
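+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Minimal sketch for inspecting the written scores (not part of the original run).\n",
+    "# Depending on the bob.bio.base version, CSVScoreWriter may write a single CSV\n",
+    "# file or a folder of CSV chunks; adjust the path and column names accordingly.\n",
+    "import glob\n",
+    "import pandas as pd\n",
+    "\n",
+    "score_path = os.path.join(OUTPUT_PATH, \"scores-dev\", \"raw_scores\")\n",
+    "csv_files = sorted(glob.glob(os.path.join(score_path, \"**\", \"*.csv\"), recursive=True))\n",
+    "if csv_files:\n",
+    "    scores_df = pd.concat([pd.read_csv(f) for f in csv_files], ignore_index=True)\n",
+    "    print(scores_df.head())\n",
+    "else:\n",
+    "    print(\"No score files found yet; check OUTPUT_PATH\")"
+   ]
+  },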
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In the following cells, we convert the extracted features to `numpy.array` and check the size of features."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# KILL THE SGE WORKERS\n",
+    "dask_client.shutdown()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.7.7"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/setup.py b/setup.py
index 1a1578b55d5aec93e6798244708c82c741c0f017..9e789e233ea96d06dc92981cdad210ff0f1f1ffe 100644
--- a/setup.py
+++ b/setup.py
@@ -110,29 +110,13 @@ setup(
             "meds = bob.bio.face.config.database.meds:database",
             "morph = bob.bio.face.config.database.morph:database",
             "casia-africa = bob.bio.face.config.database.casia_africa:database",
-            "pola-thermal = bob.bio.face.config.database.pola_thermal:database",
-            "cbsr-nir-vis-2 = bob.bio.face.config.database.cbsr_nir_vis_2:database",
         ],
         "bob.bio.annotator": [
             "facedetect               = bob.bio.face.config.annotator.facedetect:annotator",
             "facedetect-eye-estimate  = bob.bio.face.config.annotator.facedetect_eye_estimate:annotator",
             "flandmark                = bob.bio.face.config.annotator.flandmark:annotator",
             "mtcnn                    = bob.bio.face.config.annotator.mtcnn:annotator",
-        ],
-        "bob.bio.transformer": [
-            "facedetect-eye-estimate = bob.bio.face.config.annotator.facedetect_eye_estimate:transformer",
-            "facedetect = bob.bio.face.config.annotator.facedetect:transformer",
-            "flandmark = bob.bio.face.config.annotator.flandmark:annotator",
-            "mtcnn = bob.bio.face.config.annotator.mtcnn:transformer",
-            "facenet-sanderberg = bob.bio.face.config.baseline.facenet_sanderberg:transformer",
-            "inception-resnetv1-casiawebface = bob.bio.face.config.baseline.inception_resnetv1_casiawebface:transformer",
-            "inception-resnetv2-casiawebface = bob.bio.face.config.baseline.inception_resnetv2_casiawebface:transformer",
-            "inception-resnetv1-msceleb = bob.bio.face.config.baseline.inception_resnetv1_msceleb:transformer",
-            "inception-resnetv2-msceleb = bob.bio.face.config.baseline.inception_resnetv2_msceleb:transformer",
-            "arcface-insightface = bob.bio.face.config.baseline.arcface_insightface:transformer",
-            "gabor-graph = bob.bio.face.config.baseline.gabor_graph:transformer",
-            "lgbphs = bob.bio.face.config.baseline.lgbphs:transformer",
-            "dummy = bob.bio.face.config.baseline.dummy:transformer",
+            "tinyface                 = bob.bio.face.config.annotator.tinyface:annotator",
         ],
         # baselines
         "bob.bio.pipeline": [
@@ -147,8 +131,14 @@ setup(
             "lda = bob.bio.face.config.baseline.lda:pipeline",
             "dummy = bob.bio.face.config.baseline.dummy:pipeline",
             "resnet50-msceleb-arcface-2021 = bob.bio.face.config.baseline.resnet50_msceleb_arcface_2021:pipeline",
+            "resnet50-msceleb-arcface-20210521 = bob.bio.face.config.baseline.resnet50_msceleb_arcface_20210521:pipeline",
             "resnet50-vgg2-arcface-2021 = bob.bio.face.config.baseline.resnet50_vgg2_arcface_2021:pipeline",
-            "mobilenetv2-msceleb-arcface-2021 = bob.bio.face.config.baseline.mobilenetv2_msceleb_arcface_2021",
+            "mobilenetv2-msceleb-arcface-2021 = bob.bio.face.config.baseline.mobilenetv2_msceleb_arcface_2021:pipeline",
+            "afffe = bob.bio.face.config.baseline.afffe:pipeline",
+            "vgg16-oxford = bob.bio.face.config.baseline.vgg16_oxford:pipeline",
+            "iresnet34 = bob.bio.face.config.baseline.iresnet34:pipeline",
+            "iresnet50 = bob.bio.face.config.baseline.iresnet50:pipeline",
+            "iresnet100 = bob.bio.face.config.baseline.iresnet100:pipeline",
         ],
         "bob.bio.config": [
             "facenet-sanderberg = bob.bio.face.config.baseline.facenet_sanderberg",
@@ -160,6 +150,8 @@ setup(
             "arcface-insightface = bob.bio.face.config.baseline.arcface_insightface",
             "lgbphs = bob.bio.face.config.baseline.lgbphs",
             "lda = bob.bio.face.config.baseline.lda",
+            "afffe = bob.bio.face.config.baseline.afffe",
+            "vgg16-oxford = bob.bio.face.config.baseline.vgg16_oxford",
             "arface            = bob.bio.face.config.database.arface",
             "atnt              = bob.bio.face.config.database.atnt",
             "gbu               = bob.bio.face.config.database.gbu",
@@ -173,13 +165,15 @@ setup(
             "replaymobile-img  = bob.bio.face.config.database.replaymobile",
             "fargo  = bob.bio.face.config.database.fargo",
             "meds = bob.bio.face.config.database.meds",
-            "morph = bob.bio.face.config.database.morph",
             "casia-africa = bob.bio.face.config.database.casia_africa",
-            "pola-thermal = bob.bio.face.config.database.pola_thermal",
-            "cbsr-nir-vis-2 = bob.bio.face.config.database.cbsr_nir_vis_2",
+            "morph = bob.bio.face.config.database.morph",
             "resnet50-msceleb-arcface-2021 = bob.bio.face.config.baseline.resnet50_msceleb_arcface_2021",
+            "resnet50-msceleb-arcface-20210521 = bob.bio.face.config.baseline.resnet50_msceleb_arcface_20210521:pipeline",
             "resnet50-vgg2-arcface-2021 = bob.bio.face.config.baseline.resnet50_vgg2_arcface_2021",
             "mobilenetv2-msceleb-arcface-2021 = bob.bio.face.config.baseline.mobilenetv2_msceleb_arcface_2021",
+            "iresnet34 = bob.bio.face.config.baseline.iresnet34",
+            "iresnet50 = bob.bio.face.config.baseline.iresnet50",
+            "iresnet100 = bob.bio.face.config.baseline.iresnet100",
         ],
         "bob.bio.cli": [
             "display-face-annotations          = bob.bio.face.script.display_face_annotations:display_face_annotations",