diff --git a/bob/bio/face/config/baseline/facenet_sanderberg.py b/bob/bio/face/config/baseline/facenet_sanderberg.py
index cf33486b93617e2fc5a3d47ce893f3bfb6124c72..3c989a92b5286b120ff571de8f7398e1f462fbd3 100644
--- a/bob/bio/face/config/baseline/facenet_sanderberg.py
+++ b/bob/bio/face/config/baseline/facenet_sanderberg.py
@@ -1,4 +1,4 @@
-from bob.bio.face.embeddings import FaceNetSanderberg
+from bob.bio.face.embeddings import FaceNetSanderberg_20170512_110547
 from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -17,7 +17,7 @@ else:
 def load(annotation_type, fixed_positions=None):
 
     transformer = embedding_transformer_160x160(
-        FaceNetSanderberg(), annotation_type, fixed_positions
+        FaceNetSanderberg_20170512_110547(), annotation_type, fixed_positions
     )
     algorithm = Distance()
 
diff --git a/bob/bio/face/config/baseline/gabor_graph.py b/bob/bio/face/config/baseline/gabor_graph.py
index 2798fb5e07a76d21444f688ec0d67dc75f4cb1f4..b5fca41de57728b39a1bfb71687e9d332c9fde65 100644
--- a/bob/bio/face/config/baseline/gabor_graph.py
+++ b/bob/bio/face/config/baseline/gabor_graph.py
@@ -24,15 +24,14 @@ else:
     annotation_type = None
     fixed_positions = None
 
-
-def load(annotation_type, fixed_positions=None):
-    ####### SOLVING THE FACE CROPPER TO BE USED ##########
-
+def get_cropper(annotation_type, fixed_positions=None):
     # Cropping
     face_cropper, transform_extra_arguments = crop_80x64(
         annotation_type, fixed_positions, color_channel="gray"
     )
+    return face_cropper, transform_extra_arguments
 
+def get_pipeline(face_cropper, transform_extra_arguments):
     preprocessor = bob.bio.face.preprocessor.INormLBP(
         face_cropper=face_cropper, dtype=np.float64
     )
@@ -79,6 +78,11 @@ def load(annotation_type, fixed_positions=None):
     algorithm = BioAlgorithmLegacy(gabor_jet, base_dir=tempdir)
     return VanillaBiometricsPipeline(transformer, algorithm)
 
+def load(annotation_type, fixed_positions=None):
+    ####### RESOLVING THE FACE CROPPER TO BE USED ##########
+    face_cropper, transform_extra_arguments = get_cropper(annotation_type, fixed_positions)
+    return get_pipeline(face_cropper, transform_extra_arguments)
+
 pipeline = load(annotation_type, fixed_positions)
 
 transformer = pipeline.transformer
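
The Gabor-graph baseline now exposes the cropper construction and the pipeline assembly as separate steps, so the cropper can be inspected or replaced before the full pipeline is built. A minimal sketch of the refactored entry points, assuming `eyes-center` annotations:

from bob.bio.face.config.baseline.gabor_graph import get_cropper, get_pipeline

# Build the 80x64 gray-scale cropper for the chosen annotation type.
face_cropper, transform_extra_arguments = get_cropper("eyes-center")

# Assemble the full Gabor-graph VanillaBiometricsPipeline around that cropper.
pipeline = get_pipeline(face_cropper, transform_extra_arguments)
transformer = pipeline.transformer
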
diff --git a/bob/bio/face/config/baseline/helpers.py b/bob/bio/face/config/baseline/helpers.py
index 0cd136b1ce3199cfef0593e94ef324427b9c7a11..910f1d8867c28a72826b578bd7683d6eb21a2f2e 100644
--- a/bob/bio/face/config/baseline/helpers.py
+++ b/bob/bio/face/config/baseline/helpers.py
@@ -5,55 +5,131 @@ from bob.pipelines import wrap
 from bob.bio.face.helpers import face_crop_solver
 import numpy as np
 
+def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
+    """
+    Computes the default cropped positions for the FaceCropper used with Facenet-like
+    embedding extractors, proportionally to the target image size.
 
-def embedding_transformer_160x160(embedding, annotation_type, fixed_positions):
+
+    Parameters
+    ----------
+       cropped_image_size : tuple
+          A tuple (HEIGHT, WIDTH) describing the target size of the cropped image.
+
+       annotation_type : str
+          Type of annotations. Possible values are: `bounding-box`, `eyes-center`,
+          `left-profile`, `right-profile` and None.
+
+    Returns
+    -------
+
+      cropped_positions:
+         The dictionary of cropped positions that will be fed to the FaceCropper.
     """
-    Creates a pipeline composed by and FaceCropper and an Embedding extractor.
-    This transformer is suited for Facenet based architectures
-    
-    .. warning::
-       This will resize images to :math:`160 \times 160`
+    CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH = cropped_image_size
+
+    if annotation_type == "bounding-box":
+
+        TOP_LEFT_POS = (0, 0)
+        BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
+        cropped_positions = {"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS}
+
+    elif annotation_type == "eyes-center":
+
+        RIGHT_EYE_POS = (round(2 / 7 * CROPPED_IMAGE_HEIGHT), round(1 / 3 * CROPPED_IMAGE_WIDTH))
+        LEFT_EYE_POS = (round(2 / 7 * CROPPED_IMAGE_HEIGHT), round(2 / 3 * CROPPED_IMAGE_WIDTH))
+        cropped_positions = {"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS}
+
+    elif annotation_type == "left-profile":
+
+        EYE_POS = (round(2 / 7 * CROPPED_IMAGE_HEIGHT), round(3 / 8 * CROPPED_IMAGE_WIDTH))
+        MOUTH_POS = (round(5 / 7 * CROPPED_IMAGE_HEIGHT), round(3 / 8 * CROPPED_IMAGE_WIDTH))
+        cropped_positions = {"leye": EYE_POS, "mouth": MOUTH_POS}
+
+    elif annotation_type == "right-profile":
+
+        EYE_POS = (round(2 / 7 * CROPPED_IMAGE_HEIGHT), round(5 / 8 * CROPPED_IMAGE_WIDTH))
+        MOUTH_POS = (round(5 / 7 * CROPPED_IMAGE_HEIGHT), round(5 / 8 * CROPPED_IMAGE_WIDTH))
+        cropped_positions = {"reye": EYE_POS, "mouth": MOUTH_POS}
     
+    else:
+
+        cropped_positions = None
+
+    return cropped_positions
+
+def legacy_default_cropping(cropped_image_size, annotation_type):
     """
+    Computes the default cropped positions for the FaceCropper used with legacy extractors,
+    proportionally to the target image size.
 
-    # This is the size of the image that this model expects
-    CROPPED_IMAGE_HEIGHT = 160
-    CROPPED_IMAGE_WIDTH = 160
-    cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
-    color_channel = "rgb"
 
-    #### SOLVING THE FACE CROPPER TO BE USED
+    Parameters
+    ----------
+       cropped_image_size : tuple
+          A tuple (HEIGHT, WIDTH) describing the target size of the cropped image.
+
+       annotation_type : str
+          Type of annotations. Possible values are: `bounding-box`, `eyes-center`,
+          `left-profile`, `right-profile` and None.
+
+    Returns
+    -------
+
+      cropped_positions:
+         The dictionary of cropped positions that will be fed to the FaceCropper.
+    """
+    CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH = cropped_image_size
+
     if annotation_type == "bounding-box":
-        transform_extra_arguments = (("annotations", "annotations"),)
+
         TOP_LEFT_POS = (0, 0)
         BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
-
-        # Detects the face and crops it without eye detection
-        face_cropper = face_crop_solver(
-            cropped_image_size,
-            color_channel=color_channel,
-            cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS},
-            fixed_positions=fixed_positions,
-        )
+        cropped_positions = {"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS}
 
     elif annotation_type == "eyes-center":
-        transform_extra_arguments = (("annotations", "annotations"),)
-        # eye positions for frontal images
-        RIGHT_EYE_POS = (46, 53)
-        LEFT_EYE_POS = (46, 107)
 
-        # Detects the face and crops it without eye detection
-        face_cropper = face_crop_solver(
+        RIGHT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 - 1)
+        LEFT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 * 3)
+        cropped_positions = {"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS}
+
+    elif annotation_type == "left-profile":
+        # Main reference https://gitlab.idiap.ch/bob/bob.chapter.FRICE/-/blob/master/bob/chapter/FRICE/script/pose.py
+        EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 7 * 3 - 2)
+        MOUTH_POS = (CROPPED_IMAGE_HEIGHT // 3 * 2, CROPPED_IMAGE_WIDTH // 7 * 3 - 2)
+        cropped_positions = {"leye": EYE_POS, "mouth": MOUTH_POS}
+
+    elif annotation_type == "right-profile":
+        # Main reference https://gitlab.idiap.ch/bob/bob.chapter.FRICE/-/blob/master/bob/chapter/FRICE/script/pose.py
+        EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 7 * 4 + 2)
+        MOUTH_POS = (CROPPED_IMAGE_HEIGHT // 3 * 2, CROPPED_IMAGE_WIDTH // 7 * 4 + 2)
+        cropped_positions = {"reye": EYE_POS, "mouth": MOUTH_POS}
+
+    else:
+
+        cropped_positions = None
+
+    return cropped_positions
+
+def embedding_transformer(cropped_image_size, embedding, annotation_type, cropped_positions, fixed_positions=None):
+    """
+    Creates a pipeline composed of a FaceCropper and an Embedding extractor.
+    This transformer is suited for Facenet-based architectures.
+
+    .. warning::
+       This will resize images to the requested ``cropped_image_size``.
+
+    """
+    color_channel = "rgb"
+
+    face_cropper = face_crop_solver(
             cropped_image_size,
             color_channel=color_channel,
-            cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
+            cropped_positions=cropped_positions,
             fixed_positions=fixed_positions,
         )
 
-    else:
-        transform_extra_arguments = None
-        # DEFAULT TO FACE SIMPLE RESIZE
-        face_cropper = face_crop_solver(cropped_image_size)
+    transform_extra_arguments = None if cropped_positions is None else (("annotations", "annotations"),)
 
     transformer = make_pipeline(
         wrap(
@@ -66,66 +142,39 @@ def embedding_transformer_160x160(embedding, annotation_type, fixed_positions):
 
     return transformer
 
-
-def embedding_transformer_112x112(embedding, annotation_type, fixed_positions):
+def embedding_transformer_160x160(embedding, annotation_type, fixed_positions):
     """
     Creates a pipeline composed by and FaceCropper and an Embedding extractor.
     This transformer is suited for Facenet based architectures
     
     .. warning::
-       This will resize images to :math:`112 \times 112`
+       This will resize images to :math:`160 \times 160`
     
     """
+    cropped_positions = embedding_transformer_default_cropping((160, 160), annotation_type)
 
-    # This is the size of the image that this model expects
-    CROPPED_IMAGE_HEIGHT = 112
-    CROPPED_IMAGE_WIDTH = 112
-    cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
-    color_channel = "rgb"
+    return embedding_transformer((160, 160), embedding, annotation_type, cropped_positions, fixed_positions)
 
-    #### SOLVING THE FACE CROPPER TO BE USED
-    if annotation_type == "bounding-box":
-        transform_extra_arguments = (("annotations", "annotations"),)
-        TOP_LEFT_POS = (0, 0)
-        BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
 
-        # Detects the face and crops it without eye detection
-        face_cropper = face_crop_solver(
-            cropped_image_size,
-            color_channel=color_channel,
-            cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS},
-            fixed_positions=fixed_positions,
-        )
-
-    elif annotation_type == "eyes-center":
-        transform_extra_arguments = (("annotations", "annotations"),)
-        # eye positions for frontal images
-        RIGHT_EYE_POS = (32, 34)
-        LEFT_EYE_POS = (32, 77)
-
-        # Detects the face and crops it without eye detection
-        face_cropper = face_crop_solver(
-            cropped_image_size,
-            color_channel=color_channel,
-            cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
-            fixed_positions=fixed_positions,
-        )
+def embedding_transformer_112x112(embedding, annotation_type, fixed_positions):
+    """
+    Creates a pipeline composed of a FaceCropper and an Embedding extractor.
+    This transformer is suited for Facenet-based architectures.
+
+    .. warning::
+       This will resize images to :math:`112 \times 112`
+
+    """
+    cropped_image_size = (112, 112)
 
+    if annotation_type == "eyes-center":
+        # Hard-coded eye positions, kept for backward compatibility
+        cropped_positions = {"leye": (32, 77), "reye": (32, 34)}
     else:
-        transform_extra_arguments = None
-        # DEFAULT TO FACE SIMPLE RESIZE
-        face_cropper = face_crop_solver(cropped_image_size)
-
-    transformer = make_pipeline(
-        wrap(
-            ["sample"],
-            face_cropper,
-            transform_extra_arguments=transform_extra_arguments,
-        ),
-        wrap(["sample"], embedding),
-    )
+        # Fall back to the default proportional cropping
+        cropped_positions = embedding_transformer_default_cropping(cropped_image_size, annotation_type)
 
-    return transformer
+    return embedding_transformer(cropped_image_size, embedding, annotation_type, cropped_positions, fixed_positions)
 
 
 def crop_80x64(annotation_type, fixed_positions=None, color_channel="gray"):
@@ -156,49 +205,25 @@ def crop_80x64(annotation_type, fixed_positions=None, color_channel="gray"):
          The parameters to the transformer
 
     """
+    color_channel = color_channel
+    dtype = np.float64
 
     # Cropping
     CROPPED_IMAGE_HEIGHT = 80
     CROPPED_IMAGE_WIDTH = CROPPED_IMAGE_HEIGHT * 4 // 5
-
-    # eye positions for frontal images
-    RIGHT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 - 1)
-    LEFT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 * 3)
-
     cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
-    color_channel = color_channel
-    dtype = np.float64
 
-    if annotation_type == "bounding-box":
-        transform_extra_arguments = (("annotations", "annotations"),)
-        TOP_LEFT_POS = (0, 0)
-        BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
+    cropped_positions = legacy_default_cropping(cropped_image_size, annotation_type)
 
-        # Detects the face and crops it without eye detection
-        face_cropper = face_crop_solver(
-            cropped_image_size,
-            color_channel=color_channel,
-            cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS},
-            fixed_positions=fixed_positions,
-            dtype=dtype
-        )
-
-    elif annotation_type == "eyes-center":
-        transform_extra_arguments = (("annotations", "annotations"),)
-        # eye positions for frontal images
 
-        # Detects the face and crops it without eye detection
-        face_cropper = face_crop_solver(
+    face_cropper = face_crop_solver(
             cropped_image_size,
             color_channel=color_channel,
-            cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
+            cropped_positions=cropped_positions,
             fixed_positions=fixed_positions,
             dtype=dtype
         )
-
-    else:
-        transform_extra_arguments = None
-        # DEFAULT TO FACE SIMPLE RESIZE
-        face_cropper = face_crop_solver(cropped_image_size)
+    
+    transform_extra_arguments = None if cropped_positions is None else (("annotations", "annotations"),)
 
     return face_cropper, transform_extra_arguments
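
The cropping defaults are now computed by dedicated helpers instead of being hard-coded in every transformer builder, which makes them easy to inspect or override before calling `embedding_transformer`. A small sketch of how the new helpers compose (the embedding object is a placeholder, hence the commented-out call):

from bob.bio.face.config.baseline.helpers import (
    embedding_transformer_default_cropping,
    legacy_default_cropping,
    embedding_transformer,
)

# Proportional eye positions for a 160x160 Facenet-style crop.
positions = embedding_transformer_default_cropping((160, 160), "eyes-center")
# -> {"leye": (46, 107), "reye": (46, 53)}, matching the values previously hard-coded

# Legacy 80x64 positions used by the Gabor-graph and LGBPHS baselines.
legacy_positions = legacy_default_cropping((80, 64), "eyes-center")

# The (possibly customized) positions are then handed to the generic builder:
# transformer = embedding_transformer((160, 160), embedding, "eyes-center", positions)
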
diff --git a/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py b/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
index 041034c3e1bcd8035d49233e123538440761e984..7b8bfc4a23c926f998fe01a77fd0b12734345b95 100644
--- a/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
+++ b/bob/bio/face/config/baseline/inception_resnetv1_casiawebface.py
@@ -1,4 +1,4 @@
-from bob.bio.face.embeddings import InceptionResnetv1_CasiaWebFace
+from bob.bio.face.embeddings import InceptionResnetv1_Casia_CenterLoss_2018
 from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -15,7 +15,7 @@ else:
 
 def load(annotation_type, fixed_positions=None):
     transformer = embedding_transformer_160x160(
-        InceptionResnetv1_CasiaWebFace(), annotation_type, fixed_positions
+        InceptionResnetv1_Casia_CenterLoss_2018(), annotation_type, fixed_positions
     )
 
     algorithm = Distance()
diff --git a/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py b/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
index 087eff8f719d13e1c9e9a9851c3eb0558e417aec..b09ade9be0e785b9678bcb1354cee3c441b13f46 100644
--- a/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
+++ b/bob/bio/face/config/baseline/inception_resnetv1_msceleb.py
@@ -1,4 +1,4 @@
-from bob.bio.face.embeddings import InceptionResnetv1_MsCeleb
+from bob.bio.face.embeddings import InceptionResnetv1_MsCeleb_CenterLoss_2018
 from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -16,7 +16,7 @@ else:
 
 def load(annotation_type, fixed_positions=None):
     transformer = embedding_transformer_160x160(
-        InceptionResnetv1_MsCeleb(), annotation_type, fixed_positions
+        InceptionResnetv1_MsCeleb_CenterLoss_2018(), annotation_type, fixed_positions
     )
 
     algorithm = Distance()
diff --git a/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py b/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
index d5b91c11769cf810360eee8811f20d9d1476e323..82fc0eb843a2e8ad13c46bcce818a05840ae3b9c 100644
--- a/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
+++ b/bob/bio/face/config/baseline/inception_resnetv2_casiawebface.py
@@ -1,4 +1,4 @@
-from bob.bio.face.embeddings import InceptionResnetv2_CasiaWebFace
+from bob.bio.face.embeddings import InceptionResnetv2_Casia_CenterLoss_2018
 from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -16,7 +16,7 @@ else:
 
 def load(annotation_type, fixed_positions=None):
     transformer = embedding_transformer_160x160(
-        InceptionResnetv2_CasiaWebFace(), annotation_type, fixed_positions
+        InceptionResnetv2_Casia_CenterLoss_2018(), annotation_type, fixed_positions
     )
 
     algorithm = Distance()
diff --git a/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py b/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
index 7db50cd6ee2f3ea61b7b6b41376dc167e198e435..6c28239830e7cea1b95d41a76dc2b9e4bd4f27cb 100644
--- a/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
+++ b/bob/bio/face/config/baseline/inception_resnetv2_msceleb.py
@@ -1,4 +1,4 @@
-from bob.bio.face.embeddings import InceptionResnetv2_MsCeleb
+from bob.bio.face.embeddings import InceptionResnetv2_MsCeleb_CenterLoss_2018
 from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -16,7 +16,7 @@ else:
 
 def load(annotation_type, fixed_positions=None):
     transformer = embedding_transformer_160x160(
-        InceptionResnetv2_MsCeleb(), annotation_type, fixed_positions
+        InceptionResnetv2_MsCeleb_CenterLoss_2018(), annotation_type, fixed_positions
     )
 
     algorithm = Distance()
diff --git a/bob/bio/face/config/baseline/lgbphs.py b/bob/bio/face/config/baseline/lgbphs.py
index e906159b276e46e1f4c44ccef0a40d8c6e250c42..8b2ee18103d15d43ef515970888337d11c3242c6 100644
--- a/bob/bio/face/config/baseline/lgbphs.py
+++ b/bob/bio/face/config/baseline/lgbphs.py
@@ -21,15 +21,14 @@ else:
     annotation_type = None
     fixed_positions = None
 
-
-def load(annotation_type, fixed_positions=None):
-    ####### SOLVING THE FACE CROPPER TO BE USED ##########
-
+def get_cropper(annotation_type, fixed_positions=None):
     # Cropping
     face_cropper, transform_extra_arguments = crop_80x64(
         annotation_type, fixed_positions, color_channel="gray"
     )
+    return face_cropper, transform_extra_arguments
 
+def get_pipeline(face_cropper, transform_extra_arguments):
     preprocessor = bob.bio.face.preprocessor.TanTriggs(
         face_cropper=face_cropper, dtype=np.float64
     )
@@ -56,7 +55,6 @@ def load(annotation_type, fixed_positions=None):
     )
 
 
-
     ### BIOMETRIC ALGORITHM
     histogram = bob.bio.face.algorithm.Histogram(
         distance_function = bob.math.histogram_intersection,
@@ -69,5 +67,10 @@ def load(annotation_type, fixed_positions=None):
 
     return VanillaBiometricsPipeline(transformer, algorithm)
 
+def load(annotation_type, fixed_positions=None):
+    ####### RESOLVING THE FACE CROPPER TO BE USED ##########
+    face_cropper, transform_extra_arguments = get_cropper(annotation_type, fixed_positions)
+    return get_pipeline(face_cropper, transform_extra_arguments)
+   
 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
\ No newline at end of file
diff --git a/bob/bio/face/config/baseline/tf2_inception_resnet.py b/bob/bio/face/config/baseline/tf2_inception_resnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..1d68f0bad2ec215b6663edbace89665cea0c5ec6
--- /dev/null
+++ b/bob/bio/face/config/baseline/tf2_inception_resnet.py
@@ -0,0 +1,44 @@
+from bob.extension import rc
+from bob.bio.face.embeddings.tf2_inception_resnet import InceptionResnet
+from bob.bio.face.preprocessor import FaceCrop
+from bob.bio.face.config.baseline.helpers import (
+    embedding_transformer_default_cropping,
+    embedding_transformer
+)
+
+from sklearn.pipeline import make_pipeline
+from bob.pipelines.wrappers import wrap
+from bob.bio.base.pipelines.vanilla_biometrics import (
+    Distance,
+    VanillaBiometricsPipeline,
+)
+
+
+if "database" in locals():
+    annotation_type = database.annotation_type
+    fixed_positions = database.fixed_positions
+else:
+    annotation_type = None
+    fixed_positions = None
+
+def load(annotation_type, fixed_positions=None):
+    CROPPED_IMAGE_SIZE = (160, 160)
+    CROPPED_POSITIONS = embedding_transformer_default_cropping(CROPPED_IMAGE_SIZE,
+                                                               annotation_type=annotation_type)
+
+    extractor_path = rc['bob.bio.face.tf2.casia-webface-inception-v2']
+
+    embedding = InceptionResnet(checkpoint_path=extractor_path)
+
+    transformer = embedding_transformer(CROPPED_IMAGE_SIZE,
+                                        embedding,
+                                        annotation_type,
+                                        CROPPED_POSITIONS,
+                                        fixed_positions)
+
+    algorithm = Distance()
+
+    return VanillaBiometricsPipeline(transformer, algorithm)
+
+pipeline = load(annotation_type, fixed_positions)
+transformer = pipeline.transformer
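
This new config wires a TF2 checkpoint, resolved from the `bob.bio.face.tf2.casia-webface-inception-v2` RC entry, into the standard vanilla-biometrics setup. A hedged sketch of calling `load` directly, for instance from a test or another config:

from bob.bio.face.config.baseline.tf2_inception_resnet import load

# Requires rc["bob.bio.face.tf2.casia-webface-inception-v2"] to point at a saved TF2 model.
pipeline = load("eyes-center")

# FaceCrop to 160x160 followed by the embedding extractor, scored with Distance().
transformer = pipeline.transformer
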
diff --git a/bob/bio/face/embeddings/__init__.py b/bob/bio/face/embeddings/__init__.py
index b34ea58693dc9de51979efa8c7c4d732e93c4faa..05228099435a7d31268b27fb9c2199224d3692a3 100644
--- a/bob/bio/face/embeddings/__init__.py
+++ b/bob/bio/face/embeddings/__init__.py
@@ -1,13 +1,38 @@
-from .facenet_sanderberg import FaceNetSanderberg
-from .idiap_inception_resnet import (
-    InceptionResnetv2_MsCeleb,
-    InceptionResnetv2_CasiaWebFace,
-    InceptionResnetv1_MsCeleb,
-    InceptionResnetv1_CasiaWebFace
-)
+import os
+import bob.extension.download
+
+def download_model(model_path, urls, zip_file="model.tar.gz"):
+    """
+    Download and unzip a model from some URL.
+
+    Parameters
+    ----------
+
+    model_path: str
+        Path where the model is supposed to be stored
+
+    urls: list
+        List of candidate URLs from which the model can be downloaded
 
-from .arface import ArcFace_InsightFaceTF
+    zip_file: str
+        Name under which the downloaded archive is saved before unpacking
 
+    """
+
+    if not os.path.exists(model_path):
+        os.makedirs(model_path, exist_ok=True)
+        zip_file = os.path.join(model_path, zip_file)
+        bob.extension.download.download_and_unzip(urls, zip_file)
+
+
+from .tf2_inception_resnet import (
+    InceptionResnet,
+    InceptionResnetv2_MsCeleb_CenterLoss_2018,
+    InceptionResnetv2_Casia_CenterLoss_2018,
+    InceptionResnetv1_MsCeleb_CenterLoss_2018,
+    InceptionResnetv1_Casia_CenterLoss_2018,
+    FaceNetSanderberg_20170512_110547
+)
 
 # gets sphinx autodoc done right - don't remove it
 def __appropriate__(*args):
@@ -26,11 +51,11 @@ def __appropriate__(*args):
 
 
 __appropriate__(
-    FaceNetSanderberg,
-    InceptionResnetv2_MsCeleb,
-    InceptionResnetv2_CasiaWebFace,
-    InceptionResnetv1_MsCeleb,
-    InceptionResnetv1_CasiaWebFace,
-    ArcFace_InsightFaceTF
+    InceptionResnet,
+    InceptionResnetv2_MsCeleb_CenterLoss_2018,
+    InceptionResnetv1_MsCeleb_CenterLoss_2018,
+    InceptionResnetv2_Casia_CenterLoss_2018,
+    InceptionResnetv1_Casia_CenterLoss_2018,
+    FaceNetSanderberg_20170512_110547
 )
 __all__ = [_ for _ in dir() if not _.startswith("_")]
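
`download_model` centralizes the download-and-unpack logic that each TensorFlow-v1 wrapper used to reimplement; the TF2 classes below call it with their own URLs. A sketch of calling it directly, with a placeholder path and URL:

from bob.bio.face.embeddings import download_model

# Placeholders for illustration only, not published artifacts.
model_path = "/tmp/my_model"
urls = ["https://example.com/my_model.tar.gz"]

# Downloads and unpacks the archive only if `model_path` does not exist yet.
download_model(model_path, urls, zip_file="my_model.tar.gz")
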
diff --git a/bob/bio/face/embeddings/facenet_sanderberg.py b/bob/bio/face/embeddings/facenet_sanderberg.py
deleted file mode 100644
index e0a48dc2c648960b2a3f520b73e9592d28c7d031..0000000000000000000000000000000000000000
--- a/bob/bio/face/embeddings/facenet_sanderberg.py
+++ /dev/null
@@ -1,242 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-
-"""
-Wrapper for the free FaceNet variant:
-    https://github.com/davidsandberg/facenet
-
-
-    Model 20170512-110547    
-"""
-
-from __future__ import division
-
-from sklearn.base import TransformerMixin, BaseEstimator
-
-import os
-import re
-import logging
-import numpy as np
-from bob.ip.color import gray_to_rgb
-from bob.io.image import to_matplotlib
-from bob.extension import rc
-import bob.extension.download
-import bob.io.base
-from sklearn.utils import check_array
-
-logger = logging.getLogger(__name__)
-
-FACENET_MODELPATH_KEY = "bob.bio.face.facenet_sanderberg_modelpath"
-
-
-def prewhiten(img):
-    mean = np.mean(img)
-    std = np.std(img)
-    std_adj = np.maximum(std, 1.0 / np.sqrt(img.size))
-    y = np.multiply(np.subtract(img, mean), 1 / std_adj)
-    return y
-
-
-def get_model_filenames(model_dir):
-    # code from https://github.com/davidsandberg/facenet
-    files = os.listdir(model_dir)
-    meta_files = [s for s in files if s.endswith(".meta")]
-    if len(meta_files) == 0:
-        raise ValueError("No meta file found in the model directory (%s)" % model_dir)
-    elif len(meta_files) > 1:
-        raise ValueError(
-            "There should not be more than one meta file in the model "
-            "directory (%s)" % model_dir
-        )
-    meta_file = meta_files[0]
-    max_step = -1
-    for f in files:
-        step_str = re.match(r"(^model-[\w\- ]+.ckpt-(\d+))", f)
-        if step_str is not None and len(step_str.groups()) >= 2:
-            step = int(step_str.groups()[1])
-            if step > max_step:
-                max_step = step
-                ckpt_file = step_str.groups()[0]
-    return meta_file, ckpt_file
-
-
-class FaceNetSanderberg(TransformerMixin, BaseEstimator):
-    """Wrapper for the free FaceNet variant:
-    https://github.com/davidsandberg/facenet
-
-    And for a preprocessor you can use::
-
-        from bob.bio.face.preprocessor import FaceCrop
-        # This is the size of the image that this model expects
-        CROPPED_IMAGE_HEIGHT = 160
-        CROPPED_IMAGE_WIDTH = 160
-        # eye positions for frontal images
-        RIGHT_EYE_POS = (46, 53)
-        LEFT_EYE_POS = (46, 107)
-        # Crops the face using eye annotations
-        preprocessor = FaceCrop(
-            cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
-            cropped_positions={'leye': LEFT_EYE_POS, 'reye': RIGHT_EYE_POS},
-            color_channel='rgb'
-        )
-
-    """
-
-    def __init__(
-        self,
-        model_path=rc[FACENET_MODELPATH_KEY],
-        image_size=160,
-        layer_name="embeddings:0",
-        **kwargs,
-    ):
-        super(FaceNetSanderberg, self).__init__()
-        self.model_path = model_path
-        self.image_size = image_size
-        self.layer_name = layer_name
-        self.loaded = False
-        self._clean_unpicklables()
-
-    def _clean_unpicklables(self):
-        self.session = None
-        self.embeddings = None
-        self.graph = None
-        self.images_placeholder = None
-        self.phase_train_placeholder = None
-
-    def _check_feature(self, img):
-        img = check_array(img, allow_nd=True)
-
-        def _convert(img):
-            assert img.shape[-2] == self.image_size
-            assert img.shape[-3] == self.image_size
-            img = prewhiten(img)
-            return img
-
-        if img.ndim == 3:
-            if img.shape[0]==3:
-                img = np.moveaxis(img, 0, -1)
-            return _convert(img)[None, ...]  # Adding another axis
-        elif img.ndim == 4:
-            if img.shape[1]==3:
-                img = np.moveaxis(img, 1, -1)
-            return _convert(img)
-        else:
-            raise ValueError(f"Image shape {img.shape} not supported")
-
-    def load_model(self):
-        import tensorflow as tf
-
-        tf.compat.v1.reset_default_graph()
-
-        session_conf = tf.compat.v1.ConfigProto(
-            intra_op_parallelism_threads=1, inter_op_parallelism_threads=1
-        )
-        self.graph = tf.Graph()
-        self.session = tf.compat.v1.Session(graph=self.graph, config=session_conf)
-
-        if self.model_path is None:
-            self.model_path = self.get_modelpath()
-        if not os.path.exists(self.model_path):
-            bob.io.base.create_directories_safe(FaceNetSanderberg.get_modelpath())
-            zip_file = os.path.join(
-                FaceNetSanderberg.get_modelpath(), "20170512-110547.zip"
-            )
-            urls = [
-                # This link only works in Idiap CI to save bandwidth.
-                "http://www.idiap.ch/private/wheels/gitlab/"
-                "facenet_model2_20170512-110547.zip",
-                # this link to dropbox would work for everybody
-                "https://www.dropbox.com/s/"
-                "k7bhxe58q7d48g7/facenet_model2_20170512-110547.zip?dl=1",
-            ]
-            bob.extension.download.download_and_unzip(urls, zip_file)
-
-        # code from https://github.com/davidsandberg/facenet
-        model_exp = os.path.expanduser(self.model_path)
-
-        with self.graph.as_default():
-            if os.path.isfile(model_exp):
-                logger.info("Model filename: %s" % model_exp)
-                with tf.compat.v1.gfile.FastGFile(model_exp, "rb") as f:
-                    graph_def = tf.compat.v1.GraphDef()
-                    graph_def.ParseFromString(f.read())
-                    tf.import_graph_def(graph_def, name="")
-            else:
-                logger.info("Model directory: %s" % model_exp)
-                meta_file, ckpt_file = get_model_filenames(model_exp)
-
-                logger.info("Metagraph file: %s" % meta_file)
-                logger.info("Checkpoint file: %s" % ckpt_file)
-
-                saver = tf.compat.v1.train.import_meta_graph(
-                    os.path.join(model_exp, meta_file)
-                )
-                saver.restore(self.session, os.path.join(model_exp, ckpt_file))
-        # Get input and output tensors
-        self.images_placeholder = self.graph.get_tensor_by_name("input:0")
-        self.embeddings = self.graph.get_tensor_by_name(self.layer_name)
-        self.phase_train_placeholder = self.graph.get_tensor_by_name("phase_train:0")
-        logger.info("Successfully loaded the model.")
-        self.loaded = True
-
-    def transform(self, X, **kwargs):
-        def _transform(X):
-
-            images = self._check_feature(X)
-            if not self.loaded:
-                self.load_model()
-
-            feed_dict = {
-                self.images_placeholder: images,
-                self.phase_train_placeholder: False,
-            }
-            features = self.session.run(self.embeddings, feed_dict=feed_dict)
-            return features
-        
-        return [_transform(i) for i in X]
-
-    @staticmethod
-    def get_modelpath():
-        """
-        Get default model path.
-
-        First we try the to search this path via Global Configuration System.
-        If we can not find it, we set the path in the directory
-        `<project>/data`
-        """
-
-        # Priority to the RC path
-        model_path = rc[FACENET_MODELPATH_KEY]
-
-        if model_path is None:
-            import pkg_resources
-
-            model_path = pkg_resources.resource_filename(
-                __name__, "data/FaceNet/20170512-110547"
-            )
-
-        return model_path
-
-    def __setstate__(self, d):
-        # Handling unpicklable objects
-        self.__dict__ = d
-
-    def __getstate__(self):
-        import tensorflow as tf
-
-        # Handling unpicklable objects
-        d = self.__dict__
-        d.pop("session") if "session" in d else None
-        d.pop("embeddings") if "embeddings" in d else None
-        d.pop("graph") if "graph" in d else None
-        d.pop("images_placeholder") if "images_placeholder" in d else None
-        d.pop("phase_train_placeholder") if "phase_train_placeholder" in d else None
-        tf.compat.v1.reset_default_graph()
-        self.loaded = False
-        return d
-
-    def _more_tags(self):
-        return {"stateless": True, "requires_fit": False}
-
-    def fit(self, X, y=None):
-        return self
diff --git a/bob/bio/face/embeddings/idiap_inception_resnet.py b/bob/bio/face/embeddings/idiap_inception_resnet.py
deleted file mode 100644
index 5398b24e24df95d459d33d26a184c905f99e3685..0000000000000000000000000000000000000000
--- a/bob/bio/face/embeddings/idiap_inception_resnet.py
+++ /dev/null
@@ -1,123 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-
-import os
-from sklearn.base import TransformerMixin, BaseEstimator
-from .tensorflow_compat_v1 import TensorflowCompatV1
-
-
-class InceptionResnetv2_MsCeleb(TensorflowCompatV1):
-    """
-    Inception Restnet v2 model from https://gitlab.idiap.ch/bob/bob.learn.tensorflow/-/blob/1e40a68bfbbb3dd8813c48d50b2f23ff7a399956/bob/learn/tensorflow/network/InceptionResnetV2.py
-
-    This model was trained using the MsCeleb 1M dataset
-
-    The input shape of this model is :math:`3 \times 160 \times 160`
-    The output embedding is :math:`n \times 128`, where :math:`n` is the number of samples
-
-    """
-
-    def __init__(self):
-        from bob.learn.tensorflow.network import inception_resnet_v2_batch_norm
-
-        bob_rc_variable = "bob.bio.face.idiap_inception_resnet_v2_path"
-        urls = ["https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/inception-v2_batchnorm_rgb.tar.gz"]
-        model_subdirectory = "idiap_inception_resnet_v2_path"
-
-        
-        checkpoint_filename = self.get_modelpath(bob_rc_variable, model_subdirectory)
-        self.download_model(checkpoint_filename, urls)
-
-        input_shape = (1, 160, 160, 3)
-        architecture_fn = inception_resnet_v2_batch_norm
-
-        super().__init__(checkpoint_filename, input_shape, architecture_fn)
-
-
-class InceptionResnetv2_CasiaWebFace(TensorflowCompatV1):
-    """
-    Inception Restnet v2 model from https://gitlab.idiap.ch/bob/bob.learn.tensorflow/-/blob/1e40a68bfbbb3dd8813c48d50b2f23ff7a399956/bob/learn/tensorflow/network/InceptionResnetV2.py
-
-    This model was trained using the Casia WebFace
-
-    The input shape of this model is :math:`3 \times 160 \times 160`
-    The output embedding is :math:`n \times 128`, where :math:`n` is the number of samples
-
-    """
-
-    def __init__(self):
-        """Loads the tensorflow model
-        """
-        from bob.learn.tensorflow.network import inception_resnet_v2_batch_norm
-
-        bob_rc_variable = "bob.bio.face.idiap_inception_resnet_v2_casiawebface_path"
-        urls = ["https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/idiap_inception_resnet_v2_casiawebface_path.tar.gz"]
-        model_subdirectory = "idiap_inception_resnet_v2_casiawebface_path"
-
-
-        checkpoint_filename = self.get_modelpath(bob_rc_variable, model_subdirectory)
-        self.download_model(checkpoint_filename, urls)
-
-        input_shape = (1, 160, 160, 3)
-        architecture_fn = inception_resnet_v2_batch_norm
-
-        super().__init__(checkpoint_filename, input_shape, architecture_fn)
-
-
-class InceptionResnetv1_MsCeleb(TensorflowCompatV1):
-    """
-    Inception Restnet v1 model from https://gitlab.idiap.ch/bob/bob.learn.tensorflow/-/blob/1e40a68bfbbb3dd8813c48d50b2f23ff7a399956/bob/learn/tensorflow/network/InceptionResnetV1.py
-
-    This model was trained using the MsCeleb 1M dataset
-
-    The input shape of this model is :math:`3 \times 160 \times 160`
-    The output embedding is :math:`n \times 128`, where :math:`n` is the number of samples
-
-    """
-
-    def __init__(self):
-        from bob.learn.tensorflow.network import inception_resnet_v1_batch_norm
-
-        bob_rc_variable = "bob.bio.face.idiap_inception_resnet_v1_msceleb_path"
-        urls = ["https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/idiap_inception_resnet_v1_msceleb_path.tar.gz"]
-        model_subdirectory = "idiap_inception_resnet_v1_msceleb_path"
-
-        
-        checkpoint_filename = self.get_modelpath(bob_rc_variable, model_subdirectory)
-        self.download_model(checkpoint_filename, urls)
-
-        input_shape = (1, 160, 160, 3)
-        architecture_fn = inception_resnet_v1_batch_norm
-
-        super().__init__(checkpoint_filename, input_shape, architecture_fn)
-
-
-
-class InceptionResnetv1_CasiaWebFace(TensorflowCompatV1):
-    """
-    Inception Restnet v1 model from https://gitlab.idiap.ch/bob/bob.learn.tensorflow/-/blob/1e40a68bfbbb3dd8813c48d50b2f23ff7a399956/bob/learn/tensorflow/network/InceptionResnetV1.py
-
-    This model was trained using the Casia WebFace
-
-    The input shape of this model is :math:`3 \times 160 \times 160`
-    The output embedding is :math:`n \times 128`, where :math:`n` is the number of samples
-
-    """
-
-    def __init__(self):
-        """Loads the tensorflow model
-        """
-        from bob.learn.tensorflow.network import inception_resnet_v1_batch_norm
-
-        bob_rc_variable = "bob.bio.face.idiap_inception_resnet_v1_casiawebface_path"
-        urls = ["https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/idiap_inception_resnet_v1_casiawebface_path.tar.gz"]
-        model_subdirectory = "idiap_inception_resnet_v1_casiawebface_path"
-
-
-        checkpoint_filename = self.get_modelpath(bob_rc_variable, model_subdirectory)
-        self.download_model(checkpoint_filename, urls)
-
-        input_shape = (1, 160, 160, 3)
-        architecture_fn = inception_resnet_v1_batch_norm
-
-        super().__init__(checkpoint_filename, input_shape, architecture_fn)
diff --git a/bob/bio/face/embeddings/tensorflow_compat_v1.py b/bob/bio/face/embeddings/tensorflow_compat_v1.py
deleted file mode 100644
index 216d1cc84377b191ce5f43f3257eeefa12ac5c11..0000000000000000000000000000000000000000
--- a/bob/bio/face/embeddings/tensorflow_compat_v1.py
+++ /dev/null
@@ -1,187 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-
-import os
-import pkg_resources
-import bob.extension.download
-from bob.extension import rc
-from sklearn.base import TransformerMixin, BaseEstimator
-import numpy as np
-import logging
-from sklearn.utils import check_array
-from bob.pipelines.sample import SampleBatch
-import copy
-logger = logging.getLogger(__name__)
-
-
-class TensorflowCompatV1(TransformerMixin, BaseEstimator):
-    """
-    Tensorflow v1 compatible set of transformers.
-
-    Parameters
-    ----------
-    checkpoint_filename: str
-        Path of your checkpoint. If the .meta file is providede the last checkpoint will be loaded.
-
-    input_shape: tuple
-        input_shape: Input shape for the tensorflow neural network
-
-    architecture_fn :
-        A tf.Tensor containing the operations to be executed
-    """
-
-    def __init__(self, checkpoint_filename, input_shape, architecture_fn):
-        """Loads the tensorflow model
-        """
-
-        self.checkpoint_filename = checkpoint_filename
-        self.input_shape = input_shape
-        self.architecture_fn = architecture_fn
-        self.loaded = False
-
-    def transform(self, X):
-        """
-        Forward the data with the loaded neural network
-
-        Parameters
-        ----------
-        X : numpy.ndarray
-            Input Data
-
-        Returns
-        -------
-        numpy.ndarray
-            The features.
-
-        """
-
-        def _transform(data):
-            data = check_array(data, allow_nd=True)
-
-            # THE INPUT SHAPE FOR THESE MODELS
-            # ARE `N x C x H x W`
-
-            # If ndim==3 we add another axis
-            if data.ndim == 3:
-                data = data[None, ...]
-
-            # Making sure it's channels last and has three channels
-            if data.ndim == 4:
-                # Just swiping the second dimension if bob format NxCxHxH
-                if data.shape[1] == 3:
-                    data = np.moveaxis(data, 1, -1)
-
-            if data.shape != self.input_shape:
-                raise ValueError(
-                    f"Image shape {data.shape} not supported. Expected {self.input_shape}"
-                )
-
-            if not self.loaded:
-                self.load_model()
-
-            return self.session.run(self.embedding, feed_dict={self.input_tensor: data},)
-
-        return [_transform(x) for x in X]
-
-    def load_model(self):
-        import tensorflow as tf
-
-        logger.info(f"Loading model `{self.checkpoint_filename}`")
-
-        tf.compat.v1.reset_default_graph()
-
-        self.input_tensor = tf.placeholder(tf.float32, shape=self.input_shape)
-
-        # Taking the embedding
-        prelogits = self.architecture_fn(
-            tf.stack(
-                [
-                    tf.image.per_image_standardization(i)
-                    for i in tf.unstack(self.input_tensor)
-                ]
-            ),
-            mode=tf.estimator.ModeKeys.PREDICT,
-        )[0]
-        self.embedding = tf.nn.l2_normalize(prelogits, dim=1, name="embedding")
-
-        # Initializing the variables of the current architecture_fn
-        self.session = tf.compat.v1.Session()
-        self.session.run(tf.compat.v1.global_variables_initializer())
-
-        # Loading the last checkpoint and overwriting the current variables
-        saver = tf.compat.v1.train.Saver()
-        if os.path.splitext(self.checkpoint_filename)[1] == ".meta":
-            saver.restore(
-                self.session,
-                tf.train.latest_checkpoint(os.path.dirname(self.checkpoint_filename)),
-            )
-        elif os.path.isdir(self.checkpoint_filename):
-            saver.restore(
-                self.session, tf.train.latest_checkpoint(self.checkpoint_filename)
-            )
-        else:
-            saver.restore(self.session, self.checkpoint_filename)
-
-        self.loaded = True
-
-    def __setstate__(self, d):
-        # Handling unpicklable objects
-        self.__dict__ = d        
-
-    def __getstate__(self):
-        # Handling unpicklable objects
-        d = self.__dict__.copy()
-        d.pop("session", None)
-        d.pop("input_tensor", None)
-        d.pop("embedding", None)
-        d["loaded"] = False
-        return d
-
-    def get_modelpath(self, bob_rc_variable, model_subdirectory):
-        """
-        Get default model path.
-
-        First we try the to search this path via Global Configuration System.
-        If we can not find it, we set the path in the directory
-        `<project>/data`
-        """
-
-        # Priority to the RC path
-        model_path = rc[bob_rc_variable]
-
-        if model_path is None:
-
-            model_path = pkg_resources.resource_filename(
-                __name__, os.path.join("data", model_subdirectory)
-            )
-
-        return model_path
-
-    def download_model(self, model_path, urls, zip_file="model.tar.gz"):
-        """
-        Download and unzip a model from some URL.
-
-        Parameters
-        ----------
-
-        model_path: str
-            Path where the model is supposed to be stored
-
-        urls: list
-            List of paths where the model is stored
-
-        zip_file: str
-            File name after the download
-
-        """
-
-        if not os.path.exists(model_path):
-            bob.io.base.create_directories_safe(model_path)
-            zip_file = os.path.join(model_path, zip_file)
-            bob.extension.download.download_and_unzip(urls, zip_file)
-
-    def fit(self, X, y=None):
-        return self
-
-    def _more_tags(self):
-        return {"stateless": True, "requires_fit": False}
diff --git a/bob/bio/face/embeddings/tf2_inception_resnet.py b/bob/bio/face/embeddings/tf2_inception_resnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..a80e15652b312d379a960dc0beeddb5bc093c439
--- /dev/null
+++ b/bob/bio/face/embeddings/tf2_inception_resnet.py
@@ -0,0 +1,256 @@
+import tensorflow as tf
+from bob.learn.tensorflow.utils.image import to_channels_last
+from sklearn.base import TransformerMixin, BaseEstimator
+from sklearn.utils import check_array
+
+from tensorflow.keras import Sequential
+from tensorflow.keras.layers.experimental import preprocessing
+from bob.extension import rc
+from functools import partial
+import pkg_resources
+import os
+from bob.bio.face.embeddings import download_model
+
+
+def sanderberg_rescaling():
+    # FIXED_STANDARDIZATION from https://github.com/davidsandberg/facenet
+    # [-0.99609375, 0.99609375]
+    preprocessor = preprocessing.Rescaling(scale=1 / 128, offset=-127.5 / 128)
+    return preprocessor
+
+
+class InceptionResnet(TransformerMixin, BaseEstimator):
+    """
+    Base Transformer for InceptionResnet architectures.
+
+    Szegedy, Christian, et al. "Inception-v4, inception-resnet and the impact of residual connections on learning." arXiv preprint arXiv:1602.07261 (2016).
+
+    Parameters
+    ----------
+
+    checkpoint_path: str
+       Path containing the checkpoint
+
+    preprocessor:
+        A preprocessing function applied to the input batch before inference
+
+    """
+
+    def __init__(self, checkpoint_path, preprocessor=None, **kwargs):
+        super().__init__(**kwargs)
+        self.checkpoint_path = checkpoint_path
+        self.model = None
+        self.preprocessor = preprocessor
+
+    def load_model(self):
+        self.model = tf.keras.models.load_model(self.checkpoint_path)
+
+    def inference(self, X):
+        if self.preprocessor is not None:
+            X = self.preprocessor(tf.cast(X, "float32"))
+
+        prelogits = self.model(X, training=False)
+        embeddings = tf.math.l2_normalize(prelogits, axis=-1)
+        return embeddings
+
+    def transform(self, X):
+        if self.model is None:
+            self.load_model()
+
+        X = check_array(X, allow_nd=True)
+        X = tf.convert_to_tensor(X)
+        X = to_channels_last(X)
+
+        if X.shape[-3:] != self.model.input_shape[-3:]:
+            raise ValueError(
+                f"Image shape {X.shape} not supported. Expected {self.model.input_shape}"
+            )
+
+        return self.inference(X).numpy()
+
+    def __setstate__(self, d):
+        self.__dict__ = d
+
+    def __getstate__(self):
+        # Handling unpicklable objects
+        d = self.__dict__.copy()
+        d["model"] = None        
+        return d
+
+    def _more_tags(self):
+        return {"stateless": True, "requires_fit": False}
+
+
+class InceptionResnetv2_MsCeleb_CenterLoss_2018(InceptionResnet):
+    """
+    InceptionResnet v2 model trained in 2018 using the MSCeleb dataset in the context of the work:
+
+    Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
+
+    """
+
+    def __init__(self):
+        internal_path = pkg_resources.resource_filename(
+            __name__, os.path.join("data", "inceptionresnetv2_msceleb_centerloss_2018"),
+        )
+
+        checkpoint_path = (
+            internal_path
+            if rc["bob.bio.face.models.InceptionResnetv2_MsCeleb_CenterLoss_2018"]
+            is None
+            else rc["bob.bio.face.models.InceptionResnetv2_MsCeleb_CenterLoss_2018"]
+        )
+
+        urls = [
+            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv2_msceleb_centerloss_2018.tar.gz"
+        ]
+
+        download_model(
+            checkpoint_path, urls, "inceptionresnetv2_msceleb_centerloss_2018.tar.gz"
+        )
+
+        super(InceptionResnetv2_MsCeleb_CenterLoss_2018, self).__init__(
+            checkpoint_path, preprocessor=tf.image.per_image_standardization,
+        )
+
+
+class InceptionResnetv2_Casia_CenterLoss_2018(InceptionResnet):
+    """
+    InceptionResnet v2 model trained in 2018 using the CasiaWebFace dataset in the context of the work:
+
+    Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
+
+    """
+
+    def __init__(self):
+        internal_path = pkg_resources.resource_filename(
+            __name__, os.path.join("data", "inceptionresnetv2_casia_centerloss_2018"),
+        )
+
+        checkpoint_path = (
+            internal_path
+            if rc["bob.bio.face.models.InceptionResnetv2_Casia_CenterLoss_2018"] is None
+            else rc["bob.bio.face.models.InceptionResnetv2_Casia_CenterLoss_2018"]
+        )
+
+        urls = [
+            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv2_casia_centerloss_2018.tar.gz"
+        ]
+
+        download_model(
+            checkpoint_path, urls, "inceptionresnetv2_casia_centerloss_2018.tar.gz"
+        )
+
+        super(InceptionResnetv2_Casia_CenterLoss_2018, self).__init__(
+            checkpoint_path, preprocessor=tf.image.per_image_standardization,
+        )
+
+
+
+class InceptionResnetv1_Casia_CenterLoss_2018(InceptionResnet):
+    """
+    InceptionResnet v1 model trained in 2018 using the CasiaWebFace dataset in the context of the work:
+
+    Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
+
+    """
+
+    def __init__(self):
+        internal_path = pkg_resources.resource_filename(
+            __name__, os.path.join("data", "inceptionresnetv1_casia_centerloss_2018"),
+        )
+
+        checkpoint_path = (
+            internal_path
+            if rc["bob.bio.face.models.InceptionResnetv1_Casia_CenterLoss_2018"] is None
+            else rc["bob.bio.face.models.InceptionResnetv1_Casia_CenterLoss_2018"]
+        )
+
+        urls = [
+            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv1_casia_centerloss_2018.tar.gz"
+        ]
+
+        download_model(
+            checkpoint_path, urls, "inceptionresnetv1_casia_centerloss_2018.tar.gz"
+        )        
+
+        super(InceptionResnetv1_Casia_CenterLoss_2018, self).__init__(
+            checkpoint_path, preprocessor=tf.image.per_image_standardization,
+        )
+
+class InceptionResnetv1_MsCeleb_CenterLoss_2018(InceptionResnet):
+    """
+    InceptionResnet v1 model trained in 2018 using the MsCeleb dataset in the context of the work:
+
+    Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
+
+    """
+
+    def __init__(self):
+        internal_path = pkg_resources.resource_filename(
+            __name__, os.path.join("data", "inceptionresnetv1_msceleb_centerloss_2018"),
+        )
+
+        checkpoint_path = (
+            internal_path
+            if rc["bob.bio.face.models.InceptionResnetv1_MsCeleb_CenterLoss_2018"] is None
+            else rc["bob.bio.face.models.InceptionResnetv1_MsCeleb_CenterLoss_2018"]
+        )
+
+        urls = [
+            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/inceptionresnetv1_msceleb_centerloss_2018.tar.gz"
+        ]
+
+        download_model(
+            checkpoint_path, urls, "inceptionresnetv1_msceleb_centerloss_2018.tar.gz"
+        )        
+
+        super(InceptionResnetv1_MsCeleb_CenterLoss_2018, self).__init__(
+            checkpoint_path, preprocessor=tf.image.per_image_standardization,
+        )
+
+
+class FaceNetSanderberg_20170512_110547(InceptionResnet):
+    """
+    Wrapper for the free FaceNet from David Sanderberg model 20170512_110547:
+    https://github.com/davidsandberg/facenet
+
+    And for a preprocessor you can use::
+
+        from bob.bio.face.preprocessor import FaceCrop
+        # This is the size of the image that this model expects
+        CROPPED_IMAGE_HEIGHT = 160
+        CROPPED_IMAGE_WIDTH = 160
+        # eye positions for frontal images
+        RIGHT_EYE_POS = (46, 53)
+        LEFT_EYE_POS = (46, 107)
+        # Crops the face using eye annotations
+        preprocessor = FaceCrop(
+            cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
+            cropped_positions={'leye': LEFT_EYE_POS, 'reye': RIGHT_EYE_POS},
+            color_channel='rgb'
+        )
+    """
+
+    def __init__(self):
+        internal_path = pkg_resources.resource_filename(
+            __name__, os.path.join("data", "facenet_sanderberg_20170512_110547"),
+        )
+
+        checkpoint_path = (
+            internal_path
+            if rc["bob.bio.face.models.facenet_sanderberg_20170512_110547"] is None
+            else rc["bob.bio.face.models.facenet_sanderberg_20170512_110547"]
+        )
+
+        urls = [
+            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/tf2/facenet_sanderberg_20170512_110547.tar.gz"
+        ]
+
+        download_model(
+            checkpoint_path, urls, "facenet_sanderberg_20170512_110547.tar.gz"
+        )        
+
+        super(FaceNetSanderberg_20170512_110547, self).__init__(
+            checkpoint_path, tf.image.per_image_standardization,
+        )
\ No newline at end of file
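
Each of these classes is a scikit-learn transformer: `transform` lazily loads the Keras model from the checkpoint, converts the batch to channels-last, applies the per-image standardization preprocessor and returns L2-normalized embeddings. A hedged usage sketch, assuming the checkpoint can be resolved (RC variable or download) and using the 160x160 RGB input size documented for the models these replace:

import numpy as np
from bob.bio.face.embeddings import InceptionResnetv2_MsCeleb_CenterLoss_2018

extractor = InceptionResnetv2_MsCeleb_CenterLoss_2018()

# A batch of two face crops in bob's C x H x W layout; transform() moves channels last.
faces = np.random.rand(2, 3, 160, 160)

embeddings = extractor.transform(faces)  # one L2-normalized embedding per input face
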
diff --git a/bob/bio/face/extractor/GridGraph.py b/bob/bio/face/extractor/GridGraph.py
index 313c2445219a10d07f7bbb655eeb5bf5369d70f8..ce7a8a37a515f5ecdea25dc2c96234573085acdc 100644
--- a/bob/bio/face/extractor/GridGraph.py
+++ b/bob/bio/face/extractor/GridGraph.py
@@ -211,7 +211,6 @@ class GridGraph(Extractor):
       The list of Gabor jets extracted from the image.
       The 2D location of the jet's nodes is not returned.
     """
-
         assert image.ndim == 2
         assert isinstance(image, numpy.ndarray)
         image = image.astype(numpy.float64)
diff --git a/bob/bio/face/preprocessor/FaceCrop.py b/bob/bio/face/preprocessor/FaceCrop.py
index 18719c1577e1f75de7d22ead2cb2f1b3363477c3..9b04b9decc79e3099b987b1e67c2843e34a096e0 100644
--- a/bob/bio/face/preprocessor/FaceCrop.py
+++ b/bob/bio/face/preprocessor/FaceCrop.py
@@ -3,6 +3,7 @@ import numpy
 import logging
 
 from .Base import Base
+from sklearn.base import TransformerMixin, BaseEstimator
 
 logger = logging.getLogger("bob.bio.face")
 from bob.bio.base import load_resource
@@ -370,3 +371,64 @@ class FaceCrop(Base):
     def __setstate__(self, d):
         self.__dict__ = d
         self._init_non_pickables()
+
+
+class MultiFaceCrop(TransformerMixin, BaseEstimator):
+    def __init__(
+        self,
+        cropped_image_size,
+        cropped_positions_list,
+        fixed_positions=None,
+        mask_sigma=None,
+        mask_neighbors=5,
+        mask_seed=None,
+        annotator=None,
+        allow_upside_down_normalized_faces=False,
+        **kwargs,
+    ):
+
+        assert isinstance(cropped_positions_list, list)
+
+        self.croppers = {}
+        for cropped_positions in cropped_positions_list:
+            assert len(cropped_positions) == 2
+            self.croppers[tuple(cropped_positions)] = FaceCrop(
+                cropped_image_size,
+                cropped_positions,
+                fixed_positions,
+                mask_sigma,
+                mask_neighbors,
+                mask_seed,
+                annotator,
+                allow_upside_down_normalized_faces,
+                **kwargs,
+            )
+
+    def transform(self, X, annotations=None):
+        subsets = {k: {"X": [], "annotations": []} for k in self.croppers.keys()}
+
+        def assign(X_elem, annotations_elem):
+            valid_keys = [
+                k
+                for k in self.croppers.keys()
+                if set(k).issubset(set(annotations_elem.keys()))
+            ]
+            assert (
+                len(valid_keys) == 1
+            ), "Cropper selection from the annotations is ambiguous ({} valid croppers)".format(
+                len(valid_keys)
+            )
+            subsets[valid_keys[0]]["X"].append(X_elem)
+            subsets[valid_keys[0]]["annotations"].append(annotations_elem)
+
+        for X_elem, annotations_elem in zip(X, annotations):
+            assign(X_elem, annotations_elem)
+
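+        # Run each cropper on its own subset; note that the outputs below are grouped
+        # per cropper, so the original ordering of ``X`` is not necessarily preserved.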
+        transformed_subsets = {
+            k: self.croppers[k].transform(**subsets[k]) for k in subsets.keys()
+        }
+
+        return [item for sublist in transformed_subsets.values() for item in sublist]
+
+    def fit(self, X, y=None):
+        return self
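+
+
+# A minimal usage sketch (hypothetical sizes and positions): one cropper is selected
+# per sample, based on which annotation keys the sample carries.
+#
+#   cropper = MultiFaceCrop(
+#       cropped_image_size=(80, 64),
+#       cropped_positions_list=[
+#           {"leye": (16, 48), "reye": (16, 15)},           # eyes-center samples
+#           {"topleft": (0, 0), "bottomright": (80, 64)},   # bounding-box samples
+#       ],
+#   )
+#   cropped = cropper.transform(images, annotations)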
diff --git a/bob/bio/face/preprocessor/HistogramEqualization.py b/bob/bio/face/preprocessor/HistogramEqualization.py
index 36c497cfbd6fb3ede17058481fc31bb4499109dc..63f7a594bd06b99cf4eb51be8bf396310aebbefa 100644
--- a/bob/bio/face/preprocessor/HistogramEqualization.py
+++ b/bob/bio/face/preprocessor/HistogramEqualization.py
@@ -98,11 +98,15 @@ class HistogramEqualization(Base):
             if self.cropper is not None:
                 # TODO: USE THE TAG `ALLOW_ANNOTATIONS`
                 image = (
-                    self.cropper.transform([image])[0]
+                    self.cropper.transform([image])
                     if annotations is None
-                    else self.cropper.transform([image], [annotations])[0]
+                    else self.cropper.transform([image], [annotations])
                 )
-            image = self.equalize_histogram(image)
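+                # The cropper works on batches, so take the single cropped image back out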
+                image = self.equalize_histogram(image[0])
+            else:
+                # Handle the case where the cropper is None
+                image = self.equalize_histogram(image)
+
             return self.data_type(image)
 
         if annotations is None:
diff --git a/bob/bio/face/preprocessor/INormLBP.py b/bob/bio/face/preprocessor/INormLBP.py
index c2235e63b74e191b2560b369189268aca9a1d3f4..d3a8bc10b6c94b58a01939d5e112b299893255f5 100644
--- a/bob/bio/face/preprocessor/INormLBP.py
+++ b/bob/bio/face/preprocessor/INormLBP.py
@@ -118,22 +118,28 @@ class INormLBP(Base):
       The cropped and photometrically enhanced face.
     """
 
-        def _crop(image, annotations=None):
+        def _crop_one_sample(image, annotations=None):
             image = self.change_color_channel(image)
             if self.cropper is not None:
                 # TODO: USE THE TAG `ALLOW_ANNOTATIONS`
                 image = (
-                    self.cropper.transform([image])[0]
+                    self.cropper.transform([image])
                     if annotations is None
-                    else self.cropper.transform([image], [annotations])[0]
+                    else self.cropper.transform([image], [annotations])
                 )
-            image = self.lbp_extractor(image)
+
+                # The LBP extractor does not work on batches, so apply it to the single cropped image
+                image = self.lbp_extractor(image[0])
+            else:
+                # Handle the case where the cropper is None
+                image = self.lbp_extractor(image)
+
             return self.data_type(image)
 
         if annotations is None:
-            return [_crop(data) for data in X]
+            return [_crop_one_sample(data) for data in X]
         else:
-            return [_crop(data, annot) for data, annot in zip(X, annotations)]
+            return [_crop_one_sample(data, annot) for data, annot in zip(X, annotations)]
 
     def __getstate__(self):
         d = dict(self.__dict__)
diff --git a/bob/bio/face/preprocessor/Scale.py b/bob/bio/face/preprocessor/Scale.py
index 82ee172e150f032ee98a292bdbed4516c6f62789..53166f42f641dfee017c4011c14702c135f3dfe7 100644
--- a/bob/bio/face/preprocessor/Scale.py
+++ b/bob/bio/face/preprocessor/Scale.py
@@ -2,7 +2,7 @@ from sklearn.preprocessing import FunctionTransformer
 from skimage.transform import resize
 from sklearn.utils import check_array
 from bob.io.image import to_matplotlib, to_bob
-
+import numpy as np
 
 def scale(images, target_img_size):
     """Scales a list of images to the target size
@@ -22,15 +22,21 @@ def scale(images, target_img_size):
     if isinstance(target_img_size, int):
         target_img_size = (target_img_size, target_img_size)
 
-    images = check_array(images, allow_nd=True)
-    images = to_matplotlib(images)
-
     # images are always batched
+    images = check_array(images, allow_nd=True)
+
     output_shape = tuple(target_img_size)
-    output_shape = tuple(images.shape[-1:-2]) + output_shape
-    images = resize(images, output_shape=output_shape)
-
-    return to_bob(images)
+    output_shape = tuple(images.shape[0:1]) + output_shape
+
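+    # e.g. (hypothetical shapes) a grayscale batch of shape (2, 112, 92) with
+    # target_img_size=(64, 64) yields output_shape == (2, 64, 64); for RGB batches,
+    # skimage's resize keeps the trailing channel axis produced by to_matplotlib.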
+    # Batched Bob RGB images, shape (N, C, H, W)
+    if images.ndim > 3:
+        images = to_matplotlib(images)
+        images = resize(images, output_shape=output_shape)
+        return to_bob(images)
+    else:
+        # Batched grayscale images, shape (N, H, W)
+        images = resize(images, output_shape=output_shape)
+        return images
 
 
 def Scale(target_img_size):
diff --git a/bob/bio/face/preprocessor/SelfQuotientImage.py b/bob/bio/face/preprocessor/SelfQuotientImage.py
index d83248b01da44c3c554acfeea40a0c6224b32fc6..35ed158cc4f27e06cc7fbe8b897c2a5cb8e38030 100644
--- a/bob/bio/face/preprocessor/SelfQuotientImage.py
+++ b/bob/bio/face/preprocessor/SelfQuotientImage.py
@@ -96,11 +96,15 @@ class SelfQuotientImage(Base):
             if self.cropper is not None:
                 # TODO: USE THE TAG `ALLOW_ANNOTATIONS`
                 image = (
-                    self.cropper.transform([image])[0]
+                    self.cropper.transform([image])
                     if annotations is None
-                    else self.cropper.transform([image], [annotations])[0]
-                )
-            image = self.self_quotient(image)
+                    else self.cropper.transform([image], [annotations])
+                )
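+                # The cropper works on batches, so take the single cropped image back out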
+                image = self.self_quotient(image[0])
+            else:
+                # Handle the case where the cropper is None
+                image = self.self_quotient(image)
+
             return self.data_type(image)
 
         if annotations is None:
diff --git a/bob/bio/face/preprocessor/TanTriggs.py b/bob/bio/face/preprocessor/TanTriggs.py
index a1abfd38b91ff40078f022cbe28d6887ad995eee..c1344d3db11a1211cdb9de3de45c82c9f9118413 100644
--- a/bob/bio/face/preprocessor/TanTriggs.py
+++ b/bob/bio/face/preprocessor/TanTriggs.py
@@ -108,22 +108,26 @@ class TanTriggs(Base):
       The cropped and photometrically enhanced face.
     """
 
-        def _crop(image, annotations=None):
+        def _crop_one_sample(image, annotations=None):
             image = self.change_color_channel(image)
             if self.cropper is not None:
                 # TODO: USE THE TAG `ALLOW_ANNOTATIONS`
                 image = (
-                    self.cropper.transform([image])[0]
+                    self.cropper.transform([image])
                     if annotations is None
-                    else self.cropper.transform([image], [annotations])[0]
+                    else self.cropper.transform([image], [annotations])
                 )
-            image = self.tan_triggs(image)
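+                # The cropper works on batches, so take the single cropped image back out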
+                image = self.tan_triggs(image[0])
+            else:
+                # Handle the case where the cropper is None
+                image = self.tan_triggs(image)
+
             return self.data_type(image)
 
         if annotations is None:
-            return [_crop(data) for data in X]
+            return [_crop_one_sample(data) for data in X]
         else:
-            return [_crop(data, annot) for data, annot in zip(X, annotations)]
+            return [_crop_one_sample(data, annot) for data, annot in zip(X, annotations)]
 
     def __getstate__(self):
         d = dict(self.__dict__)
diff --git a/bob/bio/face/test/test_baselines.py b/bob/bio/face/test/test_baselines.py
index 6b46f5ab3e0c5e7d9813bc1fb39b0882e7a814f3..851aad1ec65f904fd56e8ef14d4388c4dd1df1d8 100644
--- a/bob/bio/face/test/test_baselines.py
+++ b/bob/bio/face/test/test_baselines.py
@@ -12,6 +12,7 @@ import os
 import bob.io.base
 import functools
 import copy
+import tensorflow as tf
 
 
 images = dict()
@@ -70,9 +71,11 @@ def run_baseline(baseline, samples_for_training=[]):
     # CHECKPOINTING
     with tempfile.TemporaryDirectory() as d:
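+        # Wrap a deep copy, presumably so that checkpointing does not alter the
+        # pipeline object that is reused for the Dask run below.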
 
+        cpy = copy.deepcopy(pipeline)
         checkpoint_pipeline = checkpoint_vanilla_biometrics(
-            copy.deepcopy(pipeline), base_dir=d
+            cpy, base_dir=d
         )
+
         checkpoint_scores = checkpoint_pipeline([], biometric_references, probes)
         assert len(checkpoint_scores) == 1
         assert len(checkpoint_scores[0]) == 1
@@ -85,10 +88,11 @@ def run_baseline(baseline, samples_for_training=[]):
         assert "scores" in dirs
 
     # DASK
-    with tempfile.TemporaryDirectory() as d:
 
+    with tempfile.TemporaryDirectory() as d:
+        cpy = copy.deepcopy(pipeline)
         dask_pipeline = dask_vanilla_biometrics(
-            checkpoint_vanilla_biometrics(copy.deepcopy(pipeline), base_dir=d)
+            checkpoint_vanilla_biometrics(cpy, base_dir=d)
         )
         dask_scores = dask_pipeline([], biometric_references, probes)
         dask_scores = dask_scores.compute(scheduler="single-threaded")
@@ -102,7 +106,6 @@ def run_baseline(baseline, samples_for_training=[]):
         assert "samplewrapper-2" in dirs
         assert "scores" in dirs
 
-
 def test_facenet_baseline():
     run_baseline("facenet-sanderberg")
 
@@ -122,14 +125,14 @@ def test_inception_resnetv1_msceleb():
 def test_inception_resnetv1_casiawebface():
     run_baseline("inception-resnetv1-casiawebface")
 
-
+"""
 def test_arcface_insight_tf():
     import tensorflow as tf
 
     tf.compat.v1.reset_default_graph()
 
     run_baseline("arcface-insight-tf")
-
+"""
 
 def test_gabor_graph():
     run_baseline("gabor_graph")
diff --git a/bob/bio/face/test/test_embeddings.py b/bob/bio/face/test/test_embeddings.py
index 316423881b873de657c29d13a3f4f22b41fd0fd1..0d518b0a35cf2d0328fdf928bb149bff3df390ef 100644
--- a/bob/bio/face/test/test_embeddings.py
+++ b/bob/bio/face/test/test_embeddings.py
@@ -5,35 +5,16 @@ from bob.pipelines import Sample, wrap
 import pkg_resources
 
 
-def test_facenet():
-    from bob.bio.face.embeddings import FaceNetSanderberg
-
-    np.random.seed(10)
-
-    transformer = FaceNetSanderberg()
-    # Raw data
-    data = np.random.rand(3, 160, 160).astype("uint8")
-    output = transformer.transform([data])[0]
-    assert output.size == 128, output.shape
-
-    # Sample Batch
-    sample = Sample(data)
-    transformer_sample = wrap(["sample"], transformer)
-    output = [s.data for s in transformer_sample.transform([sample])][0]    
-    assert output.size == 128, output.shape
-
-
 def test_idiap_inceptionv2_msceleb():
-    from bob.bio.face.embeddings import InceptionResnetv2_MsCeleb
+    from bob.bio.face.embeddings import InceptionResnetv2_MsCeleb_CenterLoss_2018
 
     reference = bob.io.base.load(
         pkg_resources.resource_filename(
             "bob.bio.face.test", "data/inception_resnet_v2_rgb.hdf5"
         )
     )
-
     np.random.seed(10)
-    transformer = InceptionResnetv2_MsCeleb()
+    transformer = InceptionResnetv2_MsCeleb_CenterLoss_2018()
     data = (np.random.rand(3, 160, 160) * 255).astype("uint8")
     output = transformer.transform([data])[0]
     assert output.size == 128, output.shape
@@ -48,10 +29,10 @@ def test_idiap_inceptionv2_msceleb():
 
 
 def test_idiap_inceptionv2_casia():
-    from bob.bio.face.embeddings import InceptionResnetv2_CasiaWebFace
+    from bob.bio.face.embeddings import InceptionResnetv2_Casia_CenterLoss_2018
 
     np.random.seed(10)
-    transformer = InceptionResnetv2_CasiaWebFace()
+    transformer = InceptionResnetv2_Casia_CenterLoss_2018()
     data = np.random.rand(3, 160, 160).astype("uint8")
     output = transformer.transform([data])[0]
     assert output.size == 128, output.shape
@@ -65,10 +46,10 @@ def test_idiap_inceptionv2_casia():
 
 
 def test_idiap_inceptionv1_msceleb():
-    from bob.bio.face.embeddings import InceptionResnetv1_MsCeleb
+    from bob.bio.face.embeddings import InceptionResnetv1_MsCeleb_CenterLoss_2018
 
     np.random.seed(10)
-    transformer = InceptionResnetv1_MsCeleb()
+    transformer = InceptionResnetv1_MsCeleb_CenterLoss_2018()
     data = np.random.rand(3, 160, 160).astype("uint8")
     output = transformer.transform([data])[0]
     assert output.size == 128, output.shape
@@ -82,10 +63,10 @@ def test_idiap_inceptionv1_msceleb():
 
 
 def test_idiap_inceptionv1_casia():
-    from bob.bio.face.embeddings import InceptionResnetv1_CasiaWebFace
+    from bob.bio.face.embeddings import InceptionResnetv1_Casia_CenterLoss_2018
 
     np.random.seed(10)
-    transformer = InceptionResnetv1_CasiaWebFace()
+    transformer = InceptionResnetv1_Casia_CenterLoss_2018()
     data = np.random.rand(3, 160, 160).astype("uint8")
     output = transformer.transform([data])[0]
     assert output.size == 128, output.shape
@@ -97,7 +78,23 @@ def test_idiap_inceptionv1_casia():
 
     assert output.size == 128, output.shape
 
+def test_facenet_sanderberg():
+    from bob.bio.face.embeddings import FaceNetSanderberg_20170512_110547
+
+    np.random.seed(10)
+    transformer = FaceNetSanderberg_20170512_110547()
+    data = np.random.rand(3, 160, 160).astype("uint8")
+    output = transformer.transform([data])[0]
+    assert output.size == 128, output.shape
+
+    # Sample Batch
+    sample = Sample(data)
+    transformer_sample = wrap(["sample"], transformer)
+    output = [s.data for s in transformer_sample.transform([sample])][0]
+    assert output.size == 128, output.shape
+
 
+"""
 def test_arface_insight_tf():
     import tensorflow as tf
 
@@ -116,3 +113,4 @@ def test_arface_insight_tf():
     transformer_sample = wrap(["sample"], transformer)
     output = [s.data for s in transformer_sample.transform([sample])][0]
     assert output.size == 512, output.shape
+"""
\ No newline at end of file
diff --git a/bob/bio/face/test/test_transformers.py b/bob/bio/face/test/test_transformers.py
index f4e19bc74ea0b0208215dd1aff54293bcdd851ce..284a48aa9719b452d43b001e7da44cf7d6c6968c 100644
--- a/bob/bio/face/test/test_transformers.py
+++ b/bob/bio/face/test/test_transformers.py
@@ -11,7 +11,7 @@ def get_fake_sample(face_size=(160, 160), eyes={"leye": (46, 107), "reye": (46,
     return Sample(data, key="1", annotations=annotations)
 
 
-def test_facenet():
+def test_facenet_sanderberg():
     transformer = load_resource("facenet-sanderberg", "transformer")
 
     fake_sample = get_fake_sample()
@@ -60,7 +60,7 @@ def test_inception_resnetv1_casiawebface():
     transformed_data = transformed_sample.data
     assert transformed_sample.data.size == 128
 
-
+"""
 def test_arcface_insight_tf():
     import tensorflow as tf
     tf.compat.v1.reset_default_graph()
@@ -71,7 +71,7 @@ def test_arcface_insight_tf():
     transformed_sample = transformer.transform([fake_sample])[0]
     transformed_data = transformed_sample.data
     assert transformed_sample.data.size == 512
-
+"""
 
 def test_gabor_graph():
     transformer = load_resource("gabor-graph", "transformer")
diff --git a/develop.cfg b/develop.cfg
index 02064b99239b338aaa5873626cb83f642959f689..4ba1def3457b120f4a4b42274acf4491f2037f55 100644
--- a/develop.cfg
+++ b/develop.cfg
@@ -7,7 +7,9 @@ parts = scripts
 develop = src/bob.pipelines
           src/bob.bio.base
           src/bob.ip.gabor
-          .
+          src/bob.db.multipie
+          src/bob.learn.tensorflow
+          .
           
 
 
@@ -15,7 +17,8 @@ eggs = bob.bio.face
        bob.pipelines
        bob.bio.base
        bob.ip.gabor
- 
+       bob.db.multipie
+       bob.learn.tensorflow
 
 extensions = bob.buildout
              mr.developer
@@ -27,8 +30,10 @@ auto-checkout = *
 
 [sources]
 bob.pipelines = git git@gitlab.idiap.ch:bob/bob.pipelines
-bob.bio.base = git git@gitlab.idiap.ch:bob/bob.bio.base branch=dask-pipelines
+bob.bio.base = git git@gitlab.idiap.ch:bob/bob.bio.base
 bob.ip.gabor = git git@gitlab.idiap.ch:bob/bob.ip.gabor
+bob.db.multipie = git git@gitlab.idiap.ch:bob/bob.db.multipie
+bob.learn.tensorflow = git git@gitlab.idiap.ch:bob/bob.learn.tensorflow
 
 [scripts]
 recipe = bob.buildout:scripts