diff --git a/bob/bio/face/annotator/__init__.py b/bob/bio/face/annotator/__init__.py
index e06a34aec272550003d15ea93b8f3c15ff393213..b058d897018835424f7b6c4d999c85d289df513a 100644
--- a/bob/bio/face/annotator/__init__.py
+++ b/bob/bio/face/annotator/__init__.py
@@ -1,5 +1,6 @@
 import bob.ip.facedetect
 
+
 def bounding_box_to_annotations(bbx):
     """Converts :any:`bob.ip.facedetect.BoundingBox` to dictionary annotations.
 
diff --git a/bob/bio/face/annotator/bobiptinyface.py b/bob/bio/face/annotator/bobiptinyface.py
index d2790c826d61e4510cfa1e2a61d51166f9d1e270..fc06d274b7b23bf52058e15574a9af8d5c6d9c3a 100644
--- a/bob/bio/face/annotator/bobiptinyface.py
+++ b/bob/bio/face/annotator/bobiptinyface.py
@@ -26,10 +26,9 @@ class BobIpTinyface(Base):
             Annotations with (topleft, bottomright) keys (or None).
         """
 
         annotations = self.tinyface.detect(image)
 
         if annotations is not None:
-            r = annotations[0]
-            return {"topleft": (r[0], r[1]), "bottomright": (r[2], r[3])}
+            return annotations[0]
         else:
             return None
diff --git a/bob/bio/face/config/baseline/mxnet_pipe.py b/bob/bio/face/config/baseline/mxnet_pipe.py
index eeb062004bc0e55b3db402dfa8e39be42d9ed3cf..b28ccd595c0e4a22aa7be65827055d5d8fbaf0fd 100644
--- a/bob/bio/face/config/baseline/mxnet_pipe.py
+++ b/bob/bio/face/config/baseline/mxnet_pipe.py
@@ -1,6 +1,6 @@
 import bob.bio.base
 from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.face.extractor import mxnet_model
+from bob.bio.face.extractor import MxNetModel
 from bob.bio.base.algorithm import Distance
 from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
 import scipy.spatial
@@ -39,7 +39,7 @@ transform_extra_arguments = (
 )
 
 
-extractor_transformer = mxnet_model()
+extractor_transformer = MxNetModel()
 
 algorithm = Distance(
     distance_function=scipy.spatial.distance.cosine, is_distance_function=True
diff --git a/bob/bio/face/config/baseline/mxnet_tinyface_pipe.py b/bob/bio/face/config/baseline/mxnet_tinyface_pipe.py
new file mode 100644
index 0000000000000000000000000000000000000000..e19969d0ea08c874aff7cbc5b65197cd7446da69
--- /dev/null
+++ b/bob/bio/face/config/baseline/mxnet_tinyface_pipe.py
@@ -0,0 +1,37 @@
+import scipy.spatial
+from sklearn.pipeline import make_pipeline
+
+from bob.bio.face.preprocessor import FaceCrop
+from bob.bio.face.annotator import BobIpTinyface
+from bob.bio.face.extractor import MxNetModel
+from bob.bio.base.pipelines.vanilla_biometrics import Distance
+from bob.bio.base.pipelines.vanilla_biometrics import VanillaBiometricsPipeline
+from bob.pipelines import wrap
+
+
+# Annotator: tinyface detects the face and provides the eye landmarks
+annotator_transformer = BobIpTinyface()
+
+# Preprocessor: crop to the geometry expected by the ArcFace model
+preprocessor_transformer = FaceCrop(
+    cropped_image_size=(112, 112),
+    cropped_positions={"leye": (49, 72), "reye": (49, 38)},
+    color_channel="rgb",
+    annotator=annotator_transformer,
+)
+
+# Extractor
+extractor_transformer = MxNetModel()
+
+# Algorithm
+algorithm = Distance(
+    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
+)
+
+transformer = make_pipeline(
+    wrap(["sample"], preprocessor_transformer),
+    wrap(["sample"], extractor_transformer),
+)
+
+pipeline = VanillaBiometricsPipeline(transformer, algorithm)
+transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/opencv_pipe.py b/bob/bio/face/config/baseline/opencv_pipe.py
index aac2d3f1c45559e35545fb66a5c02ca97aa76790..8d8c63f8dc409f59990792c31a401166ad5a4e16 100644
--- a/bob/bio/face/config/baseline/opencv_pipe.py
+++ b/bob/bio/face/config/baseline/opencv_pipe.py
@@ -35,6 +35,15 @@ preprocessor_transformer = FaceCrop(
     fixed_positions=fixed_positions,
 )
 
+cropped_positions = {"leye": (100, 140), "reye": (100, 95)}
+# Preprocessor
+preprocessor_transformer = FaceCrop(
+    cropped_image_size=(224, 224),
+    cropped_positions={"leye": (100, 140), "reye": (100, 95)},
+    color_channel="rgb",
+    fixed_positions=fixed_positions,
+)
+
 transform_extra_arguments = (
     None
     if (cropped_positions is None or fixed_positions is not None)
diff --git a/bob/bio/face/config/baseline/opencv_tinyface_pipe.py b/bob/bio/face/config/baseline/opencv_tinyface_pipe.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a61be6c8c22bbbccdb9ca44621a2e31f134acf1
--- /dev/null
+++ b/bob/bio/face/config/baseline/opencv_tinyface_pipe.py
@@ -0,0 +1,39 @@
+import scipy.spatial
+from sklearn.pipeline import make_pipeline
+
+from bob.bio.face.preprocessor import FaceCrop
+from bob.bio.face.annotator import BobIpTinyface
+from bob.bio.face.extractor import OpenCVModel
+from bob.bio.base.pipelines.vanilla_biometrics import Distance
+from bob.bio.base.pipelines.vanilla_biometrics import VanillaBiometricsPipeline
+from bob.pipelines import wrap
+
+
+# Annotator: tinyface detects the face and provides the eye landmarks
+annotator_transformer = BobIpTinyface()
+
+# Preprocessor: crop to the geometry expected by the Caffe VGG model
+preprocessor_transformer = FaceCrop(
+    cropped_image_size=(224, 224),
+    cropped_positions={"leye": (49, 72), "reye": (49, 38)},
+    color_channel="rgb",
+    annotator=annotator_transformer,
+)
+
+# Extractor
+extractor_transformer = OpenCVModel()
+
+# Algorithm
+algorithm = Distance(
+    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
+)
+
+# Chain the transformers together
+transformer = make_pipeline(
+    wrap(["sample"], preprocessor_transformer),
+    wrap(["sample"], extractor_transformer),
+)
+
+# Assemble the Vanilla Biometrics pipeline and execute
+pipeline = VanillaBiometricsPipeline(transformer, algorithm)
+transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/pytorch_pipe_v1.py b/bob/bio/face/config/baseline/pytorch_pipe_v1.py
index ec5c4d9d619110d81c0c576088112aee10d6d282..843e53818a851b7f1cee481c7c011eeff8f4c8c8 100644
--- a/bob/bio/face/config/baseline/pytorch_pipe_v1.py
+++ b/bob/bio/face/config/baseline/pytorch_pipe_v1.py
@@ -1,6 +1,6 @@
 import bob.bio.base
 from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.face.extractor import pytorch_loaded_model
+from bob.bio.face.extractor import PyTorchLoadedModel
 from bob.bio.base.algorithm import Distance
 from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
 import scipy.spatial
@@ -25,9 +25,9 @@ else:
 
-cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
+cropped_positions = {"leye": (110, 144), "reye": (110, 96)}
 
 preprocessor_transformer = FaceCrop(
     cropped_image_size=(224, 224),
-    cropped_positions={"leye": (49, 72), "reye": (49, 38)},
+    cropped_positions={"leye": (110, 144), "reye": (110, 96)},
     color_channel="rgb",
     fixed_positions=fixed_positions,
 )
@@ -38,8 +38,8 @@ transform_extra_arguments = (
     else (("annotations", "annotations"),)
 )
 
 
-extractor_transformer = pytorch_loaded_model()
+extractor_transformer = PyTorchLoadedModel()
 
 algorithm = Distance(
     distance_function=scipy.spatial.distance.cosine, is_distance_function=True
diff --git a/bob/bio/face/config/baseline/pytorch_pipe_v2.py b/bob/bio/face/config/baseline/pytorch_pipe_v2.py
index 630609e6fb22085783fa8c977ddc9702615d3364..90788963d6440c828778638cbc258eaa702a1376 100644
--- a/bob/bio/face/config/baseline/pytorch_pipe_v2.py
+++ b/bob/bio/face/config/baseline/pytorch_pipe_v2.py
@@ -1,6 +1,6 @@
 import bob.bio.base
 from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.face.extractor import pytorch_library_model
+from bob.bio.face.extractor import PyTorchLibraryModel
 from facenet_pytorch import InceptionResnetV1
 from bob.bio.base.algorithm import Distance
 from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
@@ -26,9 +26,9 @@ else:
 
-cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
+cropped_positions = {"leye": (110, 144), "reye": (110, 96)}
 
 preprocessor_transformer = FaceCrop(
     cropped_image_size=(224, 224),
-    cropped_positions={"leye": (49, 72), "reye": (49, 38)},
+    cropped_positions={"leye": (110, 144), "reye": (110, 96)},
     color_channel="rgb",
     fixed_positions=fixed_positions,
 )
@@ -40,8 +40,8 @@ transform_extra_arguments = (
 )
 
 
 model = InceptionResnetV1(pretrained="vggface2").eval()
-extractor_transformer = pytorch_library_model(model=model)
+extractor_transformer = PyTorchLibraryModel(model=model)
 
 
 algorithm = Distance(
diff --git a/bob/bio/face/config/baseline/tf_pipe.py b/bob/bio/face/config/baseline/tf_pipe.py
index 22c24d95abcf88bf1acb9c9fe9f587fbd7abeccd..c4ea24a68baad4ff57d57d8f773f249284a01b6b 100644
--- a/bob/bio/face/config/baseline/tf_pipe.py
+++ b/bob/bio/face/config/baseline/tf_pipe.py
@@ -1,6 +1,6 @@
 import bob.bio.base
 from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.face.extractor import tf_model
+from bob.bio.face.extractor import TensorFlowModel
 from bob.bio.base.algorithm import Distance
 from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
 import scipy.spatial
@@ -24,11 +24,11 @@ else:
 
 
 # Preprocessor
-cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
+cropped_positions = {"leye": (80, 100), "reye": (80, 60)}
 
 preprocessor_transformer = FaceCrop(
     cropped_image_size=(160, 160),
-    cropped_positions={"leye": (49, 72), "reye": (49, 38)},
+    cropped_positions={"leye": (80, 100), "reye": (80, 60)},
     color_channel="rgb",
     fixed_positions=fixed_positions,
 )
@@ -41,7 +41,7 @@ transform_extra_arguments = (
 
 
 # Extractor
-extractor_transformer = tf_model()
+extractor_transformer = TensorFlowModel()
 
 # Algorithm
 algorithm = Distance(
diff --git a/bob/bio/face/extractor/mxnet_resnet.py b/bob/bio/face/extractor/MxNetModel.py
similarity index 98%
rename from bob/bio/face/extractor/mxnet_resnet.py
rename to bob/bio/face/extractor/MxNetModel.py
index 73ab9f22a070ac198e2f8d7dfbcb6c23bb1f4221..84e856862ee5a2e02eb2c832e4f7fcb4ad4bc83d 100644
--- a/bob/bio/face/extractor/mxnet_resnet.py
+++ b/bob/bio/face/extractor/MxNetModel.py
@@ -17,7 +17,7 @@ mxnet_resnet_directory = rc["bob.extractor_model.mxnet"]
 mxnet_weight_directory = rc["bob.extractor_weights.mxnet"]
 
 
-class mxnet_model(TransformerMixin, BaseEstimator):
+class MxNetModel(TransformerMixin, BaseEstimator):
 
     """Extracts features using deep face recognition models under MxNet Interfaces.
   
diff --git a/bob/bio/face/extractor/opencv_caffe.py b/bob/bio/face/extractor/OpenCVModel.py
similarity index 97%
rename from bob/bio/face/extractor/opencv_caffe.py
rename to bob/bio/face/extractor/OpenCVModel.py
index 53ab52a6511985553ce6c70211995dac025a92fa..728b2b7ca94c451ada6b09411e9627964bc8df5d 100644
--- a/bob/bio/face/extractor/opencv_caffe.py
+++ b/bob/bio/face/extractor/OpenCVModel.py
@@ -25,7 +25,7 @@ opencv_model_directory = rc["bob.extractor_model.opencv"]
 opencv_model_prototxt = rc["bob.extractor_weights.opencv"]
 
 
-class opencv_model(TransformerMixin, BaseEstimator):
+class OpenCVModel(TransformerMixin, BaseEstimator):
     """Extracts features using deep face recognition models under OpenCV Interface
 
   Users can download the pretrained face recognition models with OpenCV Interface. The path to downloaded models should be specified before running the extractor (usually before running the pipeline file that includes the extractor). That is, set config of the model frame to :py:class:`bob.extractor_model.opencv`, and set config of the parameters to :py:class:`bob.extractor_weights.opencv`. 
@@ -39,8 +39,6 @@ class opencv_model(TransformerMixin, BaseEstimator):
 
     .. note::
        This structure only can be used for CAFFE pretrained model.
-
-
     """
 
     def __init__(self, **kwargs):
@@ -82,10 +80,10 @@ class opencv_model(TransformerMixin, BaseEstimator):
     """
 
         if self.model is None:
-
             self._load_model()
 
         img = np.array(X)
+        img = img / 255
 
         self.model.setInput(img)
 
@@ -99,5 +97,4 @@ class opencv_model(TransformerMixin, BaseEstimator):
         return d
 
     def _more_tags(self):
-
         return {"stateless": True, "requires_fit": False}
diff --git a/bob/bio/face/extractor/pytorch_model.py b/bob/bio/face/extractor/PyTorchModel.py
similarity index 92%
rename from bob/bio/face/extractor/pytorch_model.py
rename to bob/bio/face/extractor/PyTorchModel.py
index 2d7a5f1c14e263aac1c1eeed237afb465cee89ee..24e02bc3013e81463c799b8ca95ab6bf0fb5d5f7 100644
--- a/bob/bio/face/extractor/pytorch_model.py
+++ b/bob/bio/face/extractor/PyTorchModel.py
@@ -17,7 +17,7 @@ pytorch_model_directory = rc["bob.extractor_model.pytorch"]
 pytorch_weight_directory = rc["bob.extractor_weights.pytorch"]
 
 
-class pytorch_loaded_model(TransformerMixin, BaseEstimator):
+class PyTorchLoadedModel(TransformerMixin, BaseEstimator):
     """Extracts features using deep face recognition models under PyTorch Interface, especially for the models and weights that need to load by hand.
     
   Users can download the pretrained face recognition models with PyTorch Interface. The path to downloaded models should be specified before running the extractor (usually before running the pipeline file that includes the extractor). That is, set config of the model frame to :py:class:`bob.extractor_model.pytorch`, and set config of the parameters to :py:class:`bob.extractor_weights.pytorch`. 
@@ -28,8 +28,6 @@ class pytorch_loaded_model(TransformerMixin, BaseEstimator):
     $ bob config set bob.extractor_weights.pytorch /PATH/TO/WEIGHTS/
   
   The extracted features can be combined with different the algorithms. 
-
-
     """
 
     def __init__(self, **kwargs):
@@ -76,11 +74,11 @@ class pytorch_loaded_model(TransformerMixin, BaseEstimator):
     feature : 2D or 3D :py:class:`numpy.ndarray` (floats)
       The list of features extracted from the image.
     """
-
         if self.model is None:
             self._load_model()
 
         X = torch.Tensor(X)
+        X = X / 255
 
         return self.model(X).detach().numpy()
 
@@ -92,11 +90,10 @@ class pytorch_loaded_model(TransformerMixin, BaseEstimator):
         return d
 
     def _more_tags(self):
-
         return {"stateless": True, "requires_fit": False}
 
 
-class pytorch_library_model(TransformerMixin, BaseEstimator):
+class PyTorchLibraryModel(TransformerMixin, BaseEstimator):
     """Extracts features using deep face recognition with registered model frames in the PyTorch Library. 
     
   Users can import the pretrained face recognition models from PyTorch library. The model should be called in the pipeline. Example: `facenet_pytorch <https://github.com/timesler/facenet-pytorch>`_
@@ -105,7 +102,6 @@ class pytorch_library_model(TransformerMixin, BaseEstimator):
 
   **Parameters:**
   model: pytorch model calling from library.
-  use_gpu: True or False.
     """
 
     def __init__(self, model=None, **kwargs):
@@ -125,6 +121,11 @@ class pytorch_library_model(TransformerMixin, BaseEstimator):
         self.checkpoint_path = checkpoint_path
         self.device = None
 
+    def _load_model(self):
+
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        self.model.to(self.device)
+
     def transform(self, X):
         """__call__(image) -> feature
 
@@ -141,7 +142,12 @@
       The list of features extracted from the image.
     """
 
+        if self.device is None:
+            self._load_model()
+
         X = torch.Tensor(X)
+        X = X / 255
+        X = X.to(self.device)
 
-        return self.model(X).detach().numpy()
+        return self.model(X).detach().cpu().numpy()
 
diff --git a/bob/bio/face/extractor/tf_model.py b/bob/bio/face/extractor/TensorFlowModel.py
similarity index 97%
rename from bob/bio/face/extractor/tf_model.py
rename to bob/bio/face/extractor/TensorFlowModel.py
index c690f2acb339c903aa56367e7ceffac8996062c9..697afec88bf8bd382390d023744be332ac8c345d 100644
--- a/bob/bio/face/extractor/tf_model.py
+++ b/bob/bio/face/extractor/TensorFlowModel.py
@@ -18,7 +18,7 @@ from tensorflow import keras
 tf_model_directory = rc["bob.extractor_model.tf"]
 
 
-class tf_model(TransformerMixin, BaseEstimator):
+class TensorFlowModel(TransformerMixin, BaseEstimator):
     """Extracts features using deep face recognition models under TensorFlow Interface.
 
   Users can download the pretrained face recognition models with TensorFlow Interface. The path to downloaded models should be specified before running the extractor (usually before running the pipeline file that includes the extractor). That is, set config of the model to :py:class:`bob.extractor_model.tf`. 
@@ -28,8 +28,6 @@ class tf_model(TransformerMixin, BaseEstimator):
     $ bob config set bob.extractor_model.tf /PATH/TO/MODEL/
   
   The extracted features can be combined with different the algorithms. 
-
-
     """
 
     def __init__(self, **kwargs):
@@ -76,6 +74,8 @@ class tf_model(TransformerMixin, BaseEstimator):
         X = check_array(X, allow_nd=True)
         X = tf.convert_to_tensor(X)
         X = to_channels_last(X)
+
+        X = X / 255
         predict = self.model.predict(X)
 
         return predict
diff --git a/bob/bio/face/extractor/__init__.py b/bob/bio/face/extractor/__init__.py
index 32b97163775852dbca316f538672be6bccf2095d..d98275bc189a792df01b1031277063bfd6ff82a5 100644
--- a/bob/bio/face/extractor/__init__.py
+++ b/bob/bio/face/extractor/__init__.py
@@ -1,11 +1,11 @@
 from .DCTBlocks import DCTBlocks
 from .GridGraph import GridGraph
 from .LGBPHS import LGBPHS
-from .mxnet_resnet import mxnet_model
-from .pytorch_model import pytorch_loaded_model
-from .pytorch_model import pytorch_library_model
-from .tf_model import tf_model
-from .opencv_caffe import opencv_model
+from .MxNetModel import MxNetModel
+from .PyTorchModel import PyTorchLoadedModel
+from .PyTorchModel import PyTorchLibraryModel
+from .TensorFlowModel import TensorFlowModel
+from .OpenCVModel import OpenCVModel
 
 # gets sphinx autodoc done right - don't remove it
 def __appropriate__(*args):
@@ -27,10 +27,10 @@ __appropriate__(
     DCTBlocks,
     GridGraph,
     LGBPHS,
-    mxnet_model,
-    pytorch_loaded_model,
-    pytorch_library_model,
-    tf_model,
-    opencv_model,
+    MxNetModel,
+    PyTorchLoadedModel,
+    PyTorchLibraryModel,
+    TensorFlowModel,
+    OpenCVModel,
 )
 __all__ = [_ for _ in dir() if not _.startswith("_")]
diff --git a/doc/baselines.rst b/doc/baselines.rst
index 88c42a629cd6a9eb1ccd18fe92146aa3bbfc499c..4f8d9c2da0043f74340d937ba66169613d803863 100644
--- a/doc/baselines.rst
+++ b/doc/baselines.rst
@@ -61,12 +61,14 @@ Deep learning baselines
 Deep Learning with different interfaces baselines
 =================================================
 
-* ``mxnet_pipe``: Arcface Resnet Model using MxNet Interfaces from `Insightface <https://github.com/deepinsight/insightface>`_
+* ``mxnet-pipe``: Arcface Resnet Model using MxNet Interfaces from `Insightface <https://github.com/deepinsight/insightface>`_
 
-* ``pytorch_pipe_v1``: Pytorch network that extracs 1000-dimensional featrues, trained by Manual Gunther, as described in [LGB18]_
+* ``mxnet-tinyface``: Applies the `tinyface annotator <https://github.com/chinakook/hr101_mxnet>`_ to the Arcface Resnet Model using MxNet Interfaces from `Insightface <https://github.com/deepinsight/insightface>`_
 
-* ``pytorch_pipe_v2``: Inception Resnet face recognition model from `facenet_pytorch <https://github.com/timesler/facenet-pytorch>`_
+* ``pytorch-pipe-v1``: PyTorch network that extracts 1000-dimensional features, trained by Manuel Günther, as described in [LGB18]_
 
-* ``tf_pipe``: Inception Resnet v2 model trained using the MSCeleb dataset in the context of the work published by [TFP18]_
+* ``pytorch-pipe-v2``: Inception Resnet face recognition model from `facenet_pytorch <https://github.com/timesler/facenet-pytorch>`_
 
-* ``opencv_pipe``: VGG Face descriptor pretrained models, i.e. `Caffe model <https://www.robots.ox.ac.uk/~vgg/software/vgg_face/>`_
+* ``tf-pipe``: Inception Resnet v2 model trained using the MSCeleb dataset in the context of the work published by [TFP18]_
+
+* ``opencv-pipe``: VGG Face descriptor pretrained models, i.e. `Caffe model <https://www.robots.ox.ac.uk/~vgg/software/vgg_face/>`_
diff --git a/doc/deeplearningextractor.rst b/doc/deeplearningextractor.rst
new file mode 100644
index 0000000000000000000000000000000000000000..466a0fa4fa224cb2a47deb96d11e440df48a69b7
--- /dev/null
+++ b/doc/deeplearningextractor.rst
@@ -0,0 +1,318 @@
+.. vim: set fileencoding=utf-8 :
+.. author: Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch>
+
+.. _bob.bio.face.deeplearningextractor:
+
+==========================================================================
+Extractors using Pretrained Deep Neural Networks with Different Interfaces
+==========================================================================
+
+
+This page explains how to import pretrained feature-extraction models and use them in a face recognition experiment.
+
+
+
+Extractor 
+---------
+
+As explained in :ref:`bob.bio.base <bob.bio.base>`, the extractor is a transformer that reduces the dimensionality of preprocessed images, producing feature vectors that ease the subsequent classification step (computing distances between individuals' feature vectors).
+ 
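+A minimal sketch of what such a transformer looks like, following the
+scikit-learn conventions used throughout ``bob.bio.base`` (the
+``extract_features`` helper below is hypothetical):
+
+.. code-block:: py
+
+    from sklearn.base import BaseEstimator, TransformerMixin
+
+    class MyExtractor(TransformerMixin, BaseEstimator):
+        def transform(self, X):
+            # X is a batch of preprocessed images; return one feature
+            # vector per image (extract_features is a placeholder)
+            return [extract_features(x) for x in X]
+
+        def fit(self, X, y=None):
+            # pretrained models have nothing to fit
+            return self
+
+        def _more_tags(self):
+            return {"stateless": True, "requires_fit": False}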
+
+ 
+Pretrained Models as Extractors
+-------------------------------
+
+Typical extractors such as ``Discrete Cosine Transform (DCT)``, ``Gabor jets in a grid structure`` [GHW12]_, and ``Local Gabor Binary Pattern Histogram Sequences (LGBPHS)`` [ZSG05]_ predate the deep learning era. The extractors introduced on this page instead import a pretrained feature-extraction model together with its weights: preprocessed images are fed to the model, which returns the corresponding feature vectors. This way, users can capture features with their own or downloaded pretrained models.
+ 
+.. note::
+  The following interfaces are supported:
+
+  * MxNet
+  * PyTorch: either import models by hand or call them from the library
+  * TensorFlow
+  * OpenCV
+
+  Please check the implementation details below.
+
+
+
+Step By Step Instructions
+-------------------------
+
+
+1.   Download the desired model framework and its pretrained weights.
+
+2.   Specify the paths to the models before setting up the experiment configuration.
+    
+    MxNet
+       
+    .. code-block:: sh
+   
+        $ bob config set bob.extractor_model.mxnet /PATH/TO/MODEL/
+        $ bob config set bob.extractor_weights.mxnet /PATH/TO/WEIGHTS/
+
+    OpenCV:
+       
+    .. code-block:: sh
+   
+        $ bob config set bob.extractor_model.opencv /PATH/TO/MODEL/
+        $ bob config set bob.extractor_weights.opencv /PATH/TO/WEIGHTS/
+
+    PyTorch:
+       
+    .. code-block:: sh
+   
+        $ bob config set bob.extractor_model.pytorch /PATH/TO/MODEL/
+        $ bob config set bob.extractor_weights.pytorch /PATH/TO/WEIGHTS/
+       
+    PyTorch (Call from Library): no need to set a path; define the model directly in the configuration file.
+       
+    TensorFlow:
+       
+    .. code-block:: sh
+   
+        $ bob config set bob.extractor_model.tf /PATH/TO/MODEL/
+
+3.   Call the extractor in the configuration file. Example for MxNet:
+       
+    .. code-block:: py
+   
+        from bob.bio.face.extractor import MxNetModel
+        extractor_transformer = MxNetModel()
+
+4.   Run the pipeline as usual. A complete configuration sketch is shown below.
+
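+Putting the pieces together, a configuration file looks roughly as follows
+(a sketch modeled on the ``mxnet-pipe`` baseline shipped with this package,
+trimmed of the database-provided annotation and fixed-position handling):
+
+.. code-block:: py
+
+    import scipy.spatial
+    from sklearn.pipeline import make_pipeline
+
+    from bob.bio.face.preprocessor import FaceCrop
+    from bob.bio.face.extractor import MxNetModel
+    from bob.bio.base.pipelines.vanilla_biometrics import (
+        Distance,
+        VanillaBiometricsPipeline,
+    )
+    from bob.pipelines import wrap
+
+    # crop the face to the geometry the pretrained network expects
+    preprocessor_transformer = FaceCrop(
+        cropped_image_size=(112, 112),
+        cropped_positions={"leye": (49, 72), "reye": (49, 38)},
+        color_channel="rgb",
+    )
+
+    extractor_transformer = MxNetModel()
+
+    # compare embeddings with the cosine distance
+    algorithm = Distance(
+        distance_function=scipy.spatial.distance.cosine, is_distance_function=True
+    )
+
+    transformer = make_pipeline(
+        wrap(["sample"], preprocessor_transformer),
+        wrap(["sample"], extractor_transformer),
+    )
+
+    pipeline = VanillaBiometricsPipeline(transformer, algorithm)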
+
+ 
+Baselines
+---------
+
+The available baselines and the resulting ROC plots for each interface are listed below.
+
+
+MxNet
+=====
+*  ``mxnet-pipe``: Arcface Resnet Model using MxNet Interfaces from `Insightface <https://github.com/deepinsight/insightface>`_
+
+1.   Set Path of Model
+       
+    .. code-block:: sh
+   
+        $ bob config set bob.extractor_model.mxnet /PATH/TO/MXNETMODEL/
+        $ bob config set bob.extractor_weights.mxnet /PATH/TO/MXNETWEIGHTS/
+
+2.   Call the extractor in the configuration
+       
+    .. code-block:: py
+   
+        from bob.bio.face.extractor import MxNetModel
+        extractor_transformer = MxNetModel()
+
+In this baseline, we use :py:class:`bob.bio.face.preprocessor.FaceCrop` with ``cropped_positions={'leye':(49,72), 'reye':(49,38)}``
+and ``cropped_image_size=(112,112)`` as the preprocessor,
+`LResNet50E-IR,ArcFace@ms1m-refine-v1 <https://github.com/deepinsight/insightface/wiki/Model-Zoo#32-lresnet50e-irarcfacems1m-refine-v1>`_ as the extractor, and ``distance-cosine`` as the algorithm. Testing on the LFW database yields the following ROC plot:
+
+.. figure:: img/mxnet_lfw_pipe.png
+  :figwidth: 75%
+  :align: center
+  :alt: Face recognition results of LFW database.
+
+  ROC plot for LFW database using the pretrained extractor with MxNet Interface.
+
+
+OpenCV
+======
+* ``opencv-pipe``: VGG Face descriptor pretrained models, i.e. `Caffe model <https://www.robots.ox.ac.uk/~vgg/software/vgg_face/>`_
+
+1.   Set Path of Model
+       
+   .. code-block:: sh
+   
+      $ bob config set bob.extractor_model.opencv /PATH/TO/OPENCVMODEL/
+      $ bob config set bob.extractor_weights.opencv /PATH/TO/OPENCVWEIGHTS/
+
+2.   Call the extractor in the configuration
+       
+   .. code-block:: py
+   
+      from bob.bio.face.extractor import OpenCVModel
+      extractor_transformer = OpenCVModel()
+
+In this baseline, we use :py:class:`bob.bio.face.preprocessor.FaceCrop` with ``cropped_positions={"leye": (98, 144), "reye": (98, 76)}``
+and ``cropped_image_size=(224,224)`` as the preprocessor,
+`vgg_face_caffe.tar.gz <https://www.robots.ox.ac.uk/~vgg/software/vgg_face/>`_ as the extractor, and ``distance-cosine`` as the algorithm. Testing on the LFW database yields the following ROC plot:
+
+.. figure:: img/opencv_lfw_pipe.png
+  :figwidth: 75%
+  :align: center
+  :alt: Face recognition results of LFW database.
+
+  ROC plot for LFW database using the pretrained CAFFE extractor with OpenCV Interface.
+
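+For reference, the preprocessor of this baseline can be written as follows
+(a sketch using the crop geometry quoted above):
+
+.. code-block:: py
+
+    from bob.bio.face.preprocessor import FaceCrop
+
+    preprocessor_transformer = FaceCrop(
+        cropped_image_size=(224, 224),
+        cropped_positions={"leye": (98, 144), "reye": (98, 76)},
+        color_channel="rgb",
+    )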
+
+PyTorch
+=======
+* ``pytorch-pipe-v1``: PyTorch network that extracts 1000-dimensional features, trained by Manuel Günther, as described in [LGB18]_
+
+
+1.   Set Path of Model
+       
+   .. code-block:: sh
+   
+      $ bob config set bob.extractor_model.pytorch /PATH/TO/PYTORCHMODEL/
+      $ bob config set bob.extractor_weights.pytorch /PATH/TO/PYTORCHWEIGHTS/
+
+2.   Call the extractor in the configuration
+       
+   .. code-block:: py
+   
+      from bob.bio.face.extractor import PyTorchLoadedModel
+      extractor_transformer = PyTorchLoadedModel()
+
+In this baseline, we use :py:class:`bob.bio.face.preprocessor.FaceCrop` with ``cropped_positions={'leye':(110,144), 'reye':(110,96)}``
+and ``cropped_image_size=(224,224)`` as the preprocessor,
+the PyTorch model described in [LGB18]_ as the extractor, and ``distance-cosine`` as the algorithm. Testing on the LFW database yields the following ROC plot:
+
+.. figure:: img/pytorch_lfw_pipe.png
+  :figwidth: 75%
+  :align: center
+  :alt: Face recognition results of LFW database.
+
+  ROC plot for LFW database using the pretrained extractor with PyTorch Interface.
+
+
+PyTorch Import from Library
+===========================
+* ``pytorch-pipe-v2``: Inception Resnet face recognition model from `facenet_pytorch <https://github.com/timesler/facenet-pytorch>`_
+
+
+1.   Call the extractor and model in the configuration
+       
+    .. code-block:: py
+
+        from bob.bio.face.extractor import PyTorchLibraryModel
+        from facenet_pytorch import InceptionResnetV1
+        model = InceptionResnetV1(pretrained='vggface2').eval()
+        extractor_transformer = PyTorchLibraryModel(model=model)
+
+
+In this baseline, we use :py:class:`bob.bio.face.preprocessor.FaceCrop` with ``cropped_positions={'leye':(110,144), 'reye':(110,96)}``
+and ``cropped_image_size=(224,224)`` as the preprocessor,
+`facenet_pytorch <https://github.com/timesler/facenet-pytorch>`_ as the extractor, and ``distance-cosine`` as the algorithm. Testing on the LFW database yields the following ROC plot:
+
+.. figure:: img/pytorch_library_lfw_pipe.png
+  :figwidth: 75%
+  :align: center
+  :alt: Face recognition results of LFW database.
+
+  ROC plot for LFW database using the pretrained extractor called directly from PyTorch library.
+
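+The resulting ``extractor_transformer`` behaves like any other transformer:
+calling ``extractor_transformer.transform(X)`` on a batch of preprocessed
+images returns one embedding per image (``X`` stands for a hypothetical batch
+of cropped faces).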
+
+TensorFlow
+==========
+* ``tf-pipe``: Inception Resnet v2 model trained using the MSCeleb dataset in the context of the work published by [TFP18]_
+
+1.   Set Path of Model
+       
+    .. code-block:: sh
+   
+        $ bob config set bob.extractor_model.tf /PATH/TO/MODEL/
+
+2.   Call the extractor in the configuration
+       
+    .. code-block:: py
+
+        from bob.bio.face.extractor import TensorFlowModel
+        extractor_transformer = TensorFlowModel()
+
+In this baseline, we use :py:class:`bob.bio.face.preprocessor.FaceCrop` with ``cropped_positions={'leye':(80,100), 'reye':(80,60)}``
+and ``cropped_image_size=(160,160)`` as the preprocessor,
+the Inception ResNet v2 model of [TFP18]_ as the extractor, and ``distance-cosine`` as the algorithm. Testing on the LFW database yields the following ROC plot:
+
+.. figure:: img/tensorflow_pipe.png
+  :figwidth: 75%
+  :align: center
+  :alt: Face recognition results of LFW database.
+
+  ROC plot for LFW database using the pretrained extractor with TensorFlow Interface.
+
+
+
+Special Case: What if none of the above interfaces is compatible with your model?
+-----------------------------------------------------------------------------------
+Please use `MMdnn <https://github.com/microsoft/MMdnn>`_ to convert your model into one of the supported interfaces.
+
+
diff --git a/doc/img/mxnet_lfw_pipe.png b/doc/img/mxnet_lfw_pipe.png
new file mode 100644
index 0000000000000000000000000000000000000000..802d11f72d753bb832a9d65760e03e3338115c6f
Binary files /dev/null and b/doc/img/mxnet_lfw_pipe.png differ
diff --git a/doc/img/opencv_lfw_pipe.png b/doc/img/opencv_lfw_pipe.png
new file mode 100644
index 0000000000000000000000000000000000000000..024381d9ba1b56307e77c67461659cfc2134c6f2
Binary files /dev/null and b/doc/img/opencv_lfw_pipe.png differ
diff --git a/doc/img/pytorch_lfw_pipe.png b/doc/img/pytorch_lfw_pipe.png
new file mode 100644
index 0000000000000000000000000000000000000000..c68d7b53e6306d0d15ed78cdb421df6db2f14085
Binary files /dev/null and b/doc/img/pytorch_lfw_pipe.png differ
diff --git a/doc/img/pytorch_library_lfw_pipe.png b/doc/img/pytorch_library_lfw_pipe.png
new file mode 100644
index 0000000000000000000000000000000000000000..162a148c0ceaa545b1f8654e28a6f5fbb91b3394
Binary files /dev/null and b/doc/img/pytorch_library_lfw_pipe.png differ
diff --git a/doc/img/tensorflow_pipe.png b/doc/img/tensorflow_pipe.png
new file mode 100644
index 0000000000000000000000000000000000000000..29dbc0fabfe9c0e9d5a47cb5b67fa6921dfca0de
Binary files /dev/null and b/doc/img/tensorflow_pipe.png differ
diff --git a/doc/implemented.rst b/doc/implemented.rst
index 75acc7129ef0132eb8ad762c73ddf7e53cb5b204..7a3074a4f92189f275caa5708a094ce8e6b07f75 100644
--- a/doc/implemented.rst
+++ b/doc/implemented.rst
@@ -13,6 +13,7 @@ Databases
 .. autosummary::
    bob.bio.face.database.ARFaceBioDatabase
    bob.bio.face.database.AtntBioDatabase
+   bob.bio.face.database.CasiaAfricaDatabase
    bob.bio.face.database.MobioDatabase
    bob.bio.face.database.ReplayBioDatabase
    bob.bio.face.database.ReplayMobileBioDatabase
@@ -57,11 +58,11 @@ Image Feature Extractors
    bob.bio.face.extractor.DCTBlocks
    bob.bio.face.extractor.GridGraph
    bob.bio.face.extractor.LGBPHS
-   bob.bio.face.extractor.mxnet_model
-   bob.bio.face.extractor.pytorch_loaded_model
-   bob.bio.face.extractor.pytorch_library_model
-   bob.bio.face.extractor.tf_model
-   bob.bio.face.extractor.opencv_model
+   bob.bio.face.extractor.MxNetModel
+   bob.bio.face.extractor.PyTorchLoadedModel
+   bob.bio.face.extractor.PyTorchLibraryModel
+   bob.bio.face.extractor.TensorFlowModel
+   bob.bio.face.extractor.OpenCVModel
 
 Face Recognition Algorithms
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/index.rst b/doc/index.rst
index 776d35c6c8d8eaf706e1ba713f3fc8f1c75eee69..0db6b94a88e964323774ceeafb38d1276a1f78ed 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -49,6 +49,7 @@ Users Guide
 
    baselines
    leaderboard/leaderboard
+   deeplearningextractor
    references
    annotators
    faq
diff --git a/doc/references.rst b/doc/references.rst
index bbb5417a0b6d3bf20a07c65c115921c57437ff0f..7d6d919cf3fd7011b4165ca381b211d023c93c95 100644
--- a/doc/references.rst
+++ b/doc/references.rst
@@ -17,4 +17,4 @@ References
 .. [ZSQ09]  *W. Zhang, S. Shan, L. Qing, X. Chen and W. Gao*. **Are Gabor phases really useless for face recognition?** Pattern Analysis & Applications, 12:301-307, 2009.
 .. [TFP18] de Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
 .. [HRM06]   *G. Heusch, Y. Rodriguez, and S. Marcel*. **Local Binary Patterns as an Image Preprocessing for Face Authentication**. In IEEE International Conference on Automatic Face and Gesture Recognition (AFGR), 2006.
-.. [LGB18]    *C. Li, M. Gunther and T. E. Boult*. **ECLIPSE: Ensembles of Centroids Leveraging Iteratively Processed Spatial Eclipse Clustering**. 2018 IEEE Winter Conference on Applications of Computer Vision (WACV), Lake Tahoe, NV, USA, 2018, pp. 131-140, doi: 10.1109/WACV.2018.00021.
\ No newline at end of file
+.. [LGB18]  *C. Li, M. Gunther and T. E. Boult*. **ECLIPSE: Ensembles of Centroids Leveraging Iteratively Processed Spatial Eclipse Clustering**. IEEE Winter Conference on Applications of Computer Vision (WACV), 2018.
diff --git a/setup.py b/setup.py
index 8a1c7be330b11bceddfba1b65ed1c3cfc1b132bd..13811f157cca4b27a5710c794724cbd78cfa1ce8 100644
--- a/setup.py
+++ b/setup.py
@@ -139,9 +139,10 @@
             "lgbphs = bob.bio.face.config.baseline.lgbphs:transformer",
             "dummy = bob.bio.face.config.baseline.dummy:transformer",
             "mxnet-pipe = bob.bio.face.config.baseline.mxnet_pipe:transformer",
+            "mxnet-tinyface = bob.bio.face.config.baseline.mxnet_tinyface:transformer",
             "pytorch-pipe-v1 = bob.bio.face.config.baseline.pytorch_pipe_v1:transformer",
             "pytorch-pipe-v2 = bob.bio.face.config.baseline.pytorch_pipe_v2:transformer",
-            "tf-pipe = bob.bio.face.config.baseline.ty_pipe:transformer",
+            "tf-pipe = bob.bio.face.config.baseline.tf_pipe:transformer",
             "opencv-pipe = bob.bio.face.config.baseline.opencv_pipe:transformer",
         ],
         # baselines
@@ -157,11 +158,13 @@
             "lda = bob.bio.face.config.baseline.lda:pipeline",
             "dummy = bob.bio.face.config.baseline.dummy:pipeline",
             "resnet50-msceleb-arcface-2021 = bob.bio.face.config.baseline.resnet50_msceleb_arcface_2021:pipeline",
+            "resnet50-vgg2-arcface-2021 = bob.bio.face.config.baseline.resnet50_vgg2_arcface_2021:pipeline",
             "mobilenetv2-msceleb-arcface-2021 = bob.bio.face.config.baseline.mobilenetv2_msceleb_arcface_2021",
             "mxnet-pipe = bob.bio.face.config.baseline.mxnet_pipe:pipeline",
+            "mxnet-tinyface = bob.bio.face.config.baseline.mxnet_tinyface:pipeline",
             "pytorch-pipe-v1 = bob.bio.face.config.baseline.pytorch_pipe_v1:pipeline",
             "pytorch-pipe-v2 = bob.bio.face.config.baseline.pytorch_pipe_v2:pipeline",
-            "tf-pipe = bob.bio.face.config.baseline.ty_pipe:pipeline",
+            "tf-pipe = bob.bio.face.config.baseline.tf_pipe:pipeline",
             "opencv-pipe = bob.bio.face.config.baseline.opencv_pipe:pipeline",
         ],
         "bob.bio.config": [
@@ -175,9 +178,10 @@
             "lgbphs = bob.bio.face.config.baseline.lgbphs",
             "lda = bob.bio.face.config.baseline.lda",
             "mxnet-pipe = bob.bio.face.config.baseline.mxnet_pipe",
+            "mxnet-tinyface = bob.bio.face.config.baseline.mxnet_tinyface",
             "pytorch-pipe-v1 = bob.bio.face.config.baseline.pytorch_pipe_v1",
             "pytorch-pipe-v2 = bob.bio.face.config.baseline.pytorch_pipe_v2",
-            "tf-pipe = bob.bio.face.config.baseline.ty_pipe",
+            "tf-pipe = bob.bio.face.config.baseline.tf_pipe",
             "opencv-pipe = bob.bio.face.config.baseline.opencv_pipe",
             "arface            = bob.bio.face.config.database.arface",
             "atnt              = bob.bio.face.config.database.atnt",
@@ -195,11 +199,12 @@
             "replaymobile-img-spoof  = bob.bio.face.config.database.replaymobile_spoof",
             "fargo  = bob.bio.face.config.database.fargo",
             "meds = bob.bio.face.config.database.meds",
+            "casia-africa = bob.bio.face.config.database.casia_africa",
             "morph = bob.bio.face.config.database.morph",
             "casia-africa = bob.bio.face.config.database.casia_africa",
             "pola-thermal = bob.bio.face.config.database.pola_thermal",
             "cbsr-nir-vis-2 = bob.bio.face.config.database.cbsr_nir_vis_2",
             "resnet50-msceleb-arcface-2021 = bob.bio.face.config.baseline.resnet50_msceleb_arcface_2021",
+            "resnet50-vgg2-arcface-2021 = bob.bio.face.config.baseline.resnet50_vgg2_arcface_2021",
             "mobilenetv2-msceleb-arcface-2021 = bob.bio.face.config.baseline.mobilenetv2_msceleb_arcface_2021",
         ],
         "bob.bio.cli": [