diff --git a/bob/bio/face/annotator/bobiptinyface.py b/bob/bio/face/annotator/bobiptinyface.py
index fc06d274b7b23bf52058e15574a9af8d5c6d9c3a..3b988a15c06693bafcac769a418c9e35a1a84c2b 100644
--- a/bob/bio/face/annotator/bobiptinyface.py
+++ b/bob/bio/face/annotator/bobiptinyface.py
@@ -26,6 +26,9 @@ class BobIpTinyface(Base):
             Annotations with (topleft, bottomright) keys (or None).
         """
 
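+        # Run the TinyFace detector; it returns a list of annotations or None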
+        annotations = self.tinyface.detect(image)
+
         if annotations is not None:
             return annotations[0]
         else:
diff --git a/bob/bio/face/config/baseline/mxnet_tinyface.py b/bob/bio/face/config/baseline/mxnet_tinyface.py
new file mode 100644
index 0000000000000000000000000000000000000000..451412aa10f808b4a2858355b152ba8207e53449
--- /dev/null
+++ b/bob/bio/face/config/baseline/mxnet_tinyface.py
@@ -0,0 +1,40 @@
+from bob.bio.face.preprocessor import FaceCrop
+from bob.bio.face.annotator import BobIpTinyface
+from bob.bio.face.extractor import MxNetModel
+
+import scipy.spatial
+from bob.bio.base.pipelines.vanilla_biometrics import Distance
+from bob.bio.base.pipelines.vanilla_biometrics import VanillaBiometricsPipeline
+from sklearn.pipeline import make_pipeline
+from bob.pipelines import wrap
+
+
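+# Annotator: TinyFace face detector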
+annotator_transformer = BobIpTinyface()
+
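+# Preprocessor: crop the detected face to 112x112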
+preprocessor_transformer = FaceCrop(
+    cropped_image_size=(112, 112),
+    cropped_positions={"leye": (49, 72), "reye": (49, 38)},
+    color_channel="rgb",
+    annotator=annotator_transformer,
+)
+
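+# Extractor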
+extractor_transformer = MxNetModel()
+
+
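+# Algorithm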
+algorithm = Distance(
+    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
+)
+
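+# Chain the preprocessor and extractor into a sample-wise transformer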
+transformer = make_pipeline(
+    wrap(["sample"], preprocessor_transformer),
+    wrap(["sample"], extractor_transformer)
+)
+
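+# Assemble the Vanilla Biometric pipeline and execute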
+pipeline = VanillaBiometricsPipeline(transformer, algorithm)
+transformer = pipeline.transformer
diff --git a/bob/bio/face/config/baseline/opencv_pipe.py b/bob/bio/face/config/baseline/opencv_pipe.py
index 8d8c63f8dc409f59990792c31a401166ad5a4e16..afe4c9405b1f7443f9e66881e3b1a91bc2f8801d 100644
--- a/bob/bio/face/config/baseline/opencv_pipe.py
+++ b/bob/bio/face/config/baseline/opencv_pipe.py
@@ -1,7 +1,7 @@
 import bob.bio.base
 from bob.bio.face.preprocessor import FaceCrop
 from bob.bio.base.transformers.preprocessor import PreprocessorTransformer
-from bob.bio.face.extractor import opencv_model
+from bob.bio.face.extractor import OpenCVModel
 from bob.bio.base.extractor import Extractor
 from bob.bio.base.transformers import ExtractorTransformer
 from bob.bio.base.algorithm import Distance
@@ -35,15 +35,6 @@ preprocessor_transformer = FaceCrop(
     fixed_positions=fixed_positions,
 )
 
-cropped_positions = {"leye": (100, 140), "reye": (100, 95)}
-# Preprocessor
-preprocessor_transformer = FaceCrop(
-    cropped_image_size=(224, 224),
-    cropped_positions={"leye": (100, 140), "reye": (100, 95)},
-    color_channel="rgb",
-    fixed_positions=fixed_positions,
-)
-
 transform_extra_arguments = (
     None
     if (cropped_positions is None or fixed_positions is not None)
@@ -52,7 +43,7 @@ transform_extra_arguments = (
 
 
 # Extractor
-extractor_transformer = opencv_model()
+extractor_transformer = OpenCVModel()
 
 
 # Algorithm
diff --git a/bob/bio/face/config/baseline/pytorch_pipe_v1.py b/bob/bio/face/config/baseline/pytorch_pipe_v1.py
index 843e53818a851b7f1cee481c7c011eeff8f4c8c8..250c872f3b0913c62baaf35f232498326b62bc9c 100644
--- a/bob/bio/face/config/baseline/pytorch_pipe_v1.py
+++ b/bob/bio/face/config/baseline/pytorch_pipe_v1.py
@@ -23,8 +23,6 @@ else:
     fixed_positions = None
 
 
-cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
-
 cropped_positions = {"leye": (110, 144), "reye": (110, 96)}
 
 preprocessor_transformer = FaceCrop(
@@ -40,12 +38,6 @@ transform_extra_arguments = (
     else (("annotations", "annotations"),)
 )
 
-transform_extra_arguments = (
-    None
-    if (cropped_positions is None or fixed_positions is not None)
-    else (("annotations", "annotations"),)
-)
-
 
 extractor_transformer = PyTorchLoadedModel()
 
diff --git a/bob/bio/face/config/baseline/pytorch_pipe_v2.py b/bob/bio/face/config/baseline/pytorch_pipe_v2.py
index 90788963d6440c828778638cbc258eaa702a1376..a65c6c43a9277432b5f6b6ea86160599f784c775 100644
--- a/bob/bio/face/config/baseline/pytorch_pipe_v2.py
+++ b/bob/bio/face/config/baseline/pytorch_pipe_v2.py
@@ -24,8 +24,6 @@ else:
     fixed_positions = None
 
 
-cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
-
 cropped_positions = {"leye": (110, 144), "reye": (110, 96)}
 
 preprocessor_transformer = FaceCrop(
@@ -42,13 +40,6 @@ transform_extra_arguments = (
 )
 
 
-transform_extra_arguments = (
-    None
-    if (cropped_positions is None or fixed_positions is not None)
-    else (("annotations", "annotations"),)
-)
-
-
 model = InceptionResnetV1(pretrained="vggface2").eval()
 extractor_transformer = PyTorchLibraryModel(model=model)
 
diff --git a/doc/deeplearningextractor.rst b/doc/deeplearningextractor.rst
index 466a0fa4fa224cb2a47deb96d11e440df48a69b7..378a35d8d292155e9db8790461ac3ded54bdbe95 100644
--- a/doc/deeplearningextractor.rst
+++ b/doc/deeplearningextractor.rst
@@ -234,7 +234,7 @@ In this baseline, we use :py:class:`bob.bio.face.preprocessor.FaceCrop` with  ``
 and ``cropped_image_size=(160,160)`` 
 as preprocessor,  Inception Resnet v2  in [TFP18]_ as extractor, and ``distance-cosine`` as the algorithm. By testing on LFW database, we get the following ROC plot:
 
-.. figure:: img/tensorflow_pipe.png
+.. figure:: img/tensorflow_lfw_pipe.png
   :figwidth: 75%
   :align: center
   :alt: Face recognition results of LFW database.
diff --git a/doc/img/tensorflow_lfw_pipe.png b/doc/img/tensorflow_lfw_pipe.png
new file mode 100644
index 0000000000000000000000000000000000000000..29dbc0fabfe9c0e9d5a47cb5b67fa6921dfca0de
Binary files /dev/null and b/doc/img/tensorflow_lfw_pipe.png differ