diff --git a/bob/bio/face/config/baseline/opencv_pipe.py b/bob/bio/face/config/baseline/opencv_pipe.py
index 8d8c63f8dc409f59990792c31a401166ad5a4e16..aafe25f7adb8158115fe50cac1b5e890064bbb4d 100644
--- a/bob/bio/face/config/baseline/opencv_pipe.py
+++ b/bob/bio/face/config/baseline/opencv_pipe.py
@@ -1,7 +1,7 @@
 import bob.bio.base
 from bob.bio.face.preprocessor import FaceCrop
 from bob.bio.base.transformers.preprocessor import PreprocessorTransformer
-from bob.bio.face.extractor import opencv_model
+from bob.bio.face.extractor import OpenCVModel
 from bob.bio.base.extractor import Extractor
 from bob.bio.base.transformers import ExtractorTransformer
 from bob.bio.base.algorithm import Distance
@@ -35,11 +35,11 @@ preprocessor_transformer = FaceCrop(
     fixed_positions=fixed_positions,
 )
 
-cropped_positions = {"leye": (100, 140), "reye": (100, 95)}
+cropped_positions = {"leye": (98, 144), "reye": (98, 76)}
 # Preprocessor
 preprocessor_transformer = FaceCrop(
     cropped_image_size=(224, 224),
-    cropped_positions={"leye": (100, 140), "reye": (100, 95)},
+    cropped_positions={"leye": (98, 144), "reye": (98, 76)},
     color_channel="rgb",
     fixed_positions=fixed_positions,
 )
@@ -52,7 +52,7 @@ transform_extra_arguments = (
 
 
 # Extractor
-extractor_transformer = opencv_model()
+extractor_transformer = OpenCVModel()
 
 
 # Algorithm
diff --git a/bob/bio/face/extractor/MxNetModel.py b/bob/bio/face/extractor/MxNetModel.py
index dd3ca8da0c8151b6a131d17b28f8e38347230177..b35750cd2e5896a45551450c896555968b32e7de 100644
--- a/bob/bio/face/extractor/MxNetModel.py
+++ b/bob/bio/face/extractor/MxNetModel.py
@@ -94,4 +94,4 @@ class MxNetModel(TransformerMixin, BaseEstimator):
         return d
 
     def _more_tags(self):
-        return {"stateless": True, "requires_fit": False}
\ No newline at end of file
+        return {"stateless": True, "requires_fit": False}
diff --git a/bob/bio/face/extractor/OpenCVModel.py b/bob/bio/face/extractor/OpenCVModel.py
index 628a98ce954e051a81edc446ad9c85e42a865996..3fbdb499d5970a4c3757a6d2f250c4d7149b7589 100644
--- a/bob/bio/face/extractor/OpenCVModel.py
+++ b/bob/bio/face/extractor/OpenCVModel.py
@@ -99,4 +99,4 @@ class OpenCVModel(TransformerMixin, BaseEstimator):
         return d
 
     def _more_tags(self):
-        return {"stateless": True, "requires_fit": False}
\ No newline at end of file
+        return {"stateless": True, "requires_fit": False}
diff --git a/bob/bio/face/extractor/PyTorchModel.py b/bob/bio/face/extractor/PyTorchModel.py
index cf46de660a246e2d7beda9d13c0b255882d9eb62..883e19c8c243cad3f33fabd27dabe9166856c888 100644
--- a/bob/bio/face/extractor/PyTorchModel.py
+++ b/bob/bio/face/extractor/PyTorchModel.py
@@ -165,4 +165,4 @@ class PyTorchLibraryModel(TransformerMixin, BaseEstimator):
         return d
 
     def _more_tags(self):
-        return {"stateless": True, "requires_fit": False}
\ No newline at end of file
+        return {"stateless": True, "requires_fit": False}
diff --git a/bob/bio/face/extractor/TensorFlowModel.py b/bob/bio/face/extractor/TensorFlowModel.py
index dca1bc9a9104d31dc94835a10d60b780dd84d4ea..b1ddcdd55f947c5aacbb18d1beab015852dd28e3 100644
--- a/bob/bio/face/extractor/TensorFlowModel.py
+++ b/bob/bio/face/extractor/TensorFlowModel.py
@@ -88,4 +88,4 @@ class TensorFlowModel(TransformerMixin, BaseEstimator):
         return d
 
     def _more_tags(self):
-        return {"stateless": True, "requires_fit": False}
\ No newline at end of file
+        return {"stateless": True, "requires_fit": False}
diff --git a/doc/deeplearningextractor.rst b/doc/deeplearningextractor.rst
index 466a0fa4fa224cb2a47deb96d11e440df48a69b7..378a35d8d292155e9db8790461ac3ded54bdbe95 100644
--- a/doc/deeplearningextractor.rst
+++ b/doc/deeplearningextractor.rst
@@ -234,7 +234,7 @@ In this baseline, we use :py:class:`bob.bio.face.preprocessor.FaceCrop` with  ``
 and ``cropped_image_size=(160,160)`` 
 as preprocessor,  Inception Resnet v2  in [TFP18]_ as extractor, and ``distance-cosine`` as the algorithm. By testing on LFW database, we get the following ROC plot:
 
-.. figure:: img/tensorflow_pipe.png
+.. figure:: img/tensorflow_lfw_pipe.png
   :figwidth: 75%
   :align: center
   :alt: Face recognition results of LFW database.
diff --git a/doc/img/opencv_lfw_pipe.png b/doc/img/opencv_lfw_pipe.png
index 024381d9ba1b56307e77c67461659cfc2134c6f2..4d67b3c175e954919118fbd76a36c897ed7c0bba 100644
Binary files a/doc/img/opencv_lfw_pipe.png and b/doc/img/opencv_lfw_pipe.png differ
diff --git a/doc/implemented.rst b/doc/implemented.rst
index 7a3074a4f92189f275caa5708a094ce8e6b07f75..d45920982be0ba38038160d2c498e352b4f37e6f 100644
--- a/doc/implemented.rst
+++ b/doc/implemented.rst
@@ -64,6 +64,7 @@ Image Feature Extractors
    bob.bio.face.extractor.TensorFlowModel
    bob.bio.face.extractor.OpenCVModel
 
+
 Face Recognition Algorithms
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
 
diff --git a/requirements.txt b/requirements.txt
index bcd1763c12212b5e0c1ea6a1bbe82674f26f7f28..aa33f11f510faa657337800b0969ed16d350f4b8 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -21,5 +21,5 @@ matplotlib   # for plotting
 mxnet
 opencv-python
 six
 scikit-image
-scikit-learn # for pipelines Tranformers
\ No newline at end of file
+scikit-learn # for pipelines Tranformers