diff --git a/bob/bio/face/annotator/Base.py b/bob/bio/face/annotator/Base.py
index 901e3919fe5d454d0a98f2dd3b943260a2f0fd95..880b243e8cc13aff9274fabc6c6421b84de1cb3d 100644
--- a/bob/bio/face/annotator/Base.py
+++ b/bob/bio/face/annotator/Base.py
@@ -1,5 +1,5 @@
 import bob.bio.base.annotator
-import bob.bio.face.preprocessor  # import for documentation
+from bob.bio.base.annotator.FailSafe import translate_kwargs
 
 
 class Base(bob.bio.base.annotator.Annotator):
@@ -21,3 +21,22 @@ class Base(bob.bio.base.annotator.Annotator):
             The extra arguments that may be passed.
         """
         raise NotImplementedError()
+
+    def transform(self, samples, **kwargs):
+        """Annotates an image and returns annotations in a dictionary.
+
+        All annotators should add at least the ``topleft`` and ``bottomright``
+        coordinates. Some currently known annotation points such as ``reye``
+        and ``leye`` are formalized in
+        :any:`bob.bio.face.preprocessor.FaceCrop`.
+
+        Parameters
+        ----------
+        samples : list
+            List of samples. The image in each sample object should be a Bob
+            format (#Channels, Height, Width) RGB image.
+        **kwargs
+            Extra arguments that may be passed.
+        """
+        kwargs = translate_kwargs(kwargs, len(samples))
+        return [self.annotate(sample, **kw) for sample, kw in zip(samples, kwargs)]
diff --git a/bob/bio/face/annotator/bobipfacedetect.py b/bob/bio/face/annotator/bobipfacedetect.py
index a12af8a119ad41f45ceaaa9eeab07cd9d90ad1c0..401e01d75449e45854d664b8daac18f1b1a4aeb7 100644
--- a/bob/bio/face/annotator/bobipfacedetect.py
+++ b/bob/bio/face/annotator/bobipfacedetect.py
@@ -41,9 +41,6 @@ class BobIpFacedetect(Base):
                  eye_estimate=False,
                  **kwargs):
         super(BobIpFacedetect, self).__init__(**kwargs)
-        self.sampler = bob.ip.facedetect.Sampler(
-            scale_factor=scale_base, lowest_scale=lowest_scale,
-            distance=distance)
         if cascade is None:
             self.cascade = bob.ip.facedetect.default_cascade()
         else:
@@ -51,6 +48,15 @@ class BobIpFacedetect(Base):
                 bob.io.base.HDF5File(cascade))
         self.detection_overlap = detection_overlap
         self.eye_estimate = eye_estimate
+        self.scale_base = scale_base
+        self.lowest_scale = lowest_scale
+        self.distance = distance
+        self.fit()
+
+    def fit(self, X=None, y=None, **kwargs):
+        self.sampler_ = bob.ip.facedetect.Sampler(
+            scale_factor=self.scale_base, lowest_scale=self.lowest_scale,
+            distance=self.distance)
 
     def annotate(self, image, **kwargs):
         """Return topleft and bottomright and expected eye positions
@@ -71,7 +77,7 @@ class BobIpFacedetect(Base):
         if image.ndim == 3:
             image = bob.ip.color.rgb_to_gray(image)
         bbx, quality = bob.ip.facedetect.detect_single_face(
-            image, self.cascade, self.sampler, self.detection_overlap)
+            image, self.cascade, self.sampler_, self.detection_overlap)
 
         landmarks = bounding_box_to_annotations(bbx)
         landmarks['quality'] = quality
diff --git a/bob/bio/face/annotator/bobipflandmark.py b/bob/bio/face/annotator/bobipflandmark.py
index 2fa5ad712faefa75c8cac31d6964699cc662fcf2..976a467cce9b8d0c9d5be9f6ace41db2b286b670 100644
--- a/bob/bio/face/annotator/bobipflandmark.py
+++ b/bob/bio/face/annotator/bobipflandmark.py
@@ -1,6 +1,5 @@
 from . import Base
 import bob.ip.color
-import bob.ip.flandmark
 
 
 class BobIpFlandmark(Base):
@@ -21,6 +20,7 @@ class BobIpFlandmark(Base):
 
     def __init__(self, **kwargs):
         super(BobIpFlandmark, self).__init__(**kwargs)
+        import bob.ip.flandmark
         self.flandmark = bob.ip.flandmark.Flandmark()
 
     def annotate(self, image, annotations, **kwargs):
diff --git a/bob/bio/face/annotator/bobipmtcnn.py b/bob/bio/face/annotator/bobipmtcnn.py
index 6c37941b77ce0b1c838cf1bd55d0e29b27dddf37..07ed904bba728c32248301b3233d832ff082ef52 100644
--- a/bob/bio/face/annotator/bobipmtcnn.py
+++ b/bob/bio/face/annotator/bobipmtcnn.py
@@ -2,12 +2,25 @@ from . import Base
 
 
 class BobIpMTCNN(Base):
-    """Annotator using mtcnn in bob.ip.tensorflow_extractor"""
+    """Annotator using mtcnn in bob.ip.facedetect"""
 
-    def __init__(self, **kwargs):
+    def __init__(self, min_size=40, factor=0.709, thresholds=(0.6, 0.7, 0.7), **kwargs):
         super(BobIpMTCNN, self).__init__(**kwargs)
-        from bob.ip.tensorflow_extractor import MTCNN
-        self.detector = MTCNN()
+        from bob.ip.facedetect.mtcnn import MTCNN
+
+        self.detector = MTCNN(min_size=min_size, factor=factor, thresholds=thresholds)
+
+    @property
+    def min_size(self):
+        return self.detector.min_size
+
+    @property
+    def factor(self):
+        return self.detector.factor
+
+    @property
+    def thresholds(self):
+        return self.detector.thresholds
 
     def annotate(self, image, **kwargs):
         """Annotates an image using mtcnn
@@ -26,9 +39,6 @@ class BobIpMTCNN(Base):
             mouthleft, mouthright, quality).
         """
         # return the annotations for the first/largest face
-
-
-
         annotations = self.detector.annotations(image)
 
         if annotations:
diff --git a/bob/bio/face/config/annotator/flandmark.py b/bob/bio/face/config/annotator/flandmark.py
index e473b8d8506cf770f5134c8bb1001bbb7c588711..863b9cae69b86c7a5e38bae4a6eb781bc27a8a1e 100644
--- a/bob/bio/face/config/annotator/flandmark.py
+++ b/bob/bio/face/config/annotator/flandmark.py
@@ -1,6 +1,7 @@
 from bob.bio.base.annotator import FailSafe
 from bob.bio.face.annotator import BobIpFacedetect, BobIpFlandmark
 
+# FLandmark requires the 'topleft' and 'bottomright' annotations
 annotator = FailSafe(
     [BobIpFacedetect(), BobIpFlandmark()],
     required_keys=('reye', 'leye'))
diff --git a/bob/bio/face/test/test_annotators.py b/bob/bio/face/test/test_annotators.py
index fa24275b616489a2d1e3684639e8eea15e778db2..f70d22b8b37138502583729692a01e4f93ea50c3 100644
--- a/bob/bio/face/test/test_annotators.py
+++ b/bob/bio/face/test/test_annotators.py
@@ -2,47 +2,83 @@ import bob.io.base
 import bob.io.base.test_utils
 import bob.io.image
 from bob.bio.face.annotator import (
-    BobIpFacedetect, BobIpFlandmark,
+    BobIpFacedetect,
+    BobIpFlandmark,
     min_face_size_validator)
 from bob.bio.base.annotator import FailSafe
+from bob.bio.face.annotator import BobIpMTCNN
 import numpy
 
-face_image = bob.io.base.load(bob.io.base.test_utils.datafile(
-    'testimage.jpg', 'bob.ip.facedetect'))
+from bob.bio.base.test.utils import is_library_available
 
+face_image = bob.io.base.load(
+    bob.io.base.test_utils.datafile(
+        'testimage.jpg', 'bob.ip.facedetect'
+    )
+)
+
+def _assert_mtcnn(annot):
+    """
+    Verifies that the MTCNN annotations are correct for ``testimage.jpg``
+    """
+    assert type(annot) is dict, annot
+    assert [int(x) for x in annot['topleft']] == [68, 76], annot
+    assert [int(x) for x in annot['bottomright']] == [344, 274], annot
+    assert [int(x) for x in annot['reye']] == [180, 129], annot
+    assert [int(x) for x in annot['leye']] == [175, 220], annot
+    assert numpy.allclose(annot['quality'], 0.9998975), annot
 
 def _assert_bob_ip_facedetect(annot):
     assert annot['topleft'] == (110, 82), annot
     assert annot['bottomright'] == (334, 268), annot
     assert numpy.allclose(annot['quality'], 39.209601948013685), annot
 
+@is_library_available("tensorflow")
+def test_mtcnn_annotator():
+    """
+    The MTCNN annotator should return the correct annotations.
+    """
+    mtcnn_annotator = BobIpMTCNN()
+    batch = [face_image]
+    annot_batch = mtcnn_annotator(batch)
+    _assert_mtcnn(annot_batch[0])
+
+def test_bob_ip_facedetect():
+    batch = [face_image]
+    annot = BobIpFacedetect()(batch)
+    _assert_bob_ip_facedetect(annot[0])
+
+def test_bob_ip_facedetect_eyes():
+    batch = [face_image]
+    annot = BobIpFacedetect(eye_estimate=True)(batch)
+    _assert_bob_ip_facedetect(annot[0])
+    assert [int(x) for x in annot[0]['reye']] == [175, 128], annot
+    assert [int(x) for x in annot[0]['leye']] == [175, 221], annot
+
+def test_fail_safe():
+    annotator = FailSafe(
+        [BobIpFacedetect(eye_estimate=True)],
+        required_keys=('reye', 'leye'),
+    )
+    batch = [face_image]
+    annot = annotator(batch)
+    _assert_bob_ip_facedetect(annot[0])
+    assert [int(x) for x in annot[0]['reye']] == [175, 128], annot
+    assert [int(x) for x in annot[0]['leye']] == [175, 221], annot
 
-def notest_bob_ip_facedetect():
-    annot = BobIpFacedetect()(face_image)
-    _assert_bob_ip_facedetect(annot)
-
-
-def notest_bob_ip_facedetect_eyes():
-    annot = BobIpFacedetect(eye_estimate=True)(face_image)
-    _assert_bob_ip_facedetect(annot)
-    assert [int(x) for x in annot['reye']] == [175, 128], annot
-    assert [int(x) for x in annot['leye']] == [175, 221], annot
-
-
-def notest_bob_ip_flandmark():
+def test_bob_ip_flandmark():
     annotator = FailSafe(
         [BobIpFacedetect(), BobIpFlandmark()],
         required_keys=('reye', 'leye'),
     )
-
-    annot = annotator(face_image)
-
-    _assert_bob_ip_facedetect(annot)
-    assert [int(x) for x in annot['reye']] == [183, 127], annot
-    assert [int(x) for x in annot['leye']] == [174, 223], annot
-
-
-def notest_min_face_size_validator():
+    batch = [face_image]
+    annot = annotator(batch)
+    print(annot)
+    _assert_bob_ip_facedetect(annot[0])
+    assert [int(x) for x in annot[0]['reye']] == [183, 127], annot
+    assert [int(x) for x in annot[0]['leye']] == [174, 223], annot
+
+def test_min_face_size_validator():
     valid = {
         'topleft': (0, 0),
         'bottomright': (32, 32),
diff --git a/requirements.txt b/requirements.txt
index 85daa8e08d1c6660e3d11fe9d6f798e3a18814ce..c31f24d176719e2b479f685e6ffa1149362a5039 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -20,4 +20,5 @@ bob.ip.facedetect
 bob.pipelines
 matplotlib   # for plotting
 six
-scikit-image
\ No newline at end of file
+scikit-image
+scikit-learn # for pipelines Transformers
diff --git a/setup.py b/setup.py
index b433bbecab1cf44c8ac11586ad1ba6c29edae0c1..a5f1e4147cd022eea0bd58f2dd9850dbcc7dc8f5 100644
--- a/setup.py
+++ b/setup.py
@@ -122,7 +122,6 @@ setup(
             'replay-img-spoof  = bob.bio.face.config.database.replay:replay_spoof',
             'replaymobile-img-licit  = bob.bio.face.config.database.replaymobile:replaymobile_licit',
             'replaymobile-img-spoof  = bob.bio.face.config.database.replaymobile:replaymobile_spoof',
-
             'fargo  = bob.bio.face.config.database.fargo:database',
         ],
 
@@ -134,6 +133,10 @@ setup(
         ],
 
         'bob.bio.transformer':[
+          'facedetect-eye-estimate = bob.bio.face.config.annotator.facedetect_eye_estimate:transformer',
+          'facedetect = bob.bio.face.config.annotator.facedetect:transformer',
+          'flandmark = bob.bio.face.config.annotator.flandmark:annotator',
+          'mtcnn = bob.bio.face.config.annotator.mtcnn:transformer',
           'facenet-sanderberg = bob.bio.face.config.baseline.facenet_sanderberg:transformer',
           'inception-resnetv1-casiawebface = bob.bio.face.config.baseline.inception_resnetv1_casiawebface:transformer',
           'inception-resnetv2-casiawebface = bob.bio.face.config.baseline.inception_resnetv2_casiawebface:transformer',