diff --git a/MANIFEST.in b/MANIFEST.in
index 172e30ee70074a8df8491258c549fd433f40cca1..e914578401a79a1279f0e671df9cbf43a2fc948c 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,7 +1,7 @@
 include README.rst bootstrap-buildout.py buildout.cfg develop.cfg COPYING requirements.txt version.txt
 recursive-include doc doc/plot doc/img *.py *.rst *.png *.ico
 recursive-include bob/ip/facedetect *.cpp *.h
-recursive-include bob/ip/facedetect/data *.jpg *.pos *.hdf5
+recursive-include bob/ip/facedetect/data *.jpg *.pos *.hdf5 *.png *.pb
 include bob/ip/facedetect/MCT_cascade.hdf5
 recursive-include bob/learn/boosting *.h *.cpp
 recursive-include bob/learn/boosting/data *.hdf5 *.tar.bz2
diff --git a/bob/ip/facedetect/__init__.py b/bob/ip/facedetect/__init__.py
index 3f7b0aad3a936063767e3086fbcc9ca65c583bae..a30b76acb2d34694b31555adef33f33c62357670 100644
--- a/bob/ip/facedetect/__init__.py
+++ b/bob/ip/facedetect/__init__.py
@@ -12,7 +12,6 @@ from .train import *
 
 from .detect import default_cascade, average_detections, best_detection, detect_single_face, detect_all_faces
 
-
 def get_config():
   """Returns a string containing the configuration information.
   """
diff --git a/bob/ip/facedetect/data/mtcnn.pb b/bob/ip/facedetect/data/mtcnn.pb
new file mode 100644
index 0000000000000000000000000000000000000000..4cc80c6c9b6bd45bb6c64e030a8433bed2726b30
Binary files /dev/null and b/bob/ip/facedetect/data/mtcnn.pb differ
diff --git a/bob/ip/facedetect/data/test_image_multi_face.png b/bob/ip/facedetect/data/test_image_multi_face.png
new file mode 100644
index 0000000000000000000000000000000000000000..f42a01af81fae4cb02e73e90a2f1cc0297b07822
Binary files /dev/null and b/bob/ip/facedetect/data/test_image_multi_face.png differ
diff --git a/bob/ip/facedetect/mtcnn.py b/bob/ip/facedetect/mtcnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..e18287eefc8382da6ced5da7e96752785a93f099
--- /dev/null
+++ b/bob/ip/facedetect/mtcnn.py
@@ -0,0 +1,140 @@
+# Adapted from the example at:
+# https://github.com/blaueck/tf-mtcnn/blob/master/mtcnn_tfv2.py
+
+import logging
+
+import pkg_resources
+from bob.io.image import to_matplotlib
+from bob.ip.color import gray_to_rgb
+
+logger = logging.getLogger(__name__)
+
+
+class MTCNN:
+
+    """MTCNN v1 wrapper for Tensorflow 2. See
+    https://kpzhang93.github.io/MTCNN_face_detection_alignment/index.html for
+    more details on MTCNN.
+
+    Attributes
+    ----------
+    factor : float
+        The scale factor of the image pyramid; a trade-off between detection
+        performance and speed.
+    min_size : int
+        The minimum size (in pixels) of faces to detect.
+    thresholds : tuple
+        The score thresholds of the three cascaded networks; a trade-off
+        between false positives and missed detections.
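+
+    Examples
+    --------
+    A minimal sketch (requires TensorFlow; ``image`` is an RGB image in Bob
+    format):
+
+    >>> from bob.ip.facedetect.mtcnn import MTCNN  # doctest: +SKIP
+    >>> annotations = MTCNN()(image)  # doctest: +SKIP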
+    """
+
+    def __init__(self, min_size=40, factor=0.709, thresholds=(0.6, 0.7, 0.7), **kwargs):
+        super().__init__(**kwargs)
+        self.min_size = min_size
+        self.factor = factor
+        self.thresholds = thresholds
+        self._graph_path = pkg_resources.resource_filename(__name__, "data/mtcnn.pb")
+
+        # avoid loading the graph at initialization; it is loaded lazily on first use
+        self._fun = None
+
+    @property
+    def mtcnn_fun(self):
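+        # imported locally so that importing this module does not require TensorFlow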
+        import tensorflow as tf
+
+        if self._fun is None:
+            # wrap graph function as a callable function
+            self._fun = tf.compat.v1.wrap_function(
+                self._graph_fn,
+                [
+                    tf.TensorSpec(shape=[None, None, 3], dtype=tf.float32),
+                ],
+            )
+        return self._fun
+
+    def _graph_fn(self, img):
+        import tensorflow as tf
+
+        with open(self._graph_path, "rb") as f:
+            graph_def = tf.compat.v1.GraphDef.FromString(f.read())
+
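+        # remap the frozen graph's input placeholders to our tensors and fetch its outputs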
+        prob, landmarks, box = tf.compat.v1.import_graph_def(
+            graph_def,
+            input_map={
+                "input:0": img,
+                "min_size:0": tf.convert_to_tensor(self.min_size, dtype=float),
+                "thresholds:0": tf.convert_to_tensor(self.thresholds, dtype=float),
+                "factor:0": tf.convert_to_tensor(self.factor, dtype=float),
+            },
+            return_elements=["prob:0", "landmarks:0", "box:0"],
+            name="",
+        )
+        return box, prob, landmarks
+
+    def detect(self, image):
+        """Detects all faces in the image.
+
+        Parameters
+        ----------
+        image : numpy.ndarray
+            An RGB (or grayscale) image in Bob format.
+
+        Returns
+        -------
+        tuple
+            A tuple of boxes, probabilities, and landmarks.
+        """
+        if len(image.shape) == 2:
+            image = gray_to_rgb(image)
+
+        # the image must be in Bob format, i.e. channels-first RGB
+        assert image.shape[0] == 3, image.shape
+        # the MTCNN graph expects a channels-last BGR image (OpenCV convention)
+        image = to_matplotlib(image)
+        image = image[..., ::-1]
+
+        boxes, probs, landmarks = self.mtcnn_fun(image)
+        return boxes, probs, landmarks
+
+    def annotations(self, image):
+        """Detects all faces in the image and returns annotations in bob format.
+
+        Parameters
+        ----------
+        image : numpy.ndarray
+            An RGB (or grayscale) image in Bob format.
+
+        Returns
+        -------
+        list
+            A list of annotations. Annotations are dictionaries that contain the
+            following keys: ``topleft``, ``bottomright``, ``reye``, ``leye``, ``nose``,
+            ``mouthright``, ``mouthleft``, and ``quality``.
+        """
+        boxes, probs, landmarks = self.detect(image)
+
+        # Iterate over all the detected faces
+        annots = []
+        for box, prob, lm in zip(boxes, probs, landmarks):
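+            # box is (top, left, bottom, right); lm packs the 5 landmark points
+            # as their 5 row (y) values followed by their 5 column (x) values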
+            topleft = float(box[0]), float(box[1])
+            bottomright = float(box[2]), float(box[3])
+            right_eye = float(lm[0]), float(lm[5])
+            left_eye = float(lm[1]), float(lm[6])
+            nose = float(lm[2]), float(lm[7])
+            mouthright = float(lm[3]), float(lm[8])
+            mouthleft = float(lm[4]), float(lm[9])
+            annots.append(
+                {
+                    "topleft": topleft,
+                    "bottomright": bottomright,
+                    "reye": right_eye,
+                    "leye": left_eye,
+                    "nose": nose,
+                    "mouthright": mouthright,
+                    "mouthleft": mouthleft,
+                    "quality": float(prob),
+                }
+            )
+        return annots
+
+    def __call__(self, img):
+        """Wrapper for the annotations method."""
+        return self.annotations(img)
diff --git a/bob/ip/facedetect/tests/test_mtcnn.py b/bob/ip/facedetect/tests/test_mtcnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3bb0c0324ba3474d85a6ba0ea4822ede36079ab
--- /dev/null
+++ b/bob/ip/facedetect/tests/test_mtcnn.py
@@ -0,0 +1,52 @@
+from bob.ip.facedetect.tests.utils import is_library_available
+
+import bob.io.image
+import bob.io.base
+import bob.io.base.test_utils
+
+import numpy
+
+
+# An image with one face
+face_image = bob.io.base.load(
+    bob.io.base.test_utils.datafile(
+        'testimage.jpg', 'bob.ip.facedetect'
+    )
+)
+
+# An image with 6 faces
+face_image_multiple = bob.io.base.load(
+    bob.io.base.test_utils.datafile(
+        'test_image_multi_face.png', 'bob.ip.facedetect'
+    )
+)
+
+
+def _assert_mtcnn_annotations(annot):
+    """
+    Verifies that MTCNN returns the correct coordinates for ``testimage``.
+    """
+    assert len(annot) == 1, f"len: {len(annot)}; {annot}"
+    face = annot[0]
+    assert [int(x) for x in face['topleft']] == [68, 76], face
+    assert [int(x) for x in face['bottomright']] == [344, 274], face
+    assert [int(x) for x in face['reye']] == [180, 129], face
+    assert [int(x) for x in face['leye']] == [175, 220], face
+    assert numpy.allclose(face['quality'], 0.9998974), face
+
+@is_library_available("tensorflow")
+def test_mtcnn():
+    """MTCNN should annotate one face correctly."""
+    from bob.ip.facedetect.mtcnn import MTCNN
+    mtcnn_annotator = MTCNN()
+    annot = mtcnn_annotator.annotations(face_image)
+    _assert_mtcnn_annotations(annot)
+
+@is_library_available("tensorflow")
+def test_mtcnn_multiface():
+    """MTCNN should find multiple faces in an image."""
+    from bob.ip.facedetect.mtcnn import MTCNN
+    mtcnn_annotator = MTCNN()
+    annot = mtcnn_annotator.annotations(face_image_multiple)
+    assert len(annot) == 6
diff --git a/bob/ip/facedetect/tests/utils.py b/bob/ip/facedetect/tests/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd655e8c79b68bd4acab5f89cb97d5178e97b60c
--- /dev/null
+++ b/bob/ip/facedetect/tests/utils.py
@@ -0,0 +1,22 @@
+import functools
+import importlib
+
+from nose.plugins.skip import SkipTest
+
+
+def is_library_available(library):
+    """Decorator to check if a library is present, before running that test"""
+
+    def _is_library_available(function):
+        @functools.wraps(function)
+        def wrapper(*args, **kwargs):
+            try:
+                importlib.import_module(library)
+
+                return function(*args, **kwargs)
+            except ImportError as e:
+                raise SkipTest(
+                    f"Skipping test since `{library}` is not available: %s" % e
+                )
+
+        return wrapper
+
+    return _is_library_available
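+
+
+# Example usage (as in tests/test_mtcnn.py): the decorated test runs only when
+# `tensorflow` is importable and is skipped otherwise:
+#
+#   @is_library_available("tensorflow")
+#   def test_mtcnn():
+#       ...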
diff --git a/conda/meta.yaml b/conda/meta.yaml
index d570ef53d7b52a1f3ab7b4301d91420a5748658d..a5e4cd1bf741d698bfb683563f4f1e4aa39f57e2 100644
--- a/conda/meta.yaml
+++ b/conda/meta.yaml
@@ -40,15 +40,18 @@ requirements:
     - boost {{ boost }}
     - numpy {{ numpy }}
     - scipy {{ scipy }}
+    - matplotlib {{ matplotlib }}
+    - tensorflow {{ tensorflow }}  # [linux]
   run:
     - python
     - setuptools
     - boost
-    - scipy
-    - scikit-image
+    - {{ pin_compatible('scipy') }}
+    - {{ pin_compatible('scikit-image') }}
     - {{ pin_compatible('numpy') }}
   run_constrained:
-    - matplotlib
+    - {{ pin_compatible('matplotlib') }}
+    - {{ pin_compatible('tensorflow') }}  # [linux]
 
 test:
   imports:
@@ -75,6 +78,7 @@ test:
     - sphinx
     - sphinx_rtd_theme
     - matplotlib
+    - tensorflow  # [linux]
 
 about:
   home: https://www.idiap.ch/software/bob/
diff --git a/doc/index.rst b/doc/index.rst
index 9c22a6111fc9cfb2899bc78f70f716ff9bce9274..1bb83ec2800f300bd695c9c14ebd759c14966125 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -34,6 +34,7 @@ Documentation
    :maxdepth: 2
 
    guide
+   mtcnn
    py_api
 
 
diff --git a/doc/mtcnn.rst b/doc/mtcnn.rst
new file mode 100644
index 0000000000000000000000000000000000000000..adbe1ba8f82def9aeba90e951e7915d25689a20e
--- /dev/null
+++ b/doc/mtcnn.rst
@@ -0,0 +1,18 @@
+
+.. _bob.ip.facedetect.mtcnn:
+
+============================
+ Face detection using MTCNN
+============================
+
+This package comes with a wrapper around the MTCNN (v1) face detector. See
+https://kpzhang93.github.io/MTCNN_face_detection_alignment/index.html for more
+information on MTCNN. The model was converted directly from the original Caffe
+model using the code at https://github.com/blaueck/tf-mtcnn.
+
+See below for examples of how to use
+:any:`bob.ip.facedetect.mtcnn.MTCNN`:
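+
+A minimal sketch (TensorFlow must be installed; the model graph is loaded
+lazily on the first call; ``testimage.jpg`` is a test image shipped with the
+package):
+
+.. code-block:: python
+
+   from bob.io.base import load
+   from bob.io.base.test_utils import datafile
+   from bob.ip.facedetect.mtcnn import MTCNN
+
+   image = load(datafile("testimage.jpg", "bob.ip.facedetect"))
+   detector = MTCNN()
+   # a list of dicts with bounding box corners, landmarks, and a quality score
+   annotations = detector(image)
+
+The plot below draws the detected bounding boxes and landmarks: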
+
+.. plot:: plot/detect_faces_mtcnn.py
+   :include-source: True
+
diff --git a/doc/plot/detect_faces_mtcnn.py b/doc/plot/detect_faces_mtcnn.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9ec7001e0062a33bb88cc5e66886bfa42929b29
--- /dev/null
+++ b/doc/plot/detect_faces_mtcnn.py
@@ -0,0 +1,67 @@
+import matplotlib.pyplot as plt
+from bob.io.base import load
+from bob.io.base.test_utils import datafile
+from bob.io.image import imshow
+from bob.ip.facedetect.mtcnn import MTCNN
+from matplotlib.patches import Circle
+from matplotlib.patches import Rectangle
+
+# load colored test image
+color_image = load(datafile("test_image_multi_face.png", "bob.ip.facedetect"))
+is_tf_available = True
+try:
+    import tensorflow
+except Exception:
+    is_tf_available = False
+
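+# fall back to only displaying the image when TensorFlow is unavailable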
+if not is_tf_available:
+    imshow(color_image)
+else:
+
+    # detect all faces
+    detector = MTCNN()
+    detections = detector(color_image)
+
+    imshow(color_image)
+    plt.axis("off")
+
+    for annotations in detections:
+        topleft = annotations["topleft"]
+        bottomright = annotations["bottomright"]
+        size = bottomright[0] - topleft[0], bottomright[1] - topleft[1]
+        # draw bounding boxes
+        plt.gca().add_patch(
+            Rectangle(
+                topleft[::-1],
+                size[1],
+                size[0],
+                edgecolor="b",
+                facecolor="none",
+                linewidth=2,
+            )
+        )
+        # draw face landmarks
+        for key, color in (
+            ("reye", "r"),
+            ("leye", "g"),
+            ("nose", "b"),
+            ("mouthright", "k"),
+            ("mouthleft", "w"),
+        ):
+            plt.gca().add_patch(
+                Circle(
+                    annotations[key][::-1],
+                    radius=2,
+                    edgecolor=color,
+                    facecolor="none",
+                    linewidth=2,
+                )
+            )
+        # show quality of detections
+        plt.text(
+            topleft[1],
+            topleft[0],
+            round(annotations["quality"], 3),
+            color="b",
+            fontsize=14,
+        )
diff --git a/doc/py_api.rst b/doc/py_api.rst
index cd63337d353b609ad711728f630088c1367b7a6a..443dcf784c91d7a1ecdd4a58ad1e2609f8ba52e3 100644
--- a/doc/py_api.rst
+++ b/doc/py_api.rst
@@ -40,3 +40,4 @@ Detailed Information
 --------------------
 
 .. automodule:: bob.ip.facedetect
+.. automodule:: bob.ip.facedetect.mtcnn