diff --git a/bob/pad/face/config/preprocessor/video_face_crop.py b/bob/pad/face/config/preprocessor/video_face_crop.py
index ad7f963b0fdb8f92319c02ad8210c899a44dbccc..72156454a24fed51f8bce0102511f49d5e3f2175 100644
--- a/bob/pad/face/config/preprocessor/video_face_crop.py
+++ b/bob/pad/face/config/preprocessor/video_face_crop.py
@@ -17,15 +17,50 @@ MIN_FACE_SIZE = 50
 USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
 RGB_OUTPUT_FLAG = True        # Return RGB cropped face using local cropper
 DETECT_FACES_FLAG = True      # find annotations locally replacing the database annotations
+FACE_DETECTION_METHOD = "dlib"
+
+rgb_face_detector_dlib = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
+                                       cropped_positions = CROPPED_POSITIONS,
+                                       fixed_positions = FIXED_POSITIONS,
+                                       mask_sigma = MASK_SIGMA,
+                                       mask_neighbors = MASK_NEIGHBORS,
+                                       mask_seed = None,
+                                       check_face_size_flag = CHECK_FACE_SIZE_FLAG,
+                                       min_face_size = MIN_FACE_SIZE,
+                                       use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
+                                       rgb_output_flag = RGB_OUTPUT_FLAG,
+                                       detect_faces_flag = DETECT_FACES_FLAG,
+                                       face_detection_method = FACE_DETECTION_METHOD)
+
+
+CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
+CROPPED_POSITIONS = {'topleft' : (0,0) , 'bottomright' : CROPPED_IMAGE_SIZE}
+FIXED_POSITIONS = None
+MASK_SIGMA = None             # The sigma for random values in areas outside the image
+MASK_NEIGHBORS = 5            # The number of neighbors to consider while extrapolating
+MASK_SEED = None              # The seed for generating random values during extrapolation
+CHECK_FACE_SIZE_FLAG = True   # Check the size of the face
+MIN_FACE_SIZE = 50
+USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
+RGB_OUTPUT_FLAG = True        # Return RGB cropped face using local cropper
+DETECT_FACES_FLAG = True      # find annotations locally replacing the database annotations
+FACE_DETECTION_METHOD = "mtcnn"
+
+rgb_face_detector_mtcnn = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
+                                       cropped_positions = CROPPED_POSITIONS,
+                                       fixed_positions = FIXED_POSITIONS,
+                                       mask_sigma = MASK_SIGMA,
+                                       mask_neighbors = MASK_NEIGHBORS,
+                                       mask_seed = None,
+                                       check_face_size_flag = CHECK_FACE_SIZE_FLAG,
+                                       min_face_size = MIN_FACE_SIZE,
+                                       use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
+                                       rgb_output_flag = RGB_OUTPUT_FLAG,
+                                       detect_faces_flag = DETECT_FACES_FLAG,
+                                       face_detection_method = FACE_DETECTION_METHOD)
+
+
+
+
+
 
-preprocessor_rgb_face_detect = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
-                                             cropped_positions = CROPPED_POSITIONS,
-                                             fixed_positions = FIXED_POSITIONS,
-                                             mask_sigma = MASK_SIGMA,
-                                             mask_neighbors = MASK_NEIGHBORS,
-                                             mask_seed = None,
-                                             check_face_size_flag = CHECK_FACE_SIZE_FLAG,
-                                             min_face_size = MIN_FACE_SIZE,
-                                             use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
-                                             rgb_output_flag = RGB_OUTPUT_FLAG,
-                                             detect_faces_flag = DETECT_FACES_FLAG)
diff --git a/bob/pad/face/preprocessor/VideoFaceCrop.py b/bob/pad/face/preprocessor/VideoFaceCrop.py
index 756bac3ef01fadee56feb89939c1689f6d44ab2d..7689fed94073c15cece1e6c8f7614c12ba86117f 100644
--- a/bob/pad/face/preprocessor/VideoFaceCrop.py
+++ b/bob/pad/face/preprocessor/VideoFaceCrop.py
@@ -84,6 +84,10 @@ class VideoFaceCrop(Preprocessor, object):
         cropping.
         Default: ``False``.
 
+    ``face_detection_method`` : :py:class:`str`
+        A package to be used for face detection. Available options: "dlib" and
+        "mtcnn".
+
     ``kwargs``
         Remaining keyword parameters passed to the Base constructor, such as ``color_channel`` or ``dtype``.
     """
@@ -101,6 +105,7 @@ class VideoFaceCrop(Preprocessor, object):
                  use_local_cropper_flag = False,
                  rgb_output_flag = False,
                  detect_faces_flag = False,
+                 face_detection_method = "dlib",
                  **kwargs):
 
         super(VideoFaceCrop, self).__init__(cropped_image_size = cropped_image_size,
@@ -114,6 +119,7 @@ class VideoFaceCrop(Preprocessor, object):
                                             use_local_cropper_flag = use_local_cropper_flag,
                                             rgb_output_flag = rgb_output_flag,
                                             detect_faces_flag = detect_faces_flag,
+                                            face_detection_method = face_detection_method,
                                             **kwargs)
 
         self.cropped_image_size = cropped_image_size
@@ -127,6 +133,7 @@ class VideoFaceCrop(Preprocessor, object):
         self.use_local_cropper_flag = use_local_cropper_flag
         self.rgb_output_flag = rgb_output_flag
         self.detect_faces_flag = detect_faces_flag
+        self.face_detection_method = face_detection_method
 
         # Save also the data stored in the kwargs:
         for (k, v) in kwargs.items():
@@ -276,15 +283,7 @@ class VideoFaceCrop(Preprocessor, object):
 
         if self.detect_faces_flag:
 
-            annotations_detected = detect_faces_in_video(frames)
-
-            if not annotations_detected:
-
-                annotations = annotations # if now annotations detected use DB annotations
-
-            else:
-
-                annotations = annotations_detected # if face was detected overwrite DB annotations
+            annotations = detect_faces_in_video(frames, self.face_detection_method)
 
         if len(frames) != len(annotations): # if some annotations are missing
 
@@ -315,7 +314,9 @@ class VideoFaceCrop(Preprocessor, object):
             name of the file.
         """
 
-        self.video_preprocessor.write_data(frames, file_name)
+        if frames: # save file if FrameContainer is not empty, otherwise do nothing.
+
+            self.video_preprocessor.write_data(frames, file_name)
 
 
     #==========================================================================
diff --git a/bob/pad/face/utils/face_detection_utils.py b/bob/pad/face/utils/face_detection_utils.py
index c088f3ba391a5a885e453f8d5ee727b14569d1e3..37eeac609441d6bc4863db42d66d6a4815cde1ab 100644
--- a/bob/pad/face/utils/face_detection_utils.py
+++ b/bob/pad/face/utils/face_detection_utils.py
@@ -9,9 +9,10 @@ This file contains face detection utils.
 
 import bob.ip.dlib # for face detection functionality
 
+import bob.ip.mtcnn
 
 #==============================================================================
-def detect_face_in_image(image):
+def detect_face_in_image(image, method):
     """
     This function detects a face in the input image.
 
@@ -20,6 +21,10 @@ def detect_face_in_image(image):
     ``image`` : 3D :py:class:`numpy.ndarray`
         A color image to detect the face in.
 
+    ``method`` : :py:class:`str`
+        A package to be used for face detection. Available options: "dlib" and
+        "mtcnn".
+
     **Returns:**
 
     ``annotations`` : :py:class:`dict`
@@ -28,11 +33,17 @@ def detect_face_in_image(image):
         If no annotations found an empty dictionary is returned.
     """
 
-    data = bob.ip.dlib.FaceDetector().detect_single_face(image)
+    if method == "dlib":
+
+        data = bob.ip.dlib.FaceDetector().detect_single_face(image)
+
+    if method == "mtcnn":
+
+        data = bob.ip.mtcnn.FaceDetector().detect_single_face(image)
 
     annotations = {}
 
-    if data is not None:
+    if ( data is not None ) and ( not all([x is None for x in data]) ):
 
         bounding_box = data[0]
 
@@ -44,7 +55,7 @@ def detect_face_in_image(image):
 
 
 #==============================================================================
-def detect_faces_in_video(frame_container):
+def detect_faces_in_video(frame_container, method):
     """
     This function detects a face in each farme of the input video.
 
@@ -53,6 +64,10 @@ def detect_faces_in_video(frame_container):
     ``frame_container`` : FrameContainer
         FrameContainer containing the frames data.
 
+    ``method`` : :py:class:`str`
+        A package to be used for face detection. Available options: "dlib" and
+        "mtcnn".
+
     **Returns:**
 
     ``annotations`` : :py:class:`dict`
@@ -69,7 +84,7 @@ def detect_faces_in_video(frame_container):
 
         image = frame[1]
 
-        frame_annotations = detect_face_in_image(image)
+        frame_annotations = detect_face_in_image(image, method)
 
         if frame_annotations:
 
diff --git a/setup.py b/setup.py
index 0a0578e9d4355fcdc12bd181768fb43c0c237ffe..5994338a00da26f58f9e9dc5cafaca19978ba57e 100644
--- a/setup.py
+++ b/setup.py
@@ -103,7 +103,8 @@ setup(
         # registered preprocessors:
         'bob.pad.preprocessor': [
             'empty-preprocessor = bob.pad.face.config.preprocessor.filename:empty_preprocessor', # no preprocessing
-            'preprocessor-rgb-face-detect = bob.pad.face.config.preprocessor.video_face_crop:preprocessor_rgb_face_detect', # detect faces locally replacing database annotations
+            'rgb-face-detect-dlib = bob.pad.face.config.preprocessor.video_face_crop:rgb_face_detector_dlib', # detect faces locally replacing database annotations
+            'rgb-face-detect-mtcnn = bob.pad.face.config.preprocessor.video_face_crop:rgb_face_detector_mtcnn', # detect faces locally replacing database annotations
             ],
 
         # registered extractors: