diff --git a/bob/pad/face/preprocessor/ImageFaceCrop.py b/bob/pad/face/preprocessor/ImageFaceCrop.py
index d79e21f21b48d1cf9207cda87f5fa83b210fa0fa..10b39071acaf8f7091b9c175028fa4fb650ed23c 100644
--- a/bob/pad/face/preprocessor/ImageFaceCrop.py
+++ b/bob/pad/face/preprocessor/ImageFaceCrop.py
@@ -17,8 +17,144 @@ import bob.ip.color
 
 import bob.ip.base
 
-#==============================================================================
-# Main body:
+
+#==========================================================================
+def normalize_image_size_in_grayscale(image, annotations, face_size, use_face_alignment):
+    """
+    This function crops the face in the input gray-scale image given annotations
+    defining the face bounding box and the eye positions.
+    The size of the face is also normalized to the pre-defined dimensions.
+
+    Two normalization options are available; they are controlled by the
+    ``use_face_alignment`` flag, see below.
+
+    **Parameters:**
+
+    ``image`` : 2D :py:class:`numpy.ndarray`
+        Gray-scale input image.
+
+    ``annotations`` : :py:class:`dict`
+        A dictionary containing annotations of the face bounding box,
+        eye locations and facial landmarks.
+        Dictionary must be as follows: ``{'topleft': (row, col), 'bottomright': (row, col),
+        'left_eye': (row, col), 'right_eye': (row, col)}``.
+
+    ``face_size`` : :py:class:`int`
+        The size of the face after normalization.
+
+    ``use_face_alignment`` : :py:class:`bool`
+        If ``False``, the re-sizing from the publication
+        "On the Effectiveness of Local Binary Patterns in Face Anti-spoofing" is used.
+        If ``True``, the facial image is both re-sized and aligned using the
+        positions of the eyes, which are given in the annotations.
+
+    **Returns:**
+
+    ``normbbx`` : 2D :py:class:`numpy.ndarray`
+        An image of the cropped face of the size (face_size, face_size).
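+
+    **Example:**
+
+    A minimal usage sketch; the image shape, the bounding-box coordinates and
+    ``face_size=64`` below are illustrative values, not fixed by this function::
+
+        import numpy as np
+
+        image = np.random.randint(0, 255, (480, 640)).astype('uint8')
+        annotations = {'topleft': (120, 200), 'bottomright': (360, 440)}
+
+        face = normalize_image_size_in_grayscale(
+            image, annotations, face_size=64, use_face_alignment=False)
+
+        assert face.shape == (64, 64)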
+    """
+
+    if use_face_alignment:
+
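+        # FaceEyesNorm geometrically normalizes the face: the input is rotated and
+        # re-scaled so that the eye centers land at fixed positions inside the
+        # (face_size, face_size) crop, separated by the requested eyes_distance.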
+        face_eyes_norm = bob.ip.base.FaceEyesNorm(
+            eyes_distance=((face_size + 1) / 2.),
+            crop_size=(face_size, face_size),
+            eyes_center=(face_size / 4., (face_size - 0.5) / 2.))
+
+        right_eye, left_eye = annotations['right_eye'], annotations['left_eye']
+
+        normalized_image = face_eyes_norm(image, right_eye=right_eye, left_eye=left_eye)
+
+        normbbx = normalized_image.astype('uint8')
+
+    else:
+
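+        # crop the face region given by the 'topleft' / 'bottomright' annotations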
+        cutframe = image[annotations['topleft'][0]:annotations['bottomright'][
+            0], annotations['topleft'][1]:annotations['bottomright'][1]]
+
+        tempbbx = np.ndarray((face_size, face_size), 'float64')
+        normbbx = np.ndarray((face_size, face_size), 'uint8')
+        bob.ip.base.scale(cutframe, tempbbx)  # re-size the crop to (face_size, face_size)
+        tempbbx_ = tempbbx + 0.5  # round to the nearest integer ...
+        tempbbx_ = np.floor(tempbbx_)
+        normbbx = np.cast['uint8'](tempbbx_)  # ... and cast back to uint8
+
+    return normbbx
+
+
+#==========================================================================
+def normalize_image_size(image, annotations, face_size,
+                         rgb_output_flag, use_face_alignment):
+    """
+    This function crops the face in the input image given annotations defining
+    the face bounding box. The size of the face is also normalized to the
+    pre-defined dimensions. For RGB inputs it is possible to return both
+    color and gray-scale outputs. This option is controlled by ``rgb_output_flag``.
+
+    Two normalization options are available; they are controlled by the
+    ``use_face_alignment`` flag, see below.
+
+    **Parameters:**
+
+    ``image`` : 2D or 3D :py:class:`numpy.ndarray`
+        Input image (RGB or gray-scale).
+
+    ``annotations`` : :py:class:`dict`
+        A dictionary containing annotations of the face bounding box,
+        eye locations and facial landmarks.
+        Dictionary must be as follows: ``{'topleft': (row, col), 'bottomright': (row, col),
+        'left_eye': (row, col), 'right_eye': (row, col)}``.
+
+    ``face_size`` : :py:class:`int`
+        The size of the face after normalization.
+
+    ``rgb_output_flag`` : :py:class:`bool`
+        Return RGB cropped face if ``True``, otherwise a gray-scale image is
+        returned. Default: ``False``.
+
+    ``use_face_alignment`` : :py:class:`bool`
+        If ``False``, the facial image re-sizing from the publication
+        "On the Effectiveness of Local Binary Patterns in Face Anti-spoofing" is used.
+        If ``True``, the facial image is both re-sized and aligned using the
+        positions of the eyes, which are given in the annotations.
+
+    **Returns:**
+
+    ``face`` : 2D or 3D :py:class:`numpy.ndarray`
+        An image of the cropped face of the size (face_size, face_size),
+        RGB 3D or gray-scale 2D.
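+
+    **Example:**
+
+    An illustrative sketch; the ``(3, 480, 640)`` input follows Bob's
+    (channel, row, col) layout for RGB images, and the bounding-box coordinates
+    and ``face_size=64`` are illustrative values::
+
+        import numpy as np
+
+        rgb = np.random.randint(0, 255, (3, 480, 640)).astype('uint8')
+        annotations = {'topleft': (120, 200), 'bottomright': (360, 440)}
+
+        face_rgb = normalize_image_size(rgb, annotations, face_size=64,
+                                        rgb_output_flag=True,
+                                        use_face_alignment=False)   # (3, 64, 64)
+
+        face_gray = normalize_image_size(rgb, annotations, face_size=64,
+                                         rgb_output_flag=False,
+                                         use_face_alignment=False)  # (64, 64)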
+    """
+
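+    # convert an RGB input to gray-scale, unless an RGB output was requested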
+    if len(image.shape) == 3:
+
+        if not rgb_output_flag:
+
+            image = bob.ip.color.rgb_to_gray(image)
+
+    if len(image.shape) == 2:
+
+        image = [image]  # make gray-scale image an iterable
+
+    result = []
+
+    for image_channel in image:  # for all color channels in the input image
+
+        cropped_face = normalize_image_size_in_grayscale(
+            image_channel, annotations, face_size, use_face_alignment)
+
+        result.append(cropped_face)
+
+    face = np.stack(result, axis=0)
+
+        face = np.squeeze(face)  # remove the singleton first dimension for gray-scale images
+
+    return face
 
 
 class ImageFaceCrop(Preprocessor):
@@ -50,151 +186,103 @@ class ImageFaceCrop(Preprocessor):
         self.face_size = face_size
         self.rgb_output_flag = rgb_output_flag
 
     #==========================================================================
-    def normalize_image_size_in_grayscale(self, image, annotations, face_size, use_face_alignment):
+    def __call__(self, image, annotations):
         """
-        This function crops the face in the input Gray-scale image given annotations
-        defining the face bounding box, and eye positions.
-        The size of the face is also normalized to the pre-defined dimensions.
-
-        Two normalization options are available, which are controlled by
-        ``use_face_alignment`` flag, see below.
+        Call the ``normalize_image_size()`` function of this module.
 
         **Parameters:**
 
-        ``image`` : 2D :py:class:`numpy.ndarray`
-            Gray-scale input image.
+        ``image`` : 2D or 3D :py:class:`numpy.ndarray`
+            Input image (RGB or gray-scale).
 
         ``annotations`` : :py:class:`dict`
-            A dictionary containing annotations of the face bounding box,
-            eye locations and facial landmarks.
-            Dictionary must be as follows ``{'topleft': (row, col), 'bottomright': (row, col),
-            'left_eye': (row, col), 'right_eye': (row, col)``.
-
-        ``face_size`` : :py:class:`int`
-            The size of the face after normalization.
-
-        ``use_face_alignment`` : :py:class:`bool`
-            If ``False``, the re-sizing from this publication is used:
-            "On the Effectiveness of Local Binary Patterns in Face Anti-spoofing"
-            If ``True`` the facial image is both re-sized and aligned using
-            positions of the eyes, which are given in the annotations.
+            A dictionary containing annotations of the face bounding box and eye locations.
+            Dictionary must be as follows: ``{'topleft': (row, col), 'bottomright': (row, col),
+            'left_eye': (row, col), 'right_eye': (row, col)}``.
 
         **Returns:**
 
-        ``normbbx`` : 2D :py:class:`numpy.ndarray`
-            An image of the cropped face of the size (self.face_size, self.face_size).
+        ``norm_face_image`` : 2D or 3D :py:class:`numpy.ndarray`
+            An image of the cropped face of the size (self.face_size, self.face_size),
+            RGB 3D or gray-scale 2D.
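+
+        **Example:**
+
+        A minimal sketch, assuming the preprocessor is configured with the
+        attributes set in the constructor above and with face alignment
+        disabled; the image and annotation values are illustrative::
+
+            import numpy as np
+
+            preprocessor = ImageFaceCrop(face_size=64, rgb_output_flag=False)
+
+            image = np.random.randint(0, 255, (3, 480, 640)).astype('uint8')
+            annotations = {'topleft': (120, 200), 'bottomright': (360, 440)}
+
+            norm_face_image = preprocessor(image, annotations)  # (64, 64) gray-scale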
         """
 
-        if use_face_alignment:
+        # normalize_image_size() is now a module-level function of this file
+        norm_face_image = normalize_image_size(
+            image, annotations, self.face_size, self.rgb_output_flag,
+            self.use_face_alignment)
+
+        return norm_face_image
+
 
-            face_eyes_norm = bob.ip.base.FaceEyesNorm(
-                eyes_distance=(face_size / 2.),
-                crop_size=(face_size, face_size),
-                eyes_center=(face_size / 4., (face_size - 0.5) / 2.))
 
-            right_eye,left_eye=annotations['right_eye'],annotations['left_eye']
 
-            normalized_image = face_eyes_norm( image, right_eye = right_eye, left_eye = left_eye )
 
-            normbbx=normalized_image.astype('uint8')
 
-        else:
 
-            cutframe = image[annotations['topleft'][0]:annotations['bottomright'][
-                0], annotations['topleft'][1]:annotations['bottomright'][1]]
 
-            tempbbx = np.ndarray((face_size, face_size), 'float64')
-            normbbx = np.ndarray((face_size, face_size), 'uint8')
-            bob.ip.base.scale(cutframe, tempbbx)  # normalization
-            tempbbx_ = tempbbx + 0.5
-            tempbbx_ = np.floor(tempbbx_)
-            normbbx = np.cast['uint8'](tempbbx_)
 
-        return normbbx
 
     #==========================================================================
-    def normalize_image_size(self, image, annotations, face_size,
-                             rgb_output_flag):
+    def __call__(self, frames, annotations):
         """
-        This function crops the face in the input image given annotations defining
-        the face bounding box. The size of the face is also normalized to the
-        pre-defined dimensions. For RGB inputs it is possible to return both
-        color and gray-scale outputs. This option is controlled by ``rgb_output_flag``.
-
-        The algorithm is identical to the following paper:
-        "On the Effectiveness of Local Binary Patterns in Face Anti-spoofing"
+        Crop the face in the input video frames given annotations for each frame.
 
         **Parameters:**
 
-        ``image`` : 2D or 3D :py:class:`numpy.ndarray`
-            Input image (RGB or gray-scale).
+        ``frames`` : FrameContainer
+            Video data stored in the FrameContainer, see ``bob.bio.video.utils.FrameContainer``
+            for further details.
 
         ``annotations`` : :py:class:`dict`
-            A dictionary containing annotations of the face bounding box.
-            Dictionary must be as follows ``{'topleft': (row, col), 'bottomright': (row, col)}``
-
-        ``face_size`` : :py:class:`int`
-            The size of the face after normalization.
-
-        ``rgb_output_flag`` : :py:class:`bool`
-            Return RGB cropped face if ``True``, otherwise a gray-scale image is
-            returned. Default: ``False``.
+            A dictionary containing the annotations for each frame in the video.
+            Dictionary structure: ``annotations = {'1': frame1_dict, '2': frame2_dict, ...}``,
+            where ``frameN_dict = {'topleft': (row, col), 'bottomright': (row, col)}``
+            is the dictionary defining the coordinates of the face bounding box in frame N.
 
         **Returns:**
 
-        ``face`` : 2D or 3D :py:class:`numpy.ndarray`
-            An image of the cropped face of the size (self.face_size, self.face_size),
-            rgb 3D or gray-scale 2D.
+        ``preprocessed_video`` : FrameContainer
+            Cropped faces stored in the FrameContainer.
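+
+        **Example:**
+
+        Hypothetical annotations for a two-frame video; the coordinate values
+        are illustrative only::
+
+            annotations = {'1': {'topleft': (10, 20), 'bottomright': (170, 180)},
+                           '2': {'topleft': (12, 22), 'bottomright': (172, 182)}}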
         """
 
-        if len(image.shape) == 3:
 
-            if not (rgb_output_flag):
 
-                image = bob.ip.color.rgb_to_gray(image)
 
-        if len(image.shape) == 2:
 
-            image = [image]  # make gray-scale image an iterable
 
-        result = []
 
-        for image_channel in image:  # for all color channels in the input image
+        if self.detect_faces_flag:
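+            # run the face / facial-landmark detector on every frame to
+            # (re-)generate the annotations for the input video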
 
-            cropped_face = self.normalize_image_size_in_grayscale(
-                image_channel, annotations, face_size)
+            if self.max_image_size:  # e.g. max_image_size = 1920
 
-            result.append(cropped_face)
+                if np.max(frames[0][1].shape) > self.max_image_size:
 
-        face = np.stack(result, axis=0)
+                    return bob.bio.video.FrameContainer()
 
-        face = np.squeeze(face)  # squeeze 1-st dimension for gray-scale images
+            try:
 
-        return face
+                annotations = detect_face_landmarks_in_video(
+                    frames, self.face_detection_method)  # TODO: new dicts
+            except Exception:  # landmark detection failed, return an empty container
 
-    #==========================================================================
-    def __call__(self, image, annotations):
-        """
-        Call the ``normalize_image_size()`` method of this class.
+                return bob.bio.video.FrameContainer()
 
-        **Parameters:**
+        if len(frames) != len(annotations):  # if some annotations are missing
 
-        ``image`` : 2D or 3D :py:class:`numpy.ndarray`
-            Input image (RGB or gray-scale).
+            # select only the frames that have annotations:
+            frames, annotations = self.select_annotated_frames(
+                frames, annotations)
 
-        ``annotations`` : :py:class:`dict`
-            A dictionary containing annotations of the face bounding box.
-            Dictionary must be as follows ``{'topleft': (row, col), 'bottomright': (row, col)}``
+        preprocessed_video = self.video_preprocessor(
+            frames=frames, annotations=annotations)
 
-        **Returns:**
+        if self.check_face_size_flag:
+
+            preprocessed_video = self.check_face_size(
+                preprocessed_video, annotations, self.min_face_size)
+
+        return preprocessed_video
 
-        ``norm_face_image`` : 2D or 3D :py:class:`numpy.ndarray`
-            An image of the cropped face of the size (self.face_size, self.face_size),
-            rgb 3D or gray-scale 2D.
-        """
 
-        norm_face_image = self.normalize_image_size(
-            image, annotations, self.face_size, self.rgb_output_flag)
 
-        return norm_face_image