diff --git a/bob/pad/face/config/preprocessor/frame_difference.py b/bob/pad/face/config/preprocessor/frame_difference.py
new file mode 100644
index 0000000000000000000000000000000000000000..349f172d2991113939ad131ec29c0989a920eca6
--- /dev/null
+++ b/bob/pad/face/config/preprocessor/frame_difference.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+
+from bob.pad.face.preprocessor import FrameDifference
+
+
+#=======================================================================================
+# Define instances here:
+
+NUMBER_OF_FRAMES = 200 # process at most 200 frames
+CHECK_FACE_SIZE_FLAG = True # check the size of the face in each frame
+MIN_FACE_SIZE = 50 # skip frames with faces smaller than 50 pixels
+
+frame_diff_min_size_50_200_frames = FrameDifference(number_of_frames = NUMBER_OF_FRAMES,
+                                                    check_face_size_flag = CHECK_FACE_SIZE_FLAG,
+                                                    min_face_size = MIN_FACE_SIZE)
+
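+# A hedged usage sketch (hypothetical data; assumes ``video`` is a
+# bob.bio.video FrameContainer and ``annotations`` holds per-frame face
+# bounding boxes, as documented in bob.pad.face.preprocessor.FrameDifference):
+#
+#   diff = frame_diff_min_size_50_200_frames(video, annotations)
+#   # diff is an (N - 1) x 2 array: column 0 - face, column 1 - background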
diff --git a/bob/pad/face/preprocessor/FrameDifference.py b/bob/pad/face/preprocessor/FrameDifference.py
new file mode 100644
index 0000000000000000000000000000000000000000..696b90e0a1f79df52977f381b8dce666f8f0c881
--- /dev/null
+++ b/bob/pad/face/preprocessor/FrameDifference.py
@@ -0,0 +1,342 @@
+#!/usr/bin/env python
+# -*- coding: utf-8 -*-
+"""
+Created on Fri May 12 14:14:23 2017
+
+@author: Olegs Nikisins
+"""
+
+#==============================================================================
+# Import what is needed here:
+
+from bob.bio.base.preprocessor import Preprocessor
+
+import numpy as np
+
+import bob.bio.video
+
+import bob.ip.base
+
+import bob.ip.color
+
+#==============================================================================
+# Main body:
+
+class FrameDifference(Preprocessor):
+    """
+    This class computes frame differences for both the facial and the
+    background regions of a video. A minimal face size constraint can be
+    applied to the input video, so that only the frames in which the face
+    exceeds the threshold are selected. This behavior is controlled by the
+    ``check_face_size_flag`` and ``min_face_size`` arguments of the class.
+    It is also possible to compute the frame differences for a limited
+    number of frames by specifying the ``number_of_frames`` parameter.
+
+    **Parameters:**
+
+    ``number_of_frames`` : :py:class:`int`
+        The number of frames to extract the frame differences from.
+        If ``None``, all frames of the input video are used. Default: ``None``.
+
+    ``check_face_size_flag`` : :py:class:`bool`
+        If ``True``, only the frames containing a face larger than the
+        ``min_face_size`` threshold are processed. Default: ``False``.
+
+    ``min_face_size`` : :py:class:`int`
+        The minimal size of the face in pixels. Only used when
+        ``check_face_size_flag`` is set to ``True``. Default: 50.
+    """
+
+    def __init__(self,
+                 number_of_frames = None,
+                 check_face_size_flag = False,
+                 min_face_size = 50):
+
+        super(FrameDifference, self).__init__(number_of_frames = number_of_frames,
+                                              check_face_size_flag = check_face_size_flag,
+                                              min_face_size = min_face_size)
+
+        self.number_of_frames = number_of_frames
+        self.check_face_size_flag = check_face_size_flag
+        self.min_face_size = min_face_size
+
+
+    #==========================================================================
+    def eval_face_differences(self, previous, current, annotations):
+        """
+        Evaluates the normalized frame difference on the face region.
+
+        The difference is the sum of absolute pixel differences inside the
+        face bounding box, normalized by the area of the box.
+
+        **Parameters:**
+
+        ``previous`` : 2D :py:class:`numpy.ndarray`
+            Previous frame as a gray-scaled image
+
+        ``current`` : 2D :py:class:`numpy.ndarray`
+            The current frame as a gray-scaled image
+
+        ``annotations`` : :py:class:`dict`
+            A dictionary containing annotations of the face bounding box.
+            Dictionary must be as follows ``{'topleft': (row, col), 'bottomright': (row, col)}``.
+
+        **Returns:**
+
+        ``face`` : :py:class:`float`
+            A size normalized integral difference of facial regions in two input
+            images.
+        """
+
+        prev = previous[annotations['topleft'][0]:annotations['bottomright'][0],
+                        annotations['topleft'][1]:annotations['bottomright'][1]]
+
+        curr = current[annotations['topleft'][0]:annotations['bottomright'][0],
+                       annotations['topleft'][1]:annotations['bottomright'][1]]
+
+        face_diff = abs(curr.astype('int32') - prev.astype('int32'))
+
+        face = face_diff.sum()
+
+        face /= float(face_diff.size)
+
+        return face
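+
+    # A hedged sanity check for ``eval_face_differences`` (synthetic data, not
+    # part of the pipeline): a uniform intensity step of 10 inside a 40x40
+    # face box yields a normalized difference of exactly 10.0:
+    #
+    #   prev = np.zeros((100, 100), dtype = 'uint8')
+    #   curr = np.full((100, 100), 10, dtype = 'uint8')
+    #   annotations = {'topleft': (20, 20), 'bottomright': (60, 60)}
+    #   FrameDifference().eval_face_differences(prev, curr, annotations) # -> 10.0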
+
+
+    #==========================================================================
+    def eval_background_differences(self, previous, current, annotations, border=None):
+        """
+        Evaluates the normalized frame difference on the background.
+
+        The difference is the sum of absolute pixel differences outside the
+        face bounding box, normalized by the area of the background region.
+
+        **Parameters:**
+
+        ``previous`` : 2D :py:class:`numpy.ndarray`
+            Previous frame as a gray-scaled image
+
+        ``current`` : 2D :py:class:`numpy.ndarray`
+            The current frame as a gray-scaled image
+
+        ``annotations`` : :py:class:`dict`
+            A dictionary containing annotations of the face bounding box.
+            Dictionary must be as follows ``{'topleft': (row, col), 'bottomright': (row, col)}``.
+
+        ``border`` : :py:class:`int`
+            The size of the border around the face to consider as background.
+            If set to ``None``, the whole image outside of the face region is
+            considered. Default: ``None``.
+
+        **Returns:**
+
+        ``bg`` : :py:class:`float`
+            A size normalized integral difference of non-facial regions in two input
+            images.
+        """
+
+        height = annotations['bottomright'][0] - annotations['topleft'][0]
+        width = annotations['bottomright'][1] - annotations['topleft'][1]
+
+        full_diff = abs(current.astype('int32') - previous.astype('int32'))
+
+        if border is None:
+            full = full_diff.sum()
+            full_size = full_diff.size
+
+        else:
+
+            # pad the face bounding box by ``border`` pixels on each side,
+            # clipping the resulting region to the image boundaries:
+            y1 = max(annotations['topleft'][0] - border, 0)
+            x1 = max(annotations['topleft'][1] - border, 0)
+            y2 = min(y1 + height + (2*border), full_diff.shape[0])
+            x2 = min(x1 + width + (2*border), full_diff.shape[1])
+
+            full = full_diff[y1:y2, x1:x2].sum()
+            full_size = full_diff[y1:y2, x1:x2].size
+
+        face_diff = full_diff[annotations['topleft'][0]:(annotations['topleft'][0] + height),
+                              annotations['topleft'][1]:(annotations['topleft'][1] + width)]
+
+        # compute the differences in the face and background areas:
+        face = face_diff.sum()
+        bg = full - face
+
+        normalization = float(full_size - face_diff.size)
+        if normalization < 1: # prevent division by zero
+            bg = 0.0
+        else:
+            bg /= normalization
+
+        return bg
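+
+    # A hedged sanity check for ``eval_background_differences`` (same synthetic
+    # data as above; with ``border = None`` the whole image outside of the face
+    # box is the background, and the uniform step of 10 again yields 10.0):
+    #
+    #   FrameDifference().eval_background_differences(prev, curr, annotations, None) # -> 10.0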
+
+
+    #==========================================================================
+    def check_face_size(self, frame_container, annotations, min_face_size):
+        """
+        Return a FrameContainer containing only the frames in which the size
+        of the face exceeds the specified threshold. The annotations for the
+        selected frames are also returned.
+
+        **Parameters:**
+
+        ``frame_container`` : FrameContainer
+            Video data stored in the FrameContainer, see ``bob.bio.video.utils.FrameContainer``
+            for further details.
+
+        ``annotations`` : :py:class:`dict`
+            A dictionary containing the annotations for each frame in the video.
+            Dictionary structure: ``annotations = {'0': frame0_dict, '1': frame1_dict, ...}``,
+            where ``frameN_dict = {'topleft': (row, col), 'bottomright': (row, col)}``
+            defines the coordinates of the face bounding box in frame N.
+
+        ``min_face_size`` : :py:class:`int`
+            The minimal size of the face in pixels.
+
+        **Returns:**
+
+        ``selected_frames`` : FrameContainer
+            Selected frames stored in the FrameContainer.
+
+        ``selected_annotations`` : :py:class:`dict`
+            A dictionary containing the annotations for selected frames.
+            Dictionary structure: ``annotations = {'0': frame0_dict, '1': frame1_dict, ...}``,
+            where ``frameN_dict = {'topleft': (row, col), 'bottomright': (row, col)}``
+            defines the coordinates of the face bounding box in the selected frame N.
+        """
+
+        selected_frames = bob.bio.video.FrameContainer() # initialize the FrameContainer
+
+        selected_annotations = {}
+
+        selected_frame_idx = 0
+
+        for idx in range(0, len(annotations)): # idx - frame index
+
+            frame_annotations = annotations[str(idx)] # annotations for particular frame
+
+            # size of current face
+            face_size = np.min(np.array(frame_annotations['bottomright']) - np.array(frame_annotations['topleft']))
+
+            if face_size >= min_face_size: # check if face size is above the threshold
+
+                selected_frame = frame_container[idx][1] # get current frame
+
+                selected_frames.add(selected_frame_idx, selected_frame) # add current frame to FrameContainer
+
+                selected_annotations[str(selected_frame_idx)] = annotations[str(idx)]
+
+                selected_frame_idx = selected_frame_idx + 1
+
+        return selected_frames, selected_annotations
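+
+    # A hedged usage sketch for ``check_face_size`` (synthetic single-frame
+    # container; a 40-pixel face passes the 30-pixel threshold, so one frame
+    # and its annotations are returned):
+    #
+    #   fc = bob.bio.video.FrameContainer()
+    #   fc.add(0, np.zeros((100, 100), dtype = 'uint8'))
+    #   annotations = {'0': {'topleft': (20, 20), 'bottomright': (60, 60)}}
+    #   frames, annots = FrameDifference().check_face_size(fc, annotations, 30)
+    #   len(frames) # -> 1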
+
+
+    #==========================================================================
+    def comp_face_bg_diff(self, frames, annotations, number_of_frames = None):
+        """
+        This function computes the frame differences for both facial and background
+        regions. The differences are computed for at most ``number_of_frames``
+        frames of the input FrameContainer.
+
+        **Parameters:**
+
+        ``frames`` : FrameContainer
+            RGB video data stored in the FrameContainer, see ``bob.bio.video.utils.FrameContainer``
+            for further details.
+
+        ``annotations`` : :py:class:`dict`
+            A dictionary containing the annotations for each frame in the video.
+            Dictionary structure: ``annotations = {'0': frame0_dict, '1': frame1_dict, ...}``,
+            where ``frameN_dict = {'topleft': (row, col), 'bottomright': (row, col)}``
+            defines the coordinates of the face bounding box in frame N.
+
+        ``number_of_frames`` : :py:class:`int`
+            The number of frames to use in processing. If ``None``, all frames of the
+            input video are used. Default: ``None``.
+
+        **Returns:**
+
+        ``diff`` : 2D :py:class:`numpy.ndarray`
+            An array of the size ``(number_of_frames - 1) x 2``.
+            The first column contains frame differences of facial regions.
+            The second column contains frame differences of non-facial/background regions.
+        """
+
+        # Compute the number of frames to process:
+        if number_of_frames is not None:
+            number_of_frames = np.min([len(frames), number_of_frames])
+        else:
+            number_of_frames = len(frames)
+
+        previous = frames[0][1] # the first frame in the video
+
+        if len(previous.shape) == 3: # if RGB convert to gray-scale
+            previous = bob.ip.color.rgb_to_gray(previous)
+
+        diff = []
+
+        for k in range(1, number_of_frames):
+
+            current = frames[k][1]
+
+            if len(current.shape) == 3: # if RGB convert to gray-scale
+                current = bob.ip.color.rgb_to_gray(current)
+
+            face_diff = self.eval_face_differences(previous, current, annotations[str(k)])
+            bg_diff = self.eval_background_differences(previous, current, annotations[str(k)], None)
+
+            diff.append((face_diff, bg_diff))
+
+            previous = current # the current frame becomes the previous one
+
+        if not diff: # if list is empty
+
+            diff = [(np.NaN, np.NaN)]
+
+        diff = np.vstack(diff)
+
+        return diff
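+
+    # A hedged sketch of the expected output of ``comp_face_bg_diff``
+    # (hypothetical 3-frame input, so the result is a 2 x 2 array):
+    #
+    #   diff = preprocessor.comp_face_bg_diff(frames, annotations)
+    #   face_column = diff[:, 0] # frame differences of the facial regions
+    #   bg_column = diff[:, 1] # frame differences of the background regions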
+
+
+    #==========================================================================
+    def __call__(self, frames, annotations):
+        """
+        This method calls the ``comp_face_bg_diff`` function of this class,
+        computing the frame differences for both facial and background regions.
+        If ``check_face_size_flag`` is set to ``True``, the differences are
+        computed only for the frames selected by the ``check_face_size``
+        method; otherwise all frames are used.
+
+        **Parameters:**
+
+        ``frames`` : FrameContainer
+            RGB video data stored in the FrameContainer, see ``bob.bio.video.utils.FrameContainer``
+            for further details.
+
+        ``annotations`` : :py:class:`dict`
+            A dictionary containing the annotations for each frame in the video.
+            Dictionary structure: ``annotations = {'0': frame0_dict, '1': frame1_dict, ...}``,
+            where ``frameN_dict = {'topleft': (row, col), 'bottomright': (row, col)}``
+            defines the coordinates of the face bounding box in frame N.
+
+        **Returns:**
+
+        ``diff`` : 2D :py:class:`numpy.ndarray`
+            An array of the size ``(number_of_frames - 1) x 2``.
+            The first column contains frame differences of facial regions.
+            The second column contains frame differences of non-facial/background regions.
+        """
+
+        if self.check_face_size_flag:
+
+            selected_frames, selected_annotations = self.check_face_size(frames, annotations, self.min_face_size)
+
+        else: # otherwise keep all frames and their annotations
+
+            selected_frames = frames
+            selected_annotations = annotations
+
+        diff = self.comp_face_bg_diff(frames = selected_frames,
+                                      annotations = selected_annotations,
+                                      number_of_frames = self.number_of_frames)
+
+        return diff
+
+
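+# A minimal end-to-end sketch (hypothetical data; assumes ``video`` is a
+# FrameContainer and ``annotations`` follows the structure documented above):
+#
+#   preprocessor = FrameDifference(number_of_frames = 200,
+#                                  check_face_size_flag = True,
+#                                  min_face_size = 50)
+#   diff = preprocessor(video, annotations) # (number_of_frames - 1) x 2 array
+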
diff --git a/bob/pad/face/preprocessor/__init__.py b/bob/pad/face/preprocessor/__init__.py
index 9f59b55255baff988b65a5782bdef9d4513cfc82..c9c5a4225d2dfff5065d2559ecea9c6ebed03b0f 100644
--- a/bob/pad/face/preprocessor/__init__.py
+++ b/bob/pad/face/preprocessor/__init__.py
@@ -1,5 +1,6 @@
 from .VideoFaceCrop import VideoFaceCrop
 from .ImageFaceCrop import ImageFaceCrop
+from .FrameDifference import FrameDifference
 
 
 def __appropriate__(*args):
@@ -23,5 +24,6 @@ def __appropriate__(*args):
 __appropriate__(
     VideoFaceCrop,
     ImageFaceCrop,
+    FrameDifference,
 )
 __all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/setup.py b/setup.py
index 116ef87a3ecd4afa947dfcf904ee833949483343..338b4dcfe4b03fda511137b5ae065a7604822587 100644
--- a/setup.py
+++ b/setup.py
@@ -113,6 +113,7 @@ setup(
             'video-face-crop-preproc-64-face-50-local-cropper = bob.pad.face.config.preprocessor.video_face_crop:video_face_crop_preproc_64_64_face_50_local_cropper',
             'video-face-crop-preproc-64-face-50-local-cropper-rgb = bob.pad.face.config.preprocessor.video_face_crop:video_face_crop_preproc_64_64_face_50_local_cropper_rgb',
             'empty-preprocessor = bob.pad.face.config.preprocessor.filename:empty_preprocessor', # no preprocessing
+            'frame-diff-min-size-50-200-frames = bob.pad.face.config.preprocessor.frame_difference:frame_diff_min_size_50_200_frames',
             ],
 
         # registered preprocessors: