diff --git a/bob/pad/face/config/extractor/frame_diff_features.py b/bob/pad/face/config/extractor/frame_diff_features.py
new file mode 100644
index 0000000000000000000000000000000000000000..d0e6fb398d4a216ed90877d286b42599793eda02
--- /dev/null
+++ b/bob/pad/face/config/extractor/frame_diff_features.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python
+
+from bob.pad.face.extractor import FrameDiffFeatures
+
+
+#=======================================================================================
+# Define instances here:
+
+WINDOW_SIZE = 20  # number of consecutive frame differences per window
+OVERLAP = 0       # no overlap between consecutive windows
+
+frame_diff_feat_extr_w20_over0 = FrameDiffFeatures(window_size=WINDOW_SIZE, overlap=OVERLAP)
+
+WINDOW_SIZE = 100  # a longer window, same (zero) overlap
+
+frame_diff_feat_extr_w100_over0 = FrameDiffFeatures(window_size=WINDOW_SIZE, overlap=OVERLAP)
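+
+# The two instances above are registered as ``bob.pad.extractor`` entry points in
+# ``setup.py`` (``frame-diff-feat-extr-w20-over0`` and ``frame-diff-feat-extr-w100-over0``),
+# so they can be selected by name on the command line.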
+
diff --git a/bob/pad/face/config/frame_diff_svm.py b/bob/pad/face/config/frame_diff_svm.py
new file mode 100644
index 0000000000000000000000000000000000000000..d55a46237352fd68f936f9d4f43a8aec7281a0a3
--- /dev/null
+++ b/bob/pad/face/config/frame_diff_svm.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+
+"""
+@author: Olegs Nikisins
+
+This file contains configurations to run the Frame Differences and SVM based face PAD baseline.
+The settings are tuned for the Replay-Attack database.
+The idea of the algorithm is inherited from the following paper: [AM11]_.
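+
+A usage sketch, assuming the ``spoof.py`` script of ``bob.pad.base`` and a registered
+``replay-attack`` database resource are available in your installation (names may differ)::
+
+    ./bin/spoof.py replay-attack frame-diff-svm --sub-directory <PATH_TO_STORE_THE_RESULTS>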
+"""
+
+
+#=======================================================================================
+sub_directory = 'frame_diff_svm'
+"""
+Sub-directory where results will be placed.
+
+You may change this setting using the ``--sub-directory`` command-line option
+or the attribute ``sub_directory`` in a configuration file loaded **after**
+this resource.
+"""
+
+
+#=======================================================================================
+# define preprocessor:
+
+from ..preprocessor import FrameDifference
+
+NUMBER_OF_FRAMES = None # process all frames
+CHECK_FACE_SIZE_FLAG = True # Check size of the face
+MIN_FACE_SIZE = 50 # Minimal size of the face to consider
+
+preprocessor = FrameDifference(number_of_frames = NUMBER_OF_FRAMES,
+                               check_face_size_flag = CHECK_FACE_SIZE_FLAG,
+                               min_face_size = MIN_FACE_SIZE)
+"""
+In the preprocessing stage frame differences are computed for both the facial and the non-facial/background
+regions. Here all frames of the input video are considered, which is selected by setting
+``number_of_frames = None``. Frames containing faces smaller than the ``min_face_size = 50`` threshold
+are discarded. Both RGB and gray-scale videos are accepted by the preprocessor.
+The preprocessing idea is introduced in [AM11]_.
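+
+The preprocessor hands to the extractor a 2D array with two columns: frame differences of the facial
+region (first column) and of the non-facial/background region (second column), which is the input
+format expected by the ``FrameDiffFeatures`` extractor defined below.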
+"""
+
+
+#=======================================================================================
+# define extractor:
+
+from ..extractor import FrameDiffFeatures
+
+WINDOW_SIZE = 20
+OVERLAP = 0
+
+extractor = FrameDiffFeatures(window_size=WINDOW_SIZE,
+                              overlap=OVERLAP)
+"""
+In the feature extraction stage five features are computed for every non-overlapping window of
+the frame-difference input signals: the same five quantities are extracted from the facial region
+and from the non-facial/background region. The non-overlapping behaviour is selected by
+``overlap = 0`` and the length of the window is defined by the ``window_size`` argument.
+The features are introduced in the following paper: [AM11]_.
+"""
+
+
+#=======================================================================================
+# define algorithm:
+
+from ..algorithm import VideoSvmPadAlgorithm
+
+MACHINE_TYPE = 'C_SVC'
+KERNEL_TYPE = 'RBF'
+N_SAMPLES = 10000
+TRAINER_GRID_SEARCH_PARAMS = {'cost': [2**P for P in range(-3, 14, 2)], 'gamma': [2**P for P in range(-15, 0, 2)]}
+MEAN_STD_NORM_FLAG = True      # enable mean-std normalization
+FRAME_LEVEL_SCORES_FLAG = True # one score per frame(!) in this case
+
+algorithm = VideoSvmPadAlgorithm(machine_type = MACHINE_TYPE,
+                                 kernel_type = KERNEL_TYPE,
+                                 n_samples = N_SAMPLES,
+                                 trainer_grid_search_params = TRAINER_GRID_SEARCH_PARAMS,
+                                 mean_std_norm_flag = MEAN_STD_NORM_FLAG,
+                                 frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+"""
+The SVM algorithm with an RBF kernel is used to classify the data into *real* and *attack* classes.
+One score is produced for each frame of the input video, selected by ``frame_level_scores_flag = True``.
+A grid search over the SVM hyper-parameters (9 values of ``cost`` times 8 values of ``gamma``,
+i.e. 72 combinations given the settings above) is used to select the best settings.
+The grid search is done on a subset of the training data, whose size is defined by the
+``n_samples`` parameter.
+
+The data is also mean-std normalized, as selected by ``mean_std_norm_flag = True``.
+"""
+
diff --git a/bob/pad/face/extractor/FrameDiffFeatures.py b/bob/pad/face/extractor/FrameDiffFeatures.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b14085a5f7001ccb26803ce032a4ce7b65b620b
--- /dev/null
+++ b/bob/pad/face/extractor/FrameDiffFeatures.py
@@ -0,0 +1,326 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Jun 14 10:13:21 2017
+
+@author: Olegs Nikisins
+"""
+
+#==============================================================================
+# Import what is needed here:
+
+from bob.bio.base.extractor import Extractor
+
+import numpy as np
+
+import sys
+
+import bob.bio.video
+
+#==============================================================================
+# Main body:
+
+class FrameDiffFeatures(Extractor):
+    """
+    This class is designed to extract features describing frame differences.
+
+    The class allows to compute the following features in the window of the
+    length defined by ``window_size`` argument:
+
+        1. The minimum value observed on the cluster
+        2. The maximum value observed on the cluster
+        3. The mean value observed
+        4. The standard deviation on the cluster (unbiased estimator)
+        5. The DC ratio (D) as defined by:
+
+    .. math::
+
+        D(N) = \frac{\sum_{i=1}^N{|FFT_i|}}{|FFT_0|}
+
+    **Parameters:**
+
+    ``window_size`` : :py:class:`int`
+        The size of the window to use for feature computation.
+
+    ``overlap`` : :py:class:`int`
+        Determines the window overlapping; this number has to be between
+        0 (no overlapping) and 'window-size'-1. Default: 0.
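+
+    The five quantities above are computed per window for the facial region and
+    for the non-facial/background region, so every output frame holds 10 values
+    (5 facial followed by 5 background), as assembled in :py:meth:`comp_features`.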
+    """
+
+    def __init__(self,
+                 window_size,
+                 overlap = 0):
+
+        Extractor.__init__(self,
+                           window_size = window_size,
+                           overlap = overlap)
+
+        self.window_size = window_size
+        self.overlap = overlap
+
+
+    #==========================================================================
+    def dcratio(self, arr):
+        """
+        Calculates the DC ratio as defined by the following formula:
+
+        .. math::
+
+            D(N) = \frac{\sum_{i=1}^N{|FFT_i|}}{|FFT_0|}
+
+        **Parameters:**
+
+        ``arr`` : 1D :py:class:`numpy.ndarray`
+            A 1D array containing frame differences.
+
+        **Returns:**
+
+        ``dcratio`` : :py:class:`float`
+            Calculated DC ratio.
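+
+        A minimal sketch of the computation on hypothetical values (``np`` is
+        :py:mod:`numpy`, as imported above)::
+
+            arr = np.array([1., 2., 3., 4.])  # frame differences in one window
+            res = np.absolute(np.fft.fft(arr.astype('complex128')))
+            dcratio = sum(res[1:]) / res[0]   # (|FFT_1| + ... + |FFT_N|) / |FFT_0|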
+        """
+
+        if arr.shape[0] <= 1:
+            return 0.
+
+        res = np.fft.fft(arr.astype('complex128'))
+        res = np.absolute(res)  # absolute value
+
+        if res[0] == 0:
+            s = sum(res[1:])
+            if s > 0:
+                return sys.float_info.max
+            elif s < 0:
+                return -sys.float_info.max
+            else:
+                return 0
+
+        dcratio = sum(res[1:]) / res[0]
+
+        return dcratio
+
+
+    #==========================================================================
+    def remove_nan_rows(self, data):
+        """
+        This function removes rows containing NaNs from the input array. If the
+        input array contains NaNs only, then an array of ones of the size
+        (1 x n_features) is returned.
+
+        **Parameters:**
+
+        ``data`` : 2D :py:class:`numpy.ndarray`
+            An input array of features. Rows - samples, columns - features.
+
+        **Returns:**
+
+        ``ret_arr`` : 2D :py:class:`numpy.ndarray`
+           Array of features without nan samples. Rows - samples, columns - features.
+        """
+
+        d = np.vstack(data)
+
+        ret_arr = d[~np.isnan(d.sum(axis=1)), :]
+
+        if ret_arr.shape[0] == 0: # if array is empty, return array of ones
+
+            ret_arr = np.ones((1, ret_arr.shape[1]))
+
+        return ret_arr
+
+
+    #==========================================================================
+    def cluster_5quantities(self, arr, window_size, overlap):
+        """
+        Calculates the clustered values as described at the paper: Counter-
+        Measures to Photo Attacks in Face Recognition: a public database and a
+        baseline, Anjos & Marcel, IJCB'11.
+
+        This script will output a number of clustered observations containing the 5
+        described quantities for windows of a configurable size (N):
+
+            1. The minimum value observed on the cluster
+            2. The maximum value observed on the cluster
+            3. The mean value observed
+            4. The standard deviation on the cluster (unbiased estimator)
+            5. The DC ratio (D) as defined by:
+
+        .. math::
+
+            D(N) = \frac{\sum_{i=1}^N{|FFT_i|}}{|FFT_0|}
+
+        .. note::
+
+            We always ignore the first entry from the input array as, by
+            definition, it is always zero.
+
+        **Parameters:**
+
+        ``arr`` : 1D :py:class:`numpy.ndarray`
+            A 1D array containing frame differences.
+
+        ``window_size`` : :py:class:`int`
+            The size of the window to use for feature computation.
+
+        ``overlap`` : :py:class:`int`
+            Determines the window overlapping; this number has to be between
+            0 (no overlapping) and 'window-size'-1.
+
+        **Returns:**
+
+        ``retval`` : 2D :py:class:`numpy.ndarray`
+            Array of features without nan samples. Rows - samples, columns - features.
+            Here a sample corresponds to the features computed from a particular
+            window of the length ``window_size``.
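+
+        For example, with ``window_size = 20`` and ``overlap = 0`` the loop below
+        fills one 5-element row at indices 19, 39, 59, ... of the intermediate
+        array; all other rows stay NaN and are stripped by
+        :py:meth:`remove_nan_rows` before returning.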
+        """
+
+        retval = np.ndarray((arr.shape[0], 5), dtype='float64')
+        retval[:] = np.NaN
+
+        for k in range(0, arr.shape[0] - window_size + 1, window_size - overlap):
+
+            obs = arr[k:k + window_size].copy()
+
+            # replace NaN values with the mean of the valid (non-NaN) observations
+            # in the window, so they do not disturb the calculations much
+            ok = obs[~np.isnan(obs)]
+
+            obs[np.isnan(obs)] = ok.mean()
+
+            retval[k + window_size - 1] = \
+                (obs.min(), obs.max(), obs.mean(), obs.std(ddof=1), self.dcratio(obs))
+
+        retval = self.remove_nan_rows(retval) # remove rows that are still NaN (never filled)
+
+        return retval
+
+
+    #==========================================================================
+    def convert_arr_to_frame_cont(self, data):
+        """
+        This function converts an array of samples into a FrameContainer, where
+        each frame stores features of a particular sample.
+
+        **Parameters:**
+
+        ``data`` : 2D :py:class:`numpy.ndarray`
+            An input array of features of the size
+            (Nr. of samples X Nr. of features).
+
+        **Returns:**
+
+        ``frames`` : FrameContainer
+            Resulting FrameContainer, where each frame stores features of
+            a particular sample.
+        """
+
+        frames = bob.bio.video.FrameContainer() # initialize the FrameContainer
+
+        for idx, sample in enumerate(data):
+
+            frames.add(idx, sample)
+
+        return frames
+
+
+    #==========================================================================
+    def comp_features(self, data, window_size, overlap):
+        """
+        This function computes features for frame differences in the facial and
+        non-facial regions.
+
+        **Parameters:**
+
+        ``data`` : 2D :py:class:`numpy.ndarray`
+            An input array of frame differences in facial and non-facial regions.
+            The first column contains frame differences of facial regions.
+            The second column contains frame differences of non-facial/background regions.
+
+        ``window_size`` : :py:class:`int`
+            The size of the window to use for feature computation.
+
+        ``overlap`` : :py:class:`int`
+            Determines the window overlapping; this number has to be between
+            0 (no overlapping) and 'window-size'-1.
+
+        **Returns:**
+
+        ``frames`` : FrameContainer
+            Features describing frame differences, stored in the FrameContainer.
+        """
+
+        d_face = self.cluster_5quantities( data[:, 0], window_size, overlap )
+
+        d_bg = self.cluster_5quantities( data[:, 1], window_size, overlap )
+
+        features = np.hstack((d_face, d_bg))
+
+        frames = self.convert_arr_to_frame_cont(features)
+
+        return frames
+
+
+    #==========================================================================
+    def __call__(self, data):
+        """
+        This function computes features for frame differences in the facial and
+        non-facial regions.
+
+        **Parameters:**
+
+        ``data`` : 2D :py:class:`numpy.ndarray`
+            An input array of frame differences in facial and non-facial regions.
+            The first column contains frame differences of facial regions.
+            The second column contains frame differences of non-facial/background regions.
+
+        **Returns:**
+
+        ``frames`` : FrameContainer
+            Features describing frame differences, stored in the FrameContainer.
+        """
+
+        frames = self.comp_features(data, self.window_size, self.overlap)
+
+        return frames
+
+
+    #==========================================================================
+    def write_feature(self, frames, file_name):
+        """
+        Writes the given data (that has been generated using the ``__call__`` method of this class) to file.
+        This method overrides the ``write_feature()`` method of the Extractor class.
+
+        **Parameters:**
+
+        ``frames`` :
+            Data returned by the __call__ method of the class.
+
+        ``file_name`` : :py:class:`str`
+            Name of the file.
+        """
+
+        bob.bio.video.extractor.Wrapper(Extractor()).write_feature(frames, file_name)
+
+
+    #==========================================================================
+    def read_feature(self, file_name):
+        """
+        Reads the extracted features from file.
+        This method overrides the ``read_feature()`` method of the Extractor class.
+
+        **Parameters:**
+
+        ``file_name`` : :py:class:`str`
+            Name of the file.
+
+        **Returns:**
+
+        ``frames`` : :py:class:`bob.bio.video.FrameContainer`
+            Frames stored in the frame container.
+        """
+
+        frames = bob.bio.video.extractor.Wrapper(Extractor()).read_feature(file_name)
+
+        return frames
diff --git a/bob/pad/face/extractor/__init__.py b/bob/pad/face/extractor/__init__.py
index 18f4b835c9389d6486d8e548a817784646fbb339..310989b454bdd1ebb73ef43b6c4f4a9fd56a72e6 100644
--- a/bob/pad/face/extractor/__init__.py
+++ b/bob/pad/face/extractor/__init__.py
@@ -3,6 +3,7 @@ from .VideoLBPHistogram import VideoLBPHistogram
 from .ImageQualityMeasure import ImageQualityMeasure
 from .VideoDataLoader import VideoDataLoader
 from .VideoQualityMeasure import VideoQualityMeasure
+from .FrameDiffFeatures import FrameDiffFeatures
 
 def __appropriate__(*args):
     """Says object was actually declared here, and not in the import module.
@@ -28,5 +29,6 @@ __appropriate__(
     ImageQualityMeasure,
     VideoQualityMeasure,
     VideoDataLoader,
+    FrameDiffFeatures,
 )
 __all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/doc/references.rst b/doc/references.rst
index 911101849170b2dd52a7cd6d1a885887bf7b9166..9b34385ac989b36a6792fcf40dc75187603218b6 100644
--- a/doc/references.rst
+++ b/doc/references.rst
@@ -12,3 +12,6 @@ References
 
 .. [CBVM16] *A. Costa-Pazo, S. Bhattacharjee, E. Vazquez-Fernandez and S. Marcel*, **The Replay-Mobile Face Presentation-Attack Database**,
             in: Biometrics Special Interest Group (BIOSIG), 2016 BIOSIG - Proceedings of the International Conference of the, 2016, pp. 1-7.
+
+.. [AM11] *A. Anjos and S. Marcel*, **Counter-measures to photo attacks in face recognition: A public database and a baseline**,
+          in: 2011 International Joint Conference on Biometrics (IJCB), Washington, DC, 2011, pp. 1-7.
diff --git a/setup.py b/setup.py
index 338b4dcfe4b03fda511137b5ae065a7604822587..13ada3f681ca3b229b4a059ed700f8b52a976cd6 100644
--- a/setup.py
+++ b/setup.py
@@ -104,6 +104,7 @@ setup(
             # baselines:
             'lbp-svm = bob.pad.face.config.lbp_svm',
             'qm-svm = bob.pad.face.config.qm_svm',
+            'frame-diff-svm = bob.pad.face.config.frame_diff_svm',
             ],
 
         # registered preprocessors:
@@ -120,6 +121,8 @@ setup(
         'bob.pad.extractor': [
             'video-lbp-histogram-extractor-n8r1-uniform = bob.pad.face.config.extractor.video_lbp_histogram:video_lbp_histogram_extractor_n8r1_uniform',
             'video-quality-measure-galbally-msu = bob.pad.face.config.extractor.video_quality_measure:video_quality_measure_galbally_msu',
+            'frame-diff-feat-extr-w20-over0 = bob.pad.face.config.extractor.frame_diff_features:frame_diff_feat_extr_w20_over0',
+            'frame-diff-feat-extr-w100-over0 = bob.pad.face.config.extractor.frame_diff_features:frame_diff_feat_extr_w100_over0',
             ],
 
         # registered algorithms: