diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 5828c802b7949e5330729875df219a7cb4507169..6489c109f75951127ce5701f50389680dea3b440 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -177,6 +177,24 @@ docs_linux_35:
     - conda-linux
 
 
+# Linux + Python 3.6: Builds and tests
+build_linux_36:
+  <<: *build_job
+  variables: &linux_36_build_variables
+    PYTHON_VERSION: "3.6"
+    WHEEL_TAG: "py3"
+  tags:
+    - conda-linux
+
+test_linux_36:
+  <<: *test_job
+  variables: *linux_36_build_variables
+  dependencies:
+    - build_linux_36
+  tags:
+    - conda-linux
+
+
 # Mac OSX + Python 2.7: Builds and tests
 build_macosx_27:
   <<: *build_job
@@ -211,3 +229,21 @@ test_macosx_35:
     - build_macosx_35
   tags:
     - conda-macosx
+
+
+# Mac OSX + Python 3.6: Builds and tests
+build_macosx_36:
+  <<: *build_job
+  variables: &macosx_36_build_variables
+    PYTHON_VERSION: "3.6"
+    WHEEL_TAG: "py3"
+  tags:
+    - conda-macosx
+
+test_macosx_36:
+  <<: *test_job
+  variables: *macosx_36_build_variables
+  dependencies:
+    - build_macosx_36
+  tags:
+    - conda-macosx
diff --git a/bob/pad/face/algorithm/VideoSvmPadAlgorithm.py b/bob/pad/face/algorithm/VideoSvmPadAlgorithm.py
new file mode 100644
index 0000000000000000000000000000000000000000..e99f7572ee5560bc808643c86c96534569faf9e6
--- /dev/null
+++ b/bob/pad/face/algorithm/VideoSvmPadAlgorithm.py
@@ -0,0 +1,785 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""
+Created on Wed May 17 09:43:09 2017
+
+@author: Olegs Nikisins
+"""
+
+#==============================================================================
+# Import what is needed here:
+
+from bob.pad.base.algorithm import Algorithm
+
+import itertools as it
+
+import numpy as np
+
+import bob.learn.libsvm
+
+import bob.io.base
+
+import os
+
+#==============================================================================
+# Main body :
+
+class VideoSvmPadAlgorithm(Algorithm):
+    """
+    This class is designed to train an SVM given Frame Containers with features
+    of real and attack classes. The trained SVM is then used to classify the
+    testing data as either real or attack. The SVM is trained in two stages.
+    First, the best parameters for the SVM are estimated using the train and
+    cross-validation subsets. The size of the subsets used in hyper-parameter
+    tuning is defined by the ``n_samples`` parameter of this class. Once the best
+    parameters are determined, the SVM machine is trained using the complete
+    training set.
+
+    **Parameters:**
+
+    ``machine_type`` : :py:class:`str`
+        A type of the SVM machine. Please check ``bob.learn.libsvm`` for
+        more details. Default: 'C_SVC'.
+
+    ``kernel_type`` : :py:class:`str`
+        A type of kernel for the SVM machine. Please check ``bob.learn.libsvm``
+        for more details. Default: 'RBF'.
+
+    ``n_samples`` : :py:class:`int`
+        Number of uniformly selected feature vectors per class defining the
+        sizes of sub-sets used in the hyper-parameter grid search.
+
+    ``trainer_grid_search_params`` : :py:class:`dict`
+        Dictionary containing the hyper-parameters of the SVM to be tested
+        in the grid-search.
+        Default: {'cost': [2**p for p in range(-5, 16, 2)], 'gamma': [2**p for p in range(-15, 4, 2)]}.
+
+    ``mean_std_norm_flag`` : :py:class:`bool`
+        Perform mean-std normalization of data if set to True. Default: False.
+
+    ``frame_level_scores_flag`` : :py:class:`bool`
+        Return scores for each frame individually if True. Otherwise, return a
+        single score per video. Default: False.
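+
+    **Example** (an illustrative instantiation sketch; the parameter values are
+    assumptions, not recommendations):
+
+    >>> from bob.pad.face.algorithm import VideoSvmPadAlgorithm
+    >>> algorithm = VideoSvmPadAlgorithm(machine_type = 'C_SVC',
+    ...                                  kernel_type = 'RBF',
+    ...                                  n_samples = 10000,
+    ...                                  frame_level_scores_flag = True) # doctest: +SKIP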
+    """
+
+    def __init__(self,
+                 machine_type = 'C_SVC',
+                 kernel_type = 'RBF',
+                 n_samples = 10000,
+                 trainer_grid_search_params = { 'cost': [2**p for p in range(-5, 16, 2)], 'gamma': [2**p for p in range(-15, 4, 2)]},
+                 mean_std_norm_flag = False,
+                 frame_level_scores_flag = False):
+
+
+        Algorithm.__init__(self,
+                           machine_type = machine_type,
+                           kernel_type = kernel_type,
+                           n_samples = n_samples,
+                           trainer_grid_search_params = trainer_grid_search_params,
+                           mean_std_norm_flag = mean_std_norm_flag,
+                           frame_level_scores_flag = frame_level_scores_flag,
+                           performs_projection=True,
+                           requires_projector_training=True)
+
+        self.machine_type = machine_type
+        self.kernel_type = kernel_type
+        self.n_samples = n_samples
+        self.trainer_grid_search_params = trainer_grid_search_params
+        self.mean_std_norm_flag = mean_std_norm_flag
+        self.frame_level_scores_flag = frame_level_scores_flag
+        self.machine = None
+
+
+    #==========================================================================
+    def convert_frame_cont_to_array(self, frame_container):
+        """
+        This function converts a single Frame Container into an array of features.
+        The rows are samples, the columns are features.
+
+        **Parameters:**
+
+        ``frame_container`` : object
+            A Frame Container containing the features of an individual,
+            see ``bob.bio.video.utils.FrameContainer``.
+
+        **Returns:**
+
+        ``features_array`` : 2D :py:class:`numpy.ndarray`
+            An array containing features for all frames.
+            The rows are samples, the columns are features.
+        """
+
+        feature_vectors = []
+
+        frame_dictionary = {}
+
+        for frame in frame_container:
+
+            frame_dictionary[frame[0]] = frame[1]
+
+        for idx, _ in enumerate(frame_container):
+
+            # Frames may be stored out of order, so retrieve them by an incrementing frame index:
+            feature_vectors.append(frame_dictionary[str(idx)])
+
+        features_array = np.vstack(feature_vectors)
+
+        return features_array
+
+
+    #==========================================================================
+    def convert_list_of_frame_cont_to_array(self, frame_containers):
+        """
+        This function converts a list of Frame containers into an array of features.
+        Features from different frame containers (individuals) are concatenated into the
+        same list. This list is then converted to an array. The rows are samples,
+        the columns are features.
+
+        **Parameters:**
+
+        ``frame_containers`` : :py:class:`list`
+            Each element in the list is a Frame Container, see ``bob.bio.video.utils.FrameContainer``.
+            Each Frame Container contains the feature vectors of a particular individual.
+
+        **Returns:**
+
+        ``features_array`` : 2D :py:class:`numpy.ndarray`
+            An array containing features for all frames of all individuals.
+        """
+
+        feature_vectors = []
+
+        for frame_container in frame_containers:
+
+            video_features_array = self.convert_frame_cont_to_array(frame_container)
+
+            feature_vectors.append( video_features_array )
+
+        features_array = np.vstack(feature_vectors)
+
+        return features_array
+
+
+    #==========================================================================
+    def combinations(self, input_dict):
+        """
+        Generate all possible key-value combinations from the input dictionary,
+        whose values are lists of parameter settings.
+
+        **Parameters:**
+
+        ``input_dict`` : :py:class:`dict`
+            Input dictionary with list values.
+
+        **Returns:**
+
+        ``combinations`` : :py:class:`list`
+            List of dictionaries containing the combinations.
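+
+        **Example** (illustrative values; a doctest-style sketch of the expected result):
+
+        >>> algorithm.combinations({'cost': [1, 10], 'gamma': [0.01, 0.1]}) # doctest: +SKIP
+        [{'cost': 1, 'gamma': 0.01}, {'cost': 1, 'gamma': 0.1}, {'cost': 10, 'gamma': 0.01}, {'cost': 10, 'gamma': 0.1}]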
+        """
+
+        varNames = sorted(input_dict)
+
+        combinations = [dict(zip(varNames, prod))
+                        for prod in it.product(*(input_dict[varName] for varName in varNames))]
+
+        return combinations
+
+
+    #==========================================================================
+    def select_uniform_data_subset(self, features, n_samples):
+        """
+        Uniformly select N samples/feature vectors from the input array of samples.
+        The rows in the input array are samples. The columns are features.
+
+        **Parameters:**
+
+        ``features`` : 2D :py:class:`numpy.ndarray`
+            Input array with feature vectors. The rows are samples, columns are features.
+
+        ``n_samples`` : :py:class:`int`
+            The number of samples to be selected uniformly from the input array of features.
+
+        **Returns:**
+
+        ``features_subset`` : 2D :py:class:`numpy.ndarray`
+            Selected subset of features.
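+            For example (illustrative numbers): an input with 100 rows and
+            ``n_samples = 10`` gives ``uniform_step = 10``, so rows
+            0, 10, 20, ..., 90 are selected.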
+        """
+
+        if features.shape[0] <= n_samples:
+
+            features_subset = features
+
+        else:
+
+            uniform_step = features.shape[0] // n_samples # integer division, so the result is a valid slice step in both Python 2 and 3
+
+            features_subset = features[0 : uniform_step*n_samples : uniform_step, :]
+
+        return features_subset
+
+
+    #==========================================================================
+    def split_data_to_train_cv(self, features):
+        """
+        This function is designed to split the input array of features into two
+        subsets, namely train and cross-validation. These subsets can be used to tune the
+        hyper-parameters of the SVM. The split is 50/50: the first half of the
+        samples in the input forms the train set, and the second half forms the
+        cross-validation set.
+
+        **Parameters:**
+
+        ``features`` : 2D :py:class:`numpy.ndarray`
+            Input array with feature vectors. The rows are samples, columns are features.
+
+        **Returns:**
+
+        ``features_train`` : 2D :py:class:`numpy.ndarray`
+            Selected subset of train features.
+
+        ``features_cv`` : 2D :py:class:`numpy.ndarray`
+            Selected subset of cross-validation features.
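+
+        For example (illustrative): an input with 10 rows yields rows 0-4 as the
+        train subset and rows 5-9 as the cross-validation subset.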
+        """
+
+        half_samples_num = features.shape[0] // 2 # integer division, so the result is a valid slice index in both Python 2 and 3
+
+        features_train = features[ 0 : half_samples_num, : ]
+        features_cv = features[ half_samples_num : 2 * half_samples_num + 1, : ]
+
+        return features_train, features_cv
+
+
+    #==========================================================================
+    def prepare_data_for_hyper_param_grid_search(self, training_features, n_samples):
+        """
+        This function converts the list of all training features returned by the
+        ``read_features`` method of the extractor into sub-sampled train and
+        cross-validation arrays for both real and attack classes.
+
+        **Parameters:**
+
+        ``training_features`` : :py:class:`list`
+            A list containing two elements: [0] - a list of Frame Containers with
+            feature vectors for the real class; [1] - a list of Frame Containers with
+            feature vectors for the attack class.
+
+        ``n_samples`` : :py:class:`int`
+            Number of uniformly selected feature vectors per class.
+
+        **Returns:**
+
+        ``real_train`` : 2D :py:class:`numpy.ndarray`
+            Selected subset of train features for the real class.
+            The number of samples in this set is ``n_samples/2``, as defined
+            by the ``split_data_to_train_cv`` method of this class.
+
+        ``real_cv`` : 2D :py:class:`numpy.ndarray`
+            Selected subset of cross-validation features for the real class.
+            The number of samples in this set is ``n_samples/2``, as defined
+            by the ``split_data_to_train_cv`` method of this class.
+
+        ``attack_train`` : 2D :py:class:`numpy.ndarray`
+            Selected subset of train features for the attack class.
+            The number of samples in this set is ``n_samples/2``, as defined
+            by the ``split_data_to_train_cv`` method of this class.
+
+        ``attack_cv`` : 2D :py:class:`numpy.ndarray`
+            Selected subset of cross-validation features for the attack class.
+            The number of samples in this set is ``n_samples/2``, as defined
+            by the ``split_data_to_train_cv`` method of this class.
+        """
+
+        # training_features[0] - training features for the REAL class.
+        real = self.convert_list_of_frame_cont_to_array(training_features[0]) # output is array
+        # training_features[1] - training features for the ATTACK class.
+        attack = self.convert_list_of_frame_cont_to_array(training_features[1]) # output is array
+
+        # uniformly select subsets of features:
+        real_subset = self.select_uniform_data_subset(real, n_samples)
+        attack_subset = self.select_uniform_data_subset(attack, n_samples)
+
+        # split the data into train and cross-validation:
+        real_train, real_cv = self.split_data_to_train_cv(real_subset)
+        attack_train, attack_cv = self.split_data_to_train_cv(attack_subset)
+
+        return real_train, real_cv, attack_train, attack_cv
+
+
+    #==========================================================================
+    def comp_prediction_precision(self, machine, real, attack):
+        """
+        This function computes the precision of the predictions as a ratio
+        of correctly classified samples to the total number of samples.
+
+        **Parameters:**
+
+        ``machine`` : object
+            A pre-trained SVM machine.
+
+        ``real`` : 2D :py:class:`numpy.ndarray`
+            Array of features representing the real class.
+
+        ``attack`` : 2D :py:class:`numpy.ndarray`
+            Array of features representing the attack class.
+
+        **Returns:**
+
+        ``precision`` : :py:class:`float`
+            The precision of the predictions.
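+            For example (illustrative): if every real sample is predicted as ``+1``
+            and every attack sample as ``-1``, the precision is ``1.0``; if half of
+            all samples are misclassified, it is ``0.5``.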
+        """
+
+        labels_real = machine.predict_class(real)
+
+        labels_attack = machine.predict_class(attack)
+
+        samples_num = len(labels_real) + len(labels_attack)
+
+        precision = ( np.sum(labels_real == 1) + np.sum(labels_attack == -1) ).astype( np.float ) / samples_num
+
+        return precision
+
+
+    #==========================================================================
+    def mean_std_normalize(self, features, features_mean= None, features_std = None):
+        """
+        The features in the input 2D array are mean-std normalized.
+        The rows are samples, the columns are features. If ``features_mean``
+        and ``features_std`` are provided, then these vectors will be used for
+        normalization. Otherwise, the mean and std of the features are
+        computed on the fly.
+
+        **Parameters:**
+
+        ``features`` : 2D :py:class:`numpy.ndarray`
+            Array of features to be normalized.
+
+        ``features_mean`` : 1D :py:class:`numpy.ndarray`
+            Mean of the features. Default: None.
+
+        ``features_std`` : 1D :py:class:`numpy.ndarray`
+            Standard deviation of the features. Default: None.
+
+        **Returns:**
+
+        ``features_norm`` : 2D :py:class:`numpy.ndarray`
+            Normalized array of features.
+
+        ``features_mean`` : 1D :py:class:`numpy.ndarray`
+            Mean of the features.
+
+        ``features_std`` : 1D :py:class:`numpy.ndarray`
+            Standard deviation of the features.
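+
+        **Example** (illustrative numbers): for ``features = [[1., 2.], [3., 4.]]``
+        the per-column mean is ``[2., 3.]`` and the per-column std is ``[1., 1.]``,
+        so the normalized array is ``[[-1., -1.], [1., 1.]]``.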
+        """
+
+        features = np.copy(features)
+
+        # Compute mean and std if not given:
+        if features_mean is None:
+
+            features_mean = np.mean(features, axis=0)
+
+            features_std = np.std(features, axis=0)
+
+        row_norm_list = []
+
+        for row in features: # row is a sample
+
+            row_norm = (row - features_mean) / features_std
+
+            row_norm_list.append(row_norm)
+
+        features_norm = np.vstack(row_norm_list)
+
+        return features_norm, features_mean, features_std
+
+
+    #==========================================================================
+    def norm_train_cv_data(self, real_train, real_cv, attack_train, attack_cv):
+        """
+        Mean-std normalization of train and cross-validation data arrays.
+
+        **Parameters:**
+
+        ``real_train`` : 2D :py:class:`numpy.ndarray`
+            Subset of train features for the real class.
+
+        ``real_cv`` : 2D :py:class:`numpy.ndarray`
+            Subset of cross-validation features for the real class.
+
+        ``attack_train`` : 2D :py:class:`numpy.ndarray`
+            Subset of train features for the attack class.
+
+        ``attack_cv`` : 2D :py:class:`numpy.ndarray`
+            Subset of cross-validation features for the attack class.
+
+        **Returns:**
+
+        ``real_train_norm`` : 2D :py:class:`numpy.ndarray`
+            Normalized subset of train features for the real class.
+
+        ``real_cv_norm`` : 2D :py:class:`numpy.ndarray`
+            Normalized subset of cross-validation features for the real class.
+
+        ``attack_train_norm`` : 2D :py:class:`numpy.ndarray`
+            Normalized subset of train features for the attack class.
+
+        ``attack_cv_norm`` : 2D :py:class:`numpy.ndarray`
+            Normalized subset of cross-validation features for the attack class.
+        """
+
+        features_train = np.vstack([real_train, attack_train])
+
+        features_train_norm, features_mean, features_std = self.mean_std_normalize(features_train)
+
+        real_train_norm = features_train_norm[0:real_train.shape[0], :]
+
+        attack_train_norm = features_train_norm[real_train.shape[0]:, :]
+
+        real_cv_norm, _, _ = self.mean_std_normalize(real_cv, features_mean, features_std)
+
+        attack_cv_norm, _, _ = self.mean_std_normalize(attack_cv, features_mean, features_std)
+
+        return real_train_norm, real_cv_norm, attack_train_norm, attack_cv_norm
+
+
+    #==========================================================================
+    def train_svm(self, training_features, n_samples = 10000,
+                  machine_type = 'C_SVC', kernel_type = 'RBF',
+                  trainer_grid_search_params = { 'cost': [2**p for p in range(-5, 16, 2)], 'gamma': [2**p for p in range(-15, 4, 2)]},
+                  mean_std_norm_flag = False,
+                  projector_file = ""):
+        """
+        First, this function tunes the hyper-parameters of the SVM classifier using
+        grid search on the sub-sets of training data. Train and cross-validation
+        subsets for both classes are formed from the available input training_features.
+
+        Once the best parameters are determined, the SVM is trained on the
+        whole training data set. The resulting machine is returned by the function.
+
+        **Parameters:**
+
+        ``training_features`` : :py:class:`list`
+            A list containing two elements: [0] - a list of Frame Containers with
+            feature vectors for the real class; [1] - a list of Frame Containers with
+            feature vectors for the attack class.
+
+        ``n_samples`` : :py:class:`int`
+            Number of uniformly selected feature vectors per class defining the
+            sizes of sub-sets used in the hyper-parameter grid search.
+
+        ``machine_type`` : :py:class:`str`
+            A type of the SVM machine. Please check ``bob.learn.libsvm`` for
+            more details.
+
+        ``kernel_type`` : :py:class:`str`
+            A type of kernel for the SVM machine. Please check ``bob.learn.libsvm``
+            for more details.
+
+        ``trainer_grid_search_params`` : :py:class:`dict`
+            Dictionary containing the hyper-parameters of the SVM to be tested
+            in the grid-search.
+
+        ``mean_std_norm_flag`` : :py:class:`bool`
+            Perform mean-std normalization of data if set to True. Default: False.
+
+        ``projector_file`` : :py:class:`str`
+            The name of the file to save the trained projector to. Only the path
+            of this file is used in this function. The file ``debug_data.hdf5`` will
+            be saved in this path. This file contains information which might be
+            useful for debugging.
+
+        **Returns:**
+
+        ``machine`` : object
+            A trained SVM machine.
+        """
+
+        # get the data for the hyper-parameter grid-search:
+        real_train, real_cv, attack_train, attack_cv = self.prepare_data_for_hyper_param_grid_search(training_features, n_samples)
+
+        if mean_std_norm_flag:
+            # normalize the data:
+            real_train, real_cv, attack_train, attack_cv = self.norm_train_cv_data(real_train, real_cv, attack_train, attack_cv)
+
+        precisions_cv = [] # for saving the precision on the cross-validation set
+
+        precisions_train = []
+
+        trainer_grid_search_params_list = self.combinations(trainer_grid_search_params) # list containing all combinations of params
+
+        for trainer_grid_search_param in trainer_grid_search_params_list:
+
+            # initialize the SVM trainer:
+            trainer = bob.learn.libsvm.Trainer(machine_type = machine_type,
+                                               kernel_type = kernel_type,
+                                               probability = True)
+
+            for key in trainer_grid_search_param.keys():
+
+                setattr(trainer, key, trainer_grid_search_param[key]) # set the params of trainer
+
+            data  = [np.copy(real_train), np.copy(attack_train)] # data used for training the machine in the grid-search
+
+            machine = trainer.train(data) # train the machine
+
+            precision_cv = self.comp_prediction_precision(machine, np.copy(real_cv), np.copy(attack_cv))
+
+            precision_train = self.comp_prediction_precision(machine, np.copy(real_train), np.copy(attack_train))
+
+            precisions_cv.append(precision_cv)
+
+            precisions_train.append(precision_train)
+
+            del data
+            del machine
+            del trainer
+
+        selected_params = trainer_grid_search_params_list[np.argmax(precisions_cv)] # best SVM parameters according to CV set
+
+        trainer = bob.learn.libsvm.Trainer(machine_type = machine_type,
+                                           kernel_type = kernel_type,
+                                           probability = True)
+
+        for key in selected_params.keys():
+
+            setattr(trainer, key, selected_params[key]) # set the params of trainer
+
+        # Save the data, which is useful for debugging.
+        debug_file = os.path.join( os.path.split(projector_file)[0], "debug_data.hdf5" )
+        debug_dict = {}
+        debug_dict['precisions_train'] = precisions_train
+        debug_dict['precisions_cv'] = precisions_cv
+        debug_dict['cost'] = selected_params['cost']
+        debug_dict['gamma'] = selected_params['gamma']
+        f = bob.io.base.HDF5File(debug_file, 'w') # open hdf5 file to save the debug data
+        for key in debug_dict.keys():
+            f.set(key, debug_dict[key])
+        del f
+
+        # training_features[0] - training features for the REAL class.
+        real = self.convert_list_of_frame_cont_to_array(training_features[0]) # output is array
+        # training_features[1] - training features for the ATTACK class.
+        attack = self.convert_list_of_frame_cont_to_array(training_features[1]) # output is array
+
+        if mean_std_norm_flag:
+            # Normalize the data:
+            features = np.vstack([real, attack])
+            features_norm, features_mean, features_std = self.mean_std_normalize(features)
+            real =   features_norm[0:real.shape[0], :] # The array is now normalized
+            attack = features_norm[real.shape[0]:, :] # The array is now normalized
+
+        data = [np.copy(real), np.copy(attack)] # data for final training
+
+        machine = trainer.train(data) # train the machine
+
+        if mean_std_norm_flag:
+            machine.input_subtract = features_mean # subtract the mean of train data
+            machine.input_divide   = features_std  # divide by std of train data
+
+        del data
+
+        return machine
+
+
+    #==========================================================================
+    def train_projector(self, training_features, projector_file):
+        """
+        Train the SVM feature projector and save the trained SVM to the given file.
+        The flag ``requires_projector_training = True`` must be set in the
+        constructor to enable this function.
+
+        **Parameters:**
+
+        ``training_features`` : :py:class:`list`
+            A list containing two elements: [0] - a list of Frame Containers with
+            feature vectors for the real class; [1] - a list of Frame Containers with
+            feature vectors for the attack class.
+
+        ``projector_file`` : :py:class:`str`
+            The file to save the trained projector to.
+            This file should be readable with the :py:meth:`load_projector` function.
+        """
+
+        machine = self.train_svm(training_features = training_features,
+                                 n_samples = self.n_samples,
+                                 machine_type = self.machine_type,
+                                 kernel_type = self.kernel_type,
+                                 trainer_grid_search_params = self.trainer_grid_search_params,
+                                 mean_std_norm_flag = self.mean_std_norm_flag,
+                                 projector_file = projector_file)
+
+        f = bob.io.base.HDF5File(projector_file, 'w') # open hdf5 file to save to
+
+        machine.save(f) # save the machine and normalization parameters
+
+        del f
+
+
+    #==========================================================================
+    def load_projector(self, projector_file):
+        """
+        Load the pretrained projector/SVM from file to perform a feature projection.
+        This function is usually used in combination with the
+        :py:meth:`train_projector` method of this class.
+
+        Please register ``performs_projection = True`` in the constructor to
+        enable this function.
+
+        **Parameters:**
+
+        ``projector_file`` : :py:class:`str`
+            The file to read the projector from.
+        """
+
+        f = bob.io.base.HDF5File(projector_file, 'a')
+
+        self.machine = bob.learn.libsvm.Machine(f)
+
+        del f
+
+
+    #==========================================================================
+    def project(self, feature):
+        """
+        This function computes class probabilities for the input feature using the pretrained SVM.
+        The feature in this case is a Frame Container with features for each frame.
+        The probabilities will be computed and returned for each frame.
+
+        Set ``performs_projection = True`` in the constructor to enable this function.
+        It is assured that :py:meth:`load_projector` is called before
+        ``project`` is executed.
+
+        **Parameters:**
+
+        ``feature`` : object
+            A Frame Container containing the features of an individual,
+            see ``bob.bio.video.utils.FrameContainer``.
+
+        **Returns:**
+
+        ``probabilities`` : 2D :py:class:`numpy.ndarray`
+            An array containing class probabilities for each frame.
+            First column contains probabilities for each frame being a real class.
+            Second column contains probabilities for each frame being an attack class.
+            Must be writable with the :py:meth:`write_feature` function and
+            readable with the :py:meth:`read_feature` function.
+        """
+
+        features_array = self.convert_frame_cont_to_array(feature)
+
+        probabilities = self.machine.predict_class_and_probabilities(features_array)[1]
+
+        return probabilities
+
+
+    #==========================================================================
+    def score(self, toscore):
+        """
+        Returns a probability of a sample being a real class.
+
+        **Parameters:**
+
+        ``toscore`` : 2D :py:class:`numpy.ndarray`
+            An array containing class probabilities for each frame.
+            First column contains probabilities for each frame being a real class.
+            Second column contains probabilities for each frame being an attack class.
+
+        **Returns:**
+
+        ``score`` : :py:class:`float` or 1D :py:class:`numpy.ndarray`
+            A score value for the object ``toscore``: the probability of the
+            sample being a real class. If ``frame_level_scores_flag`` is set,
+            an array of individual per-frame scores is returned instead.
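+
+            For example (illustrative): ``toscore = [[0.9, 0.1], [0.7, 0.3]]`` gives
+            ``0.8`` when ``frame_level_scores_flag = False`` and ``[0.9, 0.7]`` when
+            it is ``True``.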
+        """
+
+        if self.frame_level_scores_flag:
+
+            score = toscore[:,0] # here score is a list containing scores for each frame
+
+        else:
+
+            score = np.mean(toscore, axis=0)[0] # compute a single score per video
+
+        return score
+
+
+    #==========================================================================
+    def score_for_multiple_projections(self, toscore):
+        """
+        Returns a list of scores computed by the score method of this class.
+
+        **Parameters:**
+
+        ``toscore`` : 2D :py:class:`numpy.ndarray`
+            An array of class probabilities, as returned by the :py:meth:`project`
+            method of this class.
+
+        **Returns:**
+
+        ``list_of_scores`` : list
+            A list containing the scores.
+        """
+
+        if self.frame_level_scores_flag:
+
+            list_of_scores = self.score(toscore)
+
+        else:
+
+            list_of_scores = [self.score(toscore)]
+
+        return list_of_scores
+
diff --git a/bob/pad/face/algorithm/__init__.py b/bob/pad/face/algorithm/__init__.py
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..77265bf492e0f894c2d938013414ba689bb20ab1 100644
--- a/bob/pad/face/algorithm/__init__.py
+++ b/bob/pad/face/algorithm/__init__.py
@@ -0,0 +1,25 @@
+from .VideoSvmPadAlgorithm import VideoSvmPadAlgorithm
+
+
+def __appropriate__(*args):
+    """Says object was actually declared here, and not in the import module.
+    Fixing sphinx warnings of not being able to find classes, when path is
+    shortened.
+
+    Parameters
+    ----------
+    *args
+        The objects that you want sphinx to beleive that are defined here.
+
+    Resolves `Sphinx referencing issues <https//github.com/sphinx-
+    doc/sphinx/issues/3048>`
+    """
+
+    for obj in args:
+        obj.__module__ = __name__
+
+
+__appropriate__(
+    VideoSvmPadAlgorithm,
+)
+__all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/bob/pad/face/config/algorithm/__init__.py b/bob/pad/face/config/algorithm/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/bob/pad/face/config/algorithm/video_svm_pad_algorithm.py b/bob/pad/face/config/algorithm/video_svm_pad_algorithm.py
new file mode 100644
index 0000000000000000000000000000000000000000..cd6fc1ad3081e0409cb3769b17923fcc5180fc97
--- /dev/null
+++ b/bob/pad/face/config/algorithm/video_svm_pad_algorithm.py
@@ -0,0 +1,49 @@
+#!/usr/bin/env python
+
+from bob.pad.face.algorithm import VideoSvmPadAlgorithm
+
+
+#=======================================================================================
+# Define instances here:
+
+machine_type = 'C_SVC'
+kernel_type = 'RBF'
+n_samples = 10000
+# trainer_grid_search_params = {'cost': [2**p for p in range(-5, 16, 2)], 'gamma': [2**p for p in range(-15, 4, 2)]}
+trainer_grid_search_params = {'cost': [2**p for p in range(-3, 14, 2)], 'gamma': [2**p for p in range(-15, 0, 2)]}
+mean_std_norm_flag = False
+frame_level_scores_flag = False # one score per video(!) in this case
+
+video_svm_pad_algorithm_10k_grid = VideoSvmPadAlgorithm(machine_type = machine_type,
+                                                        kernel_type = kernel_type,
+                                                        n_samples = n_samples,
+                                                        trainer_grid_search_params = trainer_grid_search_params,
+                                                        mean_std_norm_flag = mean_std_norm_flag,
+                                                        frame_level_scores_flag = frame_level_scores_flag)
+
+mean_std_norm_flag = True # enable mean-std normalization
+
+video_svm_pad_algorithm_10k_grid_mean_std = VideoSvmPadAlgorithm(machine_type = machine_type,
+                                                                 kernel_type = kernel_type,
+                                                                 n_samples = n_samples,
+                                                                 trainer_grid_search_params = trainer_grid_search_params,
+                                                                 mean_std_norm_flag = mean_std_norm_flag,
+                                                                 frame_level_scores_flag = frame_level_scores_flag)
+
+frame_level_scores_flag = True # one score per frame(!) in this case
+
+video_svm_pad_algorithm_10k_grid_mean_std_frame_level = VideoSvmPadAlgorithm(machine_type = machine_type,
+                                                                             kernel_type = kernel_type,
+                                                                             n_samples = n_samples,
+                                                                             trainer_grid_search_params = trainer_grid_search_params,
+                                                                             mean_std_norm_flag = mean_std_norm_flag,
+                                                                             frame_level_scores_flag = frame_level_scores_flag)
+
+trainer_grid_search_params = {'cost': [1], 'gamma': [0]} # set the default LibSVM parameters
+
+video_svm_pad_algorithm_default_svm_param_mean_std_frame_level = VideoSvmPadAlgorithm(machine_type = machine_type,
+                                                                                      kernel_type = kernel_type,
+                                                                                      n_samples = n_samples,
+                                                                                      trainer_grid_search_params = trainer_grid_search_params,
+                                                                                      mean_std_norm_flag = mean_std_norm_flag,
+                                                                                      frame_level_scores_flag = frame_level_scores_flag)
\ No newline at end of file
diff --git a/bob/pad/face/config/extractor/__init__.py b/bob/pad/face/config/extractor/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/bob/pad/face/config/extractor/video_lbp_histogram.py b/bob/pad/face/config/extractor/video_lbp_histogram.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee0ec7f7a229961e33514b99ac45e6e018376fba
--- /dev/null
+++ b/bob/pad/face/config/extractor/video_lbp_histogram.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+
+from bob.pad.face.extractor import VideoLBPHistogram
+
+
+#=======================================================================================
+# Define instances here:
+
+lbptype='uniform'
+elbptype='regular'
+rad=1
+neighbors=8
+circ=False
+dtype=None
+
+video_lbp_histogram_extractor_n8r1_uniform = VideoLBPHistogram(lbptype=lbptype,
+                                                               elbptype=elbptype,
+                                                               rad=rad,
+                                                               neighbors=neighbors,
+                                                               circ=circ,
+                                                               dtype=dtype)
diff --git a/bob/pad/face/config/extractor/video_quality_measure.py b/bob/pad/face/config/extractor/video_quality_measure.py
new file mode 100644
index 0000000000000000000000000000000000000000..f6910cc89cdf4cd58d1fe16027a6d83350b60264
--- /dev/null
+++ b/bob/pad/face/config/extractor/video_quality_measure.py
@@ -0,0 +1,15 @@
+#!/usr/bin/env python
+
+from bob.pad.face.extractor import VideoQualityMeasure
+
+
+#=======================================================================================
+# Define instances here:
+
+galbally=True
+msu=True
+dtype=None
+
+video_quality_measure_galbally_msu = VideoQualityMeasure(galbally=galbally,
+                                                         msu=msu,
+                                                         dtype=dtype)
diff --git a/bob/pad/face/config/grid.py b/bob/pad/face/config/grid.py
new file mode 100644
index 0000000000000000000000000000000000000000..12d4c4059b0ced23327c5c6cbaa7a36cb06d13eb
--- /dev/null
+++ b/bob/pad/face/config/grid.py
@@ -0,0 +1,44 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+
+from bob.bio.base.grid import Grid
+
+# Configuration to run on computation cluster:
+idiap = Grid(
+    training_queue='32G',
+
+    number_of_preprocessing_jobs=32,
+    preprocessing_queue='4G-io-big',
+
+    number_of_extraction_jobs=32,
+    extraction_queue='8G-io-big',
+
+    number_of_projection_jobs=32,
+    projection_queue='8G-io-big',
+
+    number_of_enrollment_jobs=32,
+    enrollment_queue='8G-io-big',
+
+    number_of_scoring_jobs=1,
+    scoring_queue='8G-io-big',
+    )
+
+# Configuration to run on user machines:
+idiap_user_machines = Grid(
+    training_queue='32G',
+
+    number_of_preprocessing_jobs=32,
+    preprocessing_queue='4G',
+
+    number_of_extraction_jobs=32,
+    extraction_queue='8G',
+
+    number_of_projection_jobs=32,
+    projection_queue='8G',
+
+    number_of_enrollment_jobs=32,
+    enrollment_queue='8G',
+
+    number_of_scoring_jobs=1,
+    scoring_queue='8G',
+    )
diff --git a/bob/pad/face/config/lbp_svm.py b/bob/pad/face/config/lbp_svm.py
new file mode 100644
index 0000000000000000000000000000000000000000..3c95b9850cf7bb4745c6174fe5fdd2a92d25d371
--- /dev/null
+++ b/bob/pad/face/config/lbp_svm.py
@@ -0,0 +1,111 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+
+"""
+@author: Olegs Nikisins
+
+This file contains configurations to run the LBP and SVM based face PAD baseline.
+The settings are tuned for the Replay-attack database.
+The idea of the algorithm is introduced in the following paper: [CAM12]_.
+However, some settings differ from the ones introduced in the paper.
+"""
+
+
+#=======================================================================================
+sub_directory = 'lbp_svm'
+"""
+Sub-directory where results will be placed.
+
+You may change this setting using the ``--sub-directory`` command-line option
+or the attribute ``sub_directory`` in a configuration file loaded **after**
+this resource.
+"""
+
+
+#=======================================================================================
+# define preprocessor:
+
+from ..preprocessor import VideoFaceCrop
+
+CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
+CROPPED_POSITIONS = {'topleft' : (0,0) , 'bottomright' : CROPPED_IMAGE_SIZE}
+FIXED_POSITIONS = None
+MASK_SIGMA = None             # The sigma for random values areas outside image
+MASK_NEIGHBORS = 5            # The number of neighbors to consider while extrapolating
+MASK_SEED = None              # The seed for generating random values during extrapolation
+CHECK_FACE_SIZE_FLAG = True   # Check the size of the face
+MIN_FACE_SIZE = 50            # Minimal possible size of the face
+USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
+COLOR_CHANNEL = 'gray'        # Convert image to gray-scale format
+
+preprocessor = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
+                             cropped_positions = CROPPED_POSITIONS,
+                             fixed_positions = FIXED_POSITIONS,
+                             mask_sigma = MASK_SIGMA,
+                             mask_neighbors = MASK_NEIGHBORS,
+                             mask_seed = None,
+                             check_face_size_flag = CHECK_FACE_SIZE_FLAG,
+                             min_face_size = MIN_FACE_SIZE,
+                             use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
+                             color_channel = COLOR_CHANNEL)
+"""
+In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
+The size of the face is normalized to ``cropped_image_size`` dimensions. Faces with a size
+below the ``min_face_size`` threshold are discarded. The preprocessor is similar to the one introduced in
+[CAM12]_, which is selected by setting ``use_local_cropper_flag = True``.
+"""
+
+
+#=======================================================================================
+# define extractor:
+
+from ..extractor import VideoLBPHistogram
+
+LBPTYPE='uniform'
+ELBPTYPE='regular'
+RAD=1
+NEIGHBORS=8
+CIRC=False
+DTYPE=None
+
+extractor = VideoLBPHistogram(lbptype=LBPTYPE,
+                              elbptype=ELBPTYPE,
+                              rad=RAD,
+                              neighbors=NEIGHBORS,
+                              circ=CIRC,
+                              dtype=DTYPE)
+"""
+In the feature extraction stage the LBP histograms are extracted from each frame of the preprocessed video.
+
+The parameters are similar to the ones introduced in [CAM12]_.
+"""
+
+
+#=======================================================================================
+# define algorithm:
+
+from ..algorithm import VideoSvmPadAlgorithm
+
+MACHINE_TYPE = 'C_SVC'
+KERNEL_TYPE = 'RBF'
+N_SAMPLES = 10000
+TRAINER_GRID_SEARCH_PARAMS = {'cost': [2**P for P in range(-3, 14, 2)], 'gamma': [2**P for P in range(-15, 0, 2)]}
+MEAN_STD_NORM_FLAG = True      # enable mean-std normalization
+FRAME_LEVEL_SCORES_FLAG = True # one score per frame(!) in this case
+
+algorithm = VideoSvmPadAlgorithm(machine_type = MACHINE_TYPE,
+                                 kernel_type = KERNEL_TYPE,
+                                 n_samples = N_SAMPLES,
+                                 trainer_grid_search_params = TRAINER_GRID_SEARCH_PARAMS,
+                                 mean_std_norm_flag = MEAN_STD_NORM_FLAG,
+                                 frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+"""
+The SVM algorithm with RBF kernel is used to classify the data into *real* and *attack* classes.
+One score is produced for each frame of the input video, ``frame_level_scores_flag = True``.
+
+In contrast to [CAM12]_, a grid search of SVM parameters is used to select the
+best settings. The grid search is done on a subset of the training data. The size
+of this subset is defined by the ``n_samples`` parameter.
+
+The data is also mean-std normalized, ``mean_std_norm_flag = True``.
+"""
diff --git a/bob/pad/face/config/preprocessor/__init__.py b/bob/pad/face/config/preprocessor/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/bob/pad/face/config/preprocessor/filename.py b/bob/pad/face/config/preprocessor/filename.py
new file mode 100644
index 0000000000000000000000000000000000000000..feeefff065ac5566107737a9c2a817dedb1bef7b
--- /dev/null
+++ b/bob/pad/face/config/preprocessor/filename.py
@@ -0,0 +1,5 @@
+from bob.bio.base.preprocessor import Filename
+
+# This preprocessor does nothing, returning just the name of the file to extract the features from:
+empty_preprocessor = Filename()
+
diff --git a/bob/pad/face/config/preprocessor/frame_difference.py b/bob/pad/face/config/preprocessor/frame_difference.py
new file mode 100644
index 0000000000000000000000000000000000000000..349f172d2991113939ad131ec29c0989a920eca6
--- /dev/null
+++ b/bob/pad/face/config/preprocessor/frame_difference.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+
+from bob.pad.face.preprocessor import FrameDifference
+
+
+#=======================================================================================
+# Define instances here:
+
+NUMBER_OF_FRAMES = 200 # process at most 200 frames
+CHECK_FACE_SIZE_FLAG = True # Check size of the face
+MIN_FACE_SIZE = 50
+
+frame_diff_min_size_50_200_frames = FrameDifference(number_of_frames = NUMBER_OF_FRAMES,
+                                                    check_face_size_flag = CHECK_FACE_SIZE_FLAG,
+                                                    min_face_size = MIN_FACE_SIZE)
+
diff --git a/bob/pad/face/config/preprocessor/video_face_crop.py b/bob/pad/face/config/preprocessor/video_face_crop.py
new file mode 100644
index 0000000000000000000000000000000000000000..5ab32d17af692c94c5735addffb382742dfec666
--- /dev/null
+++ b/bob/pad/face/config/preprocessor/video_face_crop.py
@@ -0,0 +1,70 @@
+#!/usr/bin/env python
+
+from bob.pad.face.preprocessor import VideoFaceCrop
+
+
+#=======================================================================================
+# Define instances here:
+
+cropped_image_size = (64, 64) # The size of the resulting face
+cropped_positions = {'topleft' : (0,0) , 'bottomright' : cropped_image_size}
+fixed_positions = None
+mask_sigma = None             # The sigma for random values areas outside image
+mask_neighbors = 5            # The number of neighbors to consider while extrapolating
+mask_seed = None              # The seed for generating random values during extrapolation
+check_face_size_flag = False  # Don't check the size of the face
+min_face_size = 50
+use_local_cropper_flag = False # Use the cropper of bob.bio.face
+color_channel = 'gray'        # Convert image to gray-scale format
+
+video_face_crop_preproc_64_64 = VideoFaceCrop(cropped_image_size = cropped_image_size,
+                                              cropped_positions = cropped_positions,
+                                              fixed_positions = fixed_positions,
+                                              mask_sigma = mask_sigma,
+                                              mask_neighbors = mask_neighbors,
+                                              mask_seed = None,
+                                              check_face_size_flag = check_face_size_flag,
+                                              min_face_size = min_face_size,
+                                              use_local_cropper_flag = use_local_cropper_flag,
+                                              color_channel = color_channel)
+
+check_face_size_flag = True  # Check the size of the face
+min_face_size = 50
+
+video_face_crop_preproc_64_64_face_50 = VideoFaceCrop(cropped_image_size = cropped_image_size,
+                                                      cropped_positions = cropped_positions,
+                                                      fixed_positions = fixed_positions,
+                                                      mask_sigma = mask_sigma,
+                                                      mask_neighbors = mask_neighbors,
+                                                      mask_seed = None,
+                                                      check_face_size_flag = check_face_size_flag,
+                                                      min_face_size = min_face_size,
+                                                      use_local_cropper_flag = use_local_cropper_flag,
+                                                      color_channel = color_channel)
+
+
+use_local_cropper_flag = True # Use the local face cropping class (identical to Ivana's paper)
+
+video_face_crop_preproc_64_64_face_50_local_cropper = VideoFaceCrop(cropped_image_size = cropped_image_size,
+                                                                    cropped_positions = cropped_positions,
+                                                                    fixed_positions = fixed_positions,
+                                                                    mask_sigma = mask_sigma,
+                                                                    mask_neighbors = mask_neighbors,
+                                                                    mask_seed = None,
+                                                                    check_face_size_flag = check_face_size_flag,
+                                                                    min_face_size = min_face_size,
+                                                                    use_local_cropper_flag = use_local_cropper_flag,
+                                                                    color_channel = color_channel)
+
+rgb_output_flag = True # Return RGB cropped face using local cropper
+
+video_face_crop_preproc_64_64_face_50_local_cropper_rgb = VideoFaceCrop(cropped_image_size = cropped_image_size,
+                                                                    cropped_positions = cropped_positions,
+                                                                    fixed_positions = fixed_positions,
+                                                                    mask_sigma = mask_sigma,
+                                                                    mask_neighbors = mask_neighbors,
+                                                                    mask_seed = None,
+                                                                    check_face_size_flag = check_face_size_flag,
+                                                                    min_face_size = min_face_size,
+                                                                    use_local_cropper_flag = use_local_cropper_flag,
+                                                                    rgb_output_flag = rgb_output_flag)
\ No newline at end of file
diff --git a/bob/pad/face/config/qm_svm.py b/bob/pad/face/config/qm_svm.py
new file mode 100644
index 0000000000000000000000000000000000000000..dcf1473c76f85b3ea806324e55d4f60152c6e95c
--- /dev/null
+++ b/bob/pad/face/config/qm_svm.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+
+"""
+@author: Olegs Nikisins
+
+This file contains configurations to run Image Quality Measures (IQM) and SVM based face PAD baseline.
+The settings are tuned for the Replay-attack database.
+The IQM features used in this algorithm/resource are introduced in the following papers: [WHJ15]_ and [CBVM16]_.
+"""
+
+
+#=======================================================================================
+sub_directory = 'qm_svm'
+"""
+Sub-directory where results will be placed.
+
+You may change this setting using the ``--sub-directory`` command-line option
+or the attribute ``sub_directory`` in a configuration file loaded **after**
+this resource.
+"""
+
+
+#=======================================================================================
+# define preprocessor:
+
+from ..preprocessor import VideoFaceCrop
+
+CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
+CROPPED_POSITIONS = {'topleft' : (0,0) , 'bottomright' : CROPPED_IMAGE_SIZE}
+FIXED_POSITIONS = None
+MASK_SIGMA = None             # The sigma for random values areas outside image
+MASK_NEIGHBORS = 5            # The number of neighbors to consider while extrapolating
+MASK_SEED = None              # The seed for generating random values during extrapolation
+CHECK_FACE_SIZE_FLAG = True   # Check the size of the face
+MIN_FACE_SIZE = 50
+USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
+RGB_OUTPUT_FLAG = True        # Return RGB cropped face using local cropper
+
+preprocessor = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
+                             cropped_positions = CROPPED_POSITIONS,
+                             fixed_positions = FIXED_POSITIONS,
+                             mask_sigma = MASK_SIGMA,
+                             mask_neighbors = MASK_NEIGHBORS,
+                             mask_seed = None,
+                             check_face_size_flag = CHECK_FACE_SIZE_FLAG,
+                             min_face_size = MIN_FACE_SIZE,
+                             use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
+                             rgb_output_flag = RGB_OUTPUT_FLAG)
+"""
+In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
+The size of the face is normalized to ``cropped_image_size`` dimensions. Faces with a size
+below the ``min_face_size`` threshold are discarded. The preprocessor is similar to the one introduced in
+[CAM12]_, which is selected by setting ``use_local_cropper_flag = True``. The preprocessed frame is an RGB
+facial image, which is selected by setting ``RGB_OUTPUT_FLAG = True``.
+"""
+
+
+#=======================================================================================
+# define extractor:
+
+from ..extractor import VideoQualityMeasure
+
+GALBALLY=True
+MSU=True
+DTYPE=None
+
+extractor = VideoQualityMeasure(galbally=GALBALLY,
+                                msu=MSU,
+                                dtype=DTYPE)
+"""
+In the feature extraction stage the Image Quality Measures are extracted from each frame of the preprocessed RGB video.
+The features to be computed are introduced in the following papers: [WHJ15]_ and [CBVM16]_.
+"""
+
+
+#=======================================================================================
+# define algorithm:
+
+from ..algorithm import VideoSvmPadAlgorithm
+
+MACHINE_TYPE = 'C_SVC'
+KERNEL_TYPE = 'RBF'
+N_SAMPLES = 10000
+TRAINER_GRID_SEARCH_PARAMS = {'cost': [2**P for P in range(-3, 14, 2)], 'gamma': [2**P for P in range(-15, 0, 2)]}
+MEAN_STD_NORM_FLAG = True      # enable mean-std normalization
+FRAME_LEVEL_SCORES_FLAG = True # one score per frame(!) in this case
+
+algorithm = VideoSvmPadAlgorithm(machine_type = MACHINE_TYPE,
+                                 kernel_type = KERNEL_TYPE,
+                                 n_samples = N_SAMPLES,
+                                 trainer_grid_search_params = TRAINER_GRID_SEARCH_PARAMS,
+                                 mean_std_norm_flag = MEAN_STD_NORM_FLAG,
+                                 frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+"""
+The SVM algorithm with RBF kernel is used to classify the data into *real* and *attack* classes.
+One score is produced for each frame of the input video, ``frame_level_scores_flag = True``.
+A grid search of SVM parameters is used to select the best settings.
+The grid search is done on a subset of the training data.
+The size of this subset is defined by the ``n_samples`` parameter.
+
+The data is also mean-std normalized, ``mean_std_norm_flag = True``.
+"""
+
diff --git a/bob/pad/face/database/replay.py b/bob/pad/face/database/replay.py
index a272479b4f53967a3a1b159766d55fa4b7edca20..e8f052f2a725f6c8da4172f52ca92636afe4e3b0 100644
--- a/bob/pad/face/database/replay.py
+++ b/bob/pad/face/database/replay.py
@@ -47,7 +47,7 @@ class ReplayPadFile(PadFile):
         # attack_type is a string and I decided to make it like this for this
         # particular database. You can do whatever you want for your own database.
 
-        super(ReplayPadFile, self).__init__(client_id=f.client, path=f.path,
+        super(ReplayPadFile, self).__init__(client_id=f.client_id, path=f.path,
                                             attack_type=attack_type, file_id=f.id)
 
     #==========================================================================
@@ -66,9 +66,9 @@ class ReplayPadFile(PadFile):
 
         **Returns:**
 
-        ``filtered_image`` : :py:class:`dict`
-            A dictionary containing the key-value pairs: "video" key containing the frames data,
-            and "bbx" containing the coordinates of the face bounding boxes for each frame.
+        ``video_data`` : FrameContainer
+            Video data stored in the FrameContainer, see ``bob.bio.video.utils.FrameContainer``
+            for further details.
         """
 
         path = self.f.make_path(directory=directory, extension=extension) # path to the video file
@@ -150,6 +150,11 @@ class ReplayPadDatabase(PadDatabase):
 
         ``model_ids``
             This parameter is not supported in PAD databases yet
+
+        **Returns:**
+
+        ``files`` : :py:class:`list`
+            A list of ``ReplayPadFile`` objects.
         """
         # Convert group names to low-level group names here.
         groups = self.convert_names_to_lowlevel(groups, self.low_level_group_names, self.high_level_group_names)
diff --git a/bob/pad/face/extractor/ImageQualityMeasure.py b/bob/pad/face/extractor/ImageQualityMeasure.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3140a42d5e216caf8921389441738fc8a099bd3
--- /dev/null
+++ b/bob/pad/face/extractor/ImageQualityMeasure.py
@@ -0,0 +1,114 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+
+#==============================================================================
+# Import what is needed here:
+
+from __future__ import division
+from bob.bio.base.extractor import Extractor
+from bob.ip.qualitymeasure import galbally_iqm_features as iqm
+from bob.ip.qualitymeasure import msu_iqa_features as iqa
+import numpy
+import logging
+
+logger = logging.getLogger(__name__)
+
+#==============================================================================
+# Main body:
+
+class ImageQualityMeasure(Extractor):
+    """
+    This class is designed to extract Image Quality Measures given input RGB
+    image. For further documentation and description of features,
+    see "bob.ip.qualitymeasure".
+
+    **Parameters:**
+
+    ``galbally`` : :py:class:`bool`
+        If ``True``, galbally features will be added to the features.
+        Default: ``True``.
+
+    ``msu`` : :py:class:`bool`
+        If ``True``, MSU features will be added to the features.
+        Default: ``True``.
+
+    ``dtype`` : numpy.dtype
+        The data type of the resulting feature vector.
+        Default: ``None``.
+    """
+    #==========================================================================
+    def __init__(self,
+                 galbally=True,
+                 msu=True,
+                 dtype=None,
+                 **kwargs):
+
+        Extractor.__init__(self,
+                           galbally=galbally,
+                           msu=msu,
+                           dtype=dtype,
+                           **kwargs)
+
+        self.dtype = dtype
+        self.galbally = galbally
+        self.msu = msu
+
+
+    #==========================================================================
+    def __call__(self, data):
+        """
+        Compute Image Quality Measures given input RGB image.
+
+        **Parameters:**
+
+        ``data`` : 3D :py:class:`numpy.ndarray`
+            Input RGB image of the dimensionality (3, Row, Col), as returned
+            by Bob image loading routines.
+
+        **Returns:**
+
+        ``features`` : 1D :py:class:`numpy.ndarray`
+            Feature vector containing Image Quality Measures.
+        """
+
+        assert isinstance(data, numpy.ndarray)
+        assert self.galbally or self.msu
+
+        features = []
+
+        if self.galbally:
+
+            try:
+
+                gf_set = iqm.compute_quality_features(data)
+                gf_set = numpy.nan_to_num(gf_set)
+                features = numpy.hstack((features, gf_set))
+
+            except Exception as e:
+
+                logger.error(
+                    "Failed to extract galbally features.", exc_info=e)
+
+                return None
+
+        if self.msu:
+
+            try:
+
+                msuf_set = iqa.compute_msu_iqa_features(data)
+                msuf_set = numpy.nan_to_num(msuf_set)
+                features = numpy.hstack((features, msuf_set))
+
+            except Exception as e:
+
+                logger.error("Failed to extract MSU features.", exc_info=e)
+
+                return None
+
+        if self.dtype is not None:
+
+            features = features.astype(self.dtype)
+
+        return features
+
+
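+# A minimal usage sketch (assumes ``image`` is an uint8 RGB ndarray of shape
+# (3, H, W), e.g. as returned by ``bob.io.base.load``; the parameter values are
+# illustrative only):
+#
+#   from bob.pad.face.extractor import ImageQualityMeasure
+#   extractor = ImageQualityMeasure(galbally=True, msu=True, dtype=None)
+#   features = extractor(image)  # 1D vector of IQM features, or None on failure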
diff --git a/bob/pad/face/extractor/LBPHistogram.py b/bob/pad/face/extractor/LBPHistogram.py
index c66d7b464c4c6b2e013d83ef975b91964d0ba181..ee0a583b57f225dbc89e084d941842364171ddb3 100644
--- a/bob/pad/face/extractor/LBPHistogram.py
+++ b/bob/pad/face/extractor/LBPHistogram.py
@@ -87,7 +87,7 @@ class LBPHistogram(Extractor):
                     elbp_type=elbps[elbptype])
             else:  # we assume neighbors==8 in this case
                 lbp = bob.ip.base.LBP(
-                    neighbors=16, circular=circ, radius=rad, to_average=mct,
+                    neighbors=8, circular=circ, radius=rad, to_average=mct,
                     elbp_type=elbps[elbptype])
 
         self.dtype = dtype
diff --git a/bob/pad/face/extractor/VideoDataLoader.py b/bob/pad/face/extractor/VideoDataLoader.py
new file mode 100644
index 0000000000000000000000000000000000000000..18a314db22b2f36f3f4e1c74840a62b762a3f898
--- /dev/null
+++ b/bob/pad/face/extractor/VideoDataLoader.py
@@ -0,0 +1,117 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""
+Created on Thu Jun  1 13:55:02 2017
+
+@author: Olegs Nikisins
+"""
+
+#==============================================================================
+# Import what is needed here:
+
+import os
+
+import bob.bio.video
+
+#==============================================================================
+# Main body of the class
+
+class VideoDataLoader(object):
+    """
+    This class is designed to load video data given name of the file.
+    The class is called by the corresponding extractors in experiments using an
+    empty preprocessor. In this scenario the video data is loaded directly
+    from the database, avoiding duplicate storage of non-processed data in the
+    experimental directory.
+
+    NOTE:
+    To use this class in PAD experiments the command line argument
+    ``--preprocessed-directory`` must point to the original database directory.
+    For example:
+    --preprocessed-directory <DIRECTORY_CONTAINING_REPLAY_ATTACK_DATABASE>
+
+    At this point the class is just a collection of methods.
+    """
+
+    #==========================================================================
+    def get_complete_filename(self, filename):
+        """
+        Get a complete filename given a filename without an extension.
+
+        **Parameters:**
+
+        ``filename`` : :py:class:`str`
+            A name of the file containing the path, but no extension.
+
+        **Returns:**
+
+        ``filename_complete`` : :py:class:`str`
+            A complete filename, including the extension.
+        """
+
+        path, filename_no_ext = os.path.split(filename)
+
+        filenames = []
+        extensions = []
+
+        for f in os.listdir(path):
+
+            filenames.append(os.path.splitext(f)[0])
+            extensions.append(os.path.splitext(f)[1])
+
+
+        idx = filenames.index(filename_no_ext) # index of the file
+
+        file_extension = extensions[idx] # get extension of the file
+
+        filename_complete = os.path.join(path, filename_no_ext + file_extension)
+
+        return filename_complete
+
+
+    #==========================================================================
+    def load_video_data(self, filename_complete):
+        """
+        Load video data given a complete filename.
+
+        **Parameters:**
+
+        ``filename_complete`` : :py:class:`str`
+            A complete filename, including the extension.
+
+        **Returns:**
+
+        ``video_data`` : FrameContainer
+            A FrameContainer containing the loaded video data.
+        """
+
+        frame_selector = bob.bio.video.FrameSelector(selection_style = 'all') # select all frames from the video file
+
+        video_data = frame_selector(filename_complete) # video data
+
+        return video_data
+
+
+    #==========================================================================
+    def __call__(self, filename):
+        """
+        Load video data given a filename without an extension.
+
+        **Parameters:**
+
+        ``filename`` : :py:class:`str`
+            A name of the file containing the path, but no extension.
+
+        **Returns:**
+
+        ``video_data`` : FrameContainer
+            A FrameContainer containing the loaded video data.
+        """
+
+        filename_complete = self.get_complete_filename(filename)
+
+        video_data = self.load_video_data(filename_complete)
+
+        return video_data
+
+
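+# A minimal usage sketch (the path below is purely illustrative; the class only
+# requires that a file with that stem exists in the given directory):
+#
+#   loader = VideoDataLoader()
+#   frames = loader('/path/to/replay/train/real/client001_session01')
+#   # ``frames`` is a bob.bio.video.FrameContainer holding all frames of the video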
diff --git a/bob/pad/face/extractor/VideoLBPHistogram.py b/bob/pad/face/extractor/VideoLBPHistogram.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad920ce2071348a1b7bf5062e7aea74c6c29dfa6
--- /dev/null
+++ b/bob/pad/face/extractor/VideoLBPHistogram.py
@@ -0,0 +1,151 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""
+Created on Tue May 16 13:48:43 2017
+
+@author: Olegs Nikisins
+"""
+
+#==============================================================================
+# Import what is needed here:
+
+from bob.bio.base.extractor import Extractor
+
+from bob.pad.face.extractor import LBPHistogram
+
+import bob.bio.video
+
+
+#==============================================================================
+# Main body:
+
+class VideoLBPHistogram(Extractor, object):
+    """
+    This class is designed to extract LBP histograms for each frame in the input
+    video sequence/container.
+
+    **Parameters:**
+
+    ``lbptype`` : :py:class:`str`
+        The type of the LBP operator ("regular", "uniform" or "riu2").
+        Default: uniform.
+
+    ``elbptype`` : :py:class:`str`
+        The type of extended version of LBP (regular if not extended version
+        is used, otherwise transitional, direction_coded or modified).
+        Default: regular.
+
+    ``rad`` : :py:class:`float`
+        The radius of the circle on which the points are taken (for circular
+        LBP). Default: 1
+
+    ``neighbors`` : :py:class:`int`
+        The number of points around the central point on which LBP is
+        computed. Possible options: (4, 8, 16). Default: 8.
+
+    ``circ`` : :py:class:`bool`
+        Set to True if circular LBP is needed. Default: False.
+
+    ``dtype`` : numpy.dtype
+        If specified in the constructor, the resulting features will have
+        that type of data. Default: None.
+    """
+
+    #==========================================================================
+    def __init__(self,
+                 lbptype='uniform',
+                 elbptype='regular',
+                 rad=1,
+                 neighbors=8,
+                 circ=False,
+                 dtype=None):
+
+
+        super(VideoLBPHistogram, self).__init__(lbptype = lbptype,
+                                                elbptype = elbptype,
+                                                rad = rad,
+                                                neighbors = neighbors,
+                                                circ = circ,
+                                                dtype = dtype)
+
+        self.lbptype = lbptype
+        self.elbptype = elbptype
+        self.rad = rad
+        self.neighbors = neighbors
+        self.circ = circ
+        self.dtype = dtype
+
+        # extractor to process a single image/frame:
+        extractor = LBPHistogram(lbptype=lbptype,
+                                 elbptype=elbptype,
+                                 rad=rad,
+                                 neighbors=neighbors,
+                                 circ=circ,
+                                 dtype=dtype)
+
+        # a wrapper allowing to apply above extractor to the whole video:
+        self.video_extractor = bob.bio.video.extractor.Wrapper(extractor)
+
+
+    #==========================================================================
+    def __call__(self, frames):
+        """
+        Extracts an LBP histogram for each frame in the input video sequence/container.
+
+        **Parameters:**
+
+        ``frames`` : FrameContainer
+            Video data stored in the FrameContainer, see ``bob.bio.video.utils.FrameContainer``
+            for further details.
+
+        **Returns:**
+
+        ``lbp_histograms`` : FrameContainer
+            LBP histograms for each frame stored in the FrameContainer.
+        """
+
+        lbp_histograms = self.video_extractor(frames = frames)
+
+        return lbp_histograms
+
+
+    #==========================================================================
+    def write_feature(self, frames, file_name):
+        """
+        Writes the given data (that has been generated using the __call__ function of this class) to file.
+        This method overwrites the write_feature() method of the Extractor class.
+
+        **Parameters:**
+
+        ``frames`` :
+            Data returned by the __call__ method of the class.
+
+        ``file_name`` : :py:class:`str`
+            Name of the file.
+        """
+
+        self.video_extractor.write_feature(frames, file_name)
+
+
+    #==========================================================================
+    def read_feature(self, file_name):
+        """
+        Reads the preprocessed data from file.
+        This method overwrites the read_feature() method of the Extractor class.
+
+        **Parameters:**
+
+        ``file_name`` : :py:class:`str`
+            Name of the file.
+
+        **Returns:**
+
+        ``frames`` : :py:class:`bob.bio.video.FrameContainer`
+            Frames stored in the frame container.
+        """
+
+        frames = self.video_extractor.read_feature(file_name)
+
+        return frames
+
+
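+# A minimal usage sketch (``frames`` is assumed to be a FrameContainer of
+# gray-scale, face-cropped frames, e.g. the output of the VideoFaceCrop
+# preprocessor; parameter values are illustrative only):
+#
+#   extractor = VideoLBPHistogram(lbptype='uniform', rad=1, neighbors=8, circ=False)
+#   histograms = extractor(frames)  # FrameContainer with one LBP histogram per frame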
diff --git a/bob/pad/face/extractor/VideoQualityMeasure.py b/bob/pad/face/extractor/VideoQualityMeasure.py
new file mode 100644
index 0000000000000000000000000000000000000000..33c89e4d8ef5e9422bb42648d770c8c0f53e9c32
--- /dev/null
+++ b/bob/pad/face/extractor/VideoQualityMeasure.py
@@ -0,0 +1,144 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""
+Created on Wed May 31 16:39:34 2017
+
+@author: Olegs Nikisins
+"""
+
+#==============================================================================
+# Import what is needed here:
+
+from bob.bio.base.extractor import Extractor
+
+from bob.pad.face.extractor import ImageQualityMeasure
+
+import bob.bio.video
+
+from bob.pad.face.extractor import VideoDataLoader
+
+import six
+
+#==============================================================================
+# Main body:
+
+class VideoQualityMeasure(Extractor, object):
+    """
+    This class is designed to extract Quality Measures for each frame in the
+    input color video. For further documentation and description of features,
+    see "bob.ip.qualitymeasure".
+
+    **Parameters:**
+
+    ``galbally`` : :py:class:`bool`
+        If ``True``, galbally features will be added to the features.
+        Default: ``True``.
+
+    ``msu`` : :py:class:`bool`
+        If ``True``, MSU features will be added to the features.
+        Default: ``True``.
+
+    ``dtype`` : numpy.dtype
+        The data type of the resulting feature vector.
+        Default: ``None``.
+    """
+
+    #==========================================================================
+    def __init__(self,
+                 galbally=True,
+                 msu=True,
+                 dtype=None,
+                 **kwargs):
+
+        super(VideoQualityMeasure, self).__init__(galbally = galbally,
+                                                  msu = msu,
+                                                  dtype = dtype)
+
+        self.galbally = galbally
+        self.msu = msu
+        self.dtype = dtype
+
+        # extractor to process a single image/frame:
+        extractor = ImageQualityMeasure(galbally = galbally,
+                                        msu = msu,
+                                        dtype = dtype)
+
+        # a wrapper allowing to apply above extractor to the whole video:
+        self.video_extractor = bob.bio.video.extractor.Wrapper(extractor)
+
+
+    #==========================================================================
+    def __call__(self, frames):
+        """
+        Extract feature vectors containing Quality Measures for each frame
+        in the input color video sequence/container. The resulting features
+        will be saved to the FrameContainer too.
+
+        **Parameters:**
+
+        ``frames`` : FrameContainer or :py:class:`str`
+            Video data stored in the FrameContainer,
+            see ``bob.bio.video.utils.FrameContainer`` for further details.
+            If a string is given, it is the name of the file to load the video
+            data from. A string is only possible when an empty preprocessor is
+            used; in this case the video data is loaded directly from the database.
+
+        **Returns:**
+
+        ``quality_measures`` : FrameContainer
+            Quality Measures for each frame stored in the FrameContainer.
+        """
+
+        if isinstance(frames, six.string_types): # if frames is a path(!)
+
+            video_loader = VideoDataLoader()
+
+            frames = video_loader(frames) # frames is now a FrameContainer
+
+        quality_measures = self.video_extractor(frames = frames)
+
+        return quality_measures
+
+
+    #==========================================================================
+    def write_feature(self, frames, file_name):
+        """
+        Writes the given data (that has been generated using the __call__ function of this class) to file.
+        This method overwrites the write_data() method of the Extractor class.
+
+        **Parameters:**
+
+        ``frames`` :
+            Data returned by the __call__ method of the class.
+
+        ``file_name`` : :py:class:`str`
+            Name of the file.
+        """
+
+        self.video_extractor.write_feature(frames, file_name)
+
+
+    #==========================================================================
+    def read_feature(self, file_name):
+        """
+        Reads the preprocessed data from file.
+        This method overwrites the read_data() method of the Extractor class.
+
+        **Parameters:**
+
+        ``file_name`` : :py:class:`str`
+            Name of the file.
+
+        **Returns:**
+
+        ``frames`` : :py:class:`bob.bio.video.FrameContainer`
+            Frames stored in the frame container.
+        """
+
+        frames = self.video_extractor.read_feature(file_name)
+
+        return frames
+
+
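+# A minimal usage sketch (``frames`` can be a FrameContainer of RGB frames, or a
+# path without extension when the empty preprocessor is used, as handled in
+# ``__call__`` above):
+#
+#   extractor = VideoQualityMeasure(galbally=True, msu=True)
+#   quality_measures = extractor(frames)  # FrameContainer of per-frame IQM vectors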
diff --git a/bob/pad/face/extractor/__init__.py b/bob/pad/face/extractor/__init__.py
index 4a9104dbdf19213a0c6f9395faa7cd7708647024..18f4b835c9389d6486d8e548a817784646fbb339 100644
--- a/bob/pad/face/extractor/__init__.py
+++ b/bob/pad/face/extractor/__init__.py
@@ -1,5 +1,8 @@
 from .LBPHistogram import LBPHistogram
-
+from .VideoLBPHistogram import VideoLBPHistogram
+from .ImageQualityMeasure import ImageQualityMeasure
+from .VideoDataLoader import VideoDataLoader
+from .VideoQualityMeasure import VideoQualityMeasure
 
 def __appropriate__(*args):
     """Says object was actually declared here, and not in the import module.
@@ -21,5 +24,9 @@ def __appropriate__(*args):
 
 __appropriate__(
     LBPHistogram,
+    VideoLBPHistogram,
+    ImageQualityMeasure,
+    VideoQualityMeasure,
+    VideoDataLoader,
 )
 __all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/bob/pad/face/preprocessor/FrameDifference.py b/bob/pad/face/preprocessor/FrameDifference.py
new file mode 100644
index 0000000000000000000000000000000000000000..696b90e0a1f79df52977f381b8dce666f8f0c881
--- /dev/null
+++ b/bob/pad/face/preprocessor/FrameDifference.py
@@ -0,0 +1,342 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""
+Created on Fri May 12 14:14:23 2017
+
+@author: Olegs Nikisins
+"""
+
+#==============================================================================
+# Import what is needed here:
+
+from bob.bio.base.preprocessor import Preprocessor
+
+import numpy as np
+
+import bob.bio.video
+
+import bob.ip.base
+
+import bob.ip.color
+
+#==============================================================================
+# Main body:
+
+class FrameDifference(Preprocessor, object):
+    """
+    This class is designed to compute frame differences for both facial and
+    background regions. A constraint on the minimal size of the face can be
+    applied to the input video, selecting only the frames in which the face
+    exceeds the given threshold. This behavior is controlled by the
+    ``check_face_size_flag`` and ``min_face_size`` arguments of the class.
+    It is also possible to compute the frame differences for a limited number
+    of frames by specifying the ``number_of_frames`` parameter.
+
+    **Parameters:**
+
+    ``number_of_frames`` : :py:class:`int`
+        The number of frames to extract the frame differences from.
+        If ``None``, all frames of the input video are used. Default: ``None``.
+
+    ``check_face_size_flag`` : :py:class:`bool`
+        If True, only return the frames containing faces of the size above the
+        specified threshold ``min_face_size``. Default: ``False``.
+
+    ``min_face_size`` : :py:class:`int`
+        The minimal size of the face in pixels. Only valid when ``check_face_size_flag``
+        is set to True. Default: 50.
+    """
+
+    def __init__(self,
+                 number_of_frames = None,
+                 check_face_size_flag = False,
+                 min_face_size = 50):
+
+        super(FrameDifference, self).__init__(number_of_frames = number_of_frames,
+                                              check_face_size_flag = check_face_size_flag,
+                                              min_face_size = min_face_size)
+
+        self.number_of_frames = number_of_frames
+        self.check_face_size_flag = check_face_size_flag
+        self.min_face_size = min_face_size
+
+
+    #==========================================================================
+    def eval_face_differences(self, previous, current, annotations):
+        """
+        Evaluates the normalized frame difference on the face region.
+
+        The difference is evaluated within the face bounding box given by ``annotations``.
+
+        **Parameters:**
+
+        ``previous`` : 2D :py:class:`numpy.ndarray`
+            Previous frame as a gray-scaled image
+
+        ``current`` : 2D :py:class:`numpy.ndarray`
+            The current frame as a gray-scaled image
+
+        ``annotations`` : :py:class:`dict`
+            A dictionary containing annotations of the face bounding box.
+            Dictionary must be as follows ``{'topleft': (row, col), 'bottomright': (row, col)}``.
+
+        **Returns:**
+
+        ``face`` : :py:class:`float`
+            A size normalized integral difference of facial regions in two input
+            images.
+        """
+
+        prev = previous[annotations['topleft'][0]:annotations['bottomright'][0],
+                        annotations['topleft'][1]:annotations['bottomright'][1]]
+
+        curr = current[annotations['topleft'][0]:annotations['bottomright'][0],
+                       annotations['topleft'][1]:annotations['bottomright'][1]]
+
+        face_diff = abs(curr.astype('int32') - prev.astype('int32'))
+
+        face = face_diff.sum()
+
+        face /= float(face_diff.size)
+
+        return face
+
+
+    #==========================================================================
+    def eval_background_differences(self, previous, current, annotations, border=None):
+        """
+        Evaluates the normalized frame difference on the background.
+
+        The face bounding box given by ``annotations`` is excluded from the computation.
+
+        **Parameters:**
+
+        ``previous`` : 2D :py:class:`numpy.ndarray`
+            Previous frame as a gray-scaled image
+
+        ``current`` : 2D :py:class:`numpy.ndarray`
+            The current frame as a gray-scaled image
+
+        ``annotations`` : :py:class:`dict`
+            A dictionary containing annotations of the face bounding box.
+            Dictionary must be as follows ``{'topleft': (row, col), 'bottomright': (row, col)}``.
+
+        ``border`` : :py:class:`int`
+            The border size to consider. If set to ``None``, consider all image from the
+            face location up to the end. Default: ``None``.
+
+        **Returns:**
+
+        ``bg`` : :py:class:`float`
+            A size normalized integral difference of non-facial regions in two input
+            images.
+        """
+
+        height = annotations['bottomright'][0] - annotations['topleft'][0]
+        width = annotations['bottomright'][1] - annotations['topleft'][1]
+
+        full_diff = abs(current.astype('int32') - previous.astype('int32'))
+
+        if border is None:
+            full = full_diff.sum()
+            full_size = full_diff.size
+
+        else:
+
+            y1 = annotations['topleft'][0] - border
+            if y1 < 0: y1 = 0
+            x1 = annotations['topleft'][1] - border
+            if x1 < 0: x1 = 0
+            y2 = y1 + height + (2*border)
+            if y2 > full_diff.shape[0]: y2 = full_diff.shape[0]
+            x2 = x1 + width + (2*border)
+            if x2 > full_diff.shape[1]: x2 = full_diff.shape[1]
+            full = full_diff[y1:y2, x1:x2].sum()
+            full_size = full_diff[y1:y2, x1:x2].size
+
+        face_diff = full_diff[annotations['topleft'][0]:(annotations['topleft'][0]+height),
+            annotations['topleft'][1]:(annotations['topleft'][1]+width)]
+
+        # calculates the differences in the face and background areas
+        face = face_diff.sum()
+        bg = full - face
+
+        normalization = float(full_size - face_diff.size)
+        if normalization < 1: #prevents zero division
+            bg = 0.0
+        else:
+            bg /= float(full_size - face_diff.size)
+
+        return bg
+
+
+    #==========================================================================
+    def check_face_size(self, frame_container, annotations, min_face_size):
+        """
+        Return a FrameContainer containing only the frames in which the face size
+        exceeds the specified threshold. The annotations for the selected
+        frames are also returned.
+
+        **Parameters:**
+
+        ``frame_container`` : FrameContainer
+            Video data stored in the FrameContainer, see ``bob.bio.video.utils.FrameContainer``
+            for further details.
+
+        ``annotations`` : :py:class:`dict`
+            A dictionary containing the annotations for each frame in the video.
+            Dictionary structure: ``annotations = {'1': frame1_dict, '2': frame2_dict, ...}``.
+            Where ``frameN_dict = {'topleft': (row, col), 'bottomright': (row, col)}``
+            is the dictionary defining the coordinates of the face bounding box in frame N.
+
+        ``min_face_size`` : :py:class:`int`
+            The minimal size of the face in pixels.
+
+        **Returns:**
+
+        ``selected_frames`` : FrameContainer
+            Selected frames stored in the FrameContainer.
+
+        ``selected_annotations`` : :py:class:`dict`
+            A dictionary containing the annotations for selected frames.
+            Dictionary structure: ``annotations = {'1': frame1_dict, '2': frame2_dict, ...}``.
+            Where ``frameN_dict = {'topleft': (row, col), 'bottomright': (row, col)}``
+            is the dictionary defining the coordinates of the face bounding box in frame N.
+        """
+
+        selected_frames = bob.bio.video.FrameContainer() # initialize the FrameContainer
+
+        selected_annotations = {}
+
+        selected_frame_idx = 0
+
+        for idx in range(0, len(annotations)): # idx - frame index
+
+            frame_annotations = annotations[str(idx)] # annotations for particular frame
+
+            # size of current face
+            face_size = np.min(np.array(frame_annotations['bottomright']) - np.array(frame_annotations['topleft']))
+
+            if face_size >= min_face_size: # check if face size is above the threshold
+
+                selected_frame = frame_container[idx][1] # get current frame
+
+                selected_frames.add(selected_frame_idx, selected_frame) # add current frame to FrameContainer
+
+                selected_annotations[str(selected_frame_idx)] = annotations[str(idx)]
+
+                selected_frame_idx = selected_frame_idx + 1
+
+        return selected_frames, selected_annotations
+
+
+    #==========================================================================
+    def comp_face_bg_diff(self, frames, annotations, number_of_frames = None):
+        """
+        This function computes the frame differences for both facial and background
+        regions. These parameters are computed for ``number_of_frames`` frames
+        in the input FrameContainer.
+
+        **Parameters:**
+
+        ``frames`` : FrameContainer
+            RGB video data stored in the FrameContainer, see ``bob.bio.video.utils.FrameContainer``
+            for further details.
+
+        ``annotations`` : :py:class:`dict`
+            A dictionary containing the annotations for each frame in the video.
+            Dictionary structure: ``annotations = {'1': frame1_dict, '2': frame2_dict, ...}``.
+            Where ``frameN_dict = {'topleft': (row, col), 'bottomright': (row, col)}``
+            is the dictionary defining the coordinates of the face bounding box in frame N.
+
+        ``number_of_frames`` : :py:class:`int`
+            The number of frames to use in processing. If ``None``, all frames of the
+            input video are used. Default: ``None``.
+
+        **Returns:**
+
+        ``diff`` : 2D :py:class:`numpy.ndarray`
+            An array of the size ``(number_of_frames - 1) x 2``.
+            The first column contains frame differences of facial regions.
+            The second column contains frame differences of non-facial/background regions.
+        """
+
+        # Compute the number of frames to process:
+        if number_of_frames is not None:
+            number_of_frames = np.min([len(frames), number_of_frames])
+        else:
+            number_of_frames = len(frames)
+
+        previous = frames[0][1] # the first frame in the video
+
+        if len(previous.shape) == 3: # if RGB convert to gray-scale
+            previous = bob.ip.color.rgb_to_gray(previous)
+
+        diff = []
+
+        for k in range(1, number_of_frames):
+
+            current = frames[k][1]
+
+            if len(current.shape) == 3: # if RGB convert to gray-scale
+                current = bob.ip.color.rgb_to_gray(current)
+
+            face_diff = self.eval_face_differences(previous, current, annotations[str(k)])
+            bg_diff = self.eval_background_differences(previous, current, annotations[str(k)], None)
+
+            diff.append((face_diff, bg_diff))
+
+            # the current frame becomes the previous one for the next iteration
+            previous = current
+
+        if not diff: # if list is empty
+
+            diff = [(np.NaN, np.NaN)]
+
+        diff = np.vstack(diff)
+
+        return diff
+
+
+    #==========================================================================
+    def __call__(self, frames, annotations):
+        """
+        This method calls the ``comp_face_bg_diff`` function of this class
+        computing the frame differences for both facial and background regions.
+        The frame differences are computed for selected frames, which are returned
+        by ``check_face_size`` function of this class. This ``check_face_size`` is
+        done only if ``check_face_size_flag = True``.
+
+        **Parameters:**
+
+        ``frames`` : FrameContainer
+            RGB video data stored in the FrameContainer, see ``bob.bio.video.utils.FrameContainer``
+            for further details.
+
+        ``annotations`` : :py:class:`dict`
+            A dictionary containing the annotations for each frame in the video.
+            Dictionary structure: ``annotations = {'1': frame1_dict, '2': frame2_dict, ...}``.
+            Where ``frameN_dict = {'topleft': (row, col), 'bottomright': (row, col)}``
+            is the dictionary defining the coordinates of the face bounding box in frame N.
+
+        **Returns:**
+
+        ``diff`` : 2D :py:class:`numpy.ndarray`
+            An array of the size ``(number_of_frames - 1) x 2``.
+            The first column contains frame differences of facial regions.
+            The second column contains frame differences of non-facial/background regions.
+        """
+
+        if self.check_face_size_flag:
+
+            selected_frames, selected_annotations = self.check_face_size(frames, annotations, self.min_face_size)
+
+        else: # otherwise process all frames with the original annotations
+
+            selected_frames, selected_annotations = frames, annotations
+
+        diff = self.comp_face_bg_diff(frames = selected_frames,
+                                      annotations = selected_annotations,
+                                      number_of_frames = self.number_of_frames)
+
+        return diff
+
+
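+# A minimal usage sketch (``frames`` is a FrameContainer with an RGB video and
+# ``annotations`` the per-frame bounding-box dictionary described above; the
+# parameter values are illustrative only):
+#
+#   preprocessor = FrameDifference(number_of_frames=50, check_face_size_flag=True, min_face_size=50)
+#   diff = preprocessor(frames, annotations)  # (N-1) x 2 array: face vs. background differences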
diff --git a/bob/pad/face/preprocessor/ImageFaceCrop.py b/bob/pad/face/preprocessor/ImageFaceCrop.py
new file mode 100644
index 0000000000000000000000000000000000000000..6b998ef42d069c653cd0c77723b0c55430a3a5f1
--- /dev/null
+++ b/bob/pad/face/preprocessor/ImageFaceCrop.py
@@ -0,0 +1,181 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""
+Created on Tue May 30 14:11:16 2017
+
+@author: Olegs Nikisins
+"""
+
+#==============================================================================
+# Import what is needed here:
+
+from bob.bio.base.preprocessor import Preprocessor
+
+import numpy as np
+
+import bob.ip.color
+
+import bob.ip.base
+
+#==============================================================================
+# Main body:
+
+class ImageFaceCrop(Preprocessor):
+    """
+    This class crops the face in the input image given annotations defining
+    the face bounding box. The size of the face is also normalized to the
+    pre-defined dimensions. For RGB inputs it is possible to return both
+    color and gray-scale outputs. This option is controlled by ``rgb_output_flag``.
+
+    The cropping algorithm is identical to the one used in the following paper:
+    "On the Effectiveness of Local Binary Patterns in Face Anti-spoofing"
+
+    **Parameters:**
+
+    ``face_size`` : :py:class:`int`
+        The size of the face after normalization.
+
+    ``rgb_output_flag`` : :py:class:`bool`
+        Return RGB cropped face if ``True``, otherwise a gray-scale image is
+        returned. Default: ``False``.
+    """
+    #==========================================================================
+    def __init__(self,
+                 face_size,
+                 rgb_output_flag = False):
+
+
+        Preprocessor.__init__(self,
+                              face_size = face_size,
+                              rgb_output_flag = rgb_output_flag)
+
+        self.face_size = face_size
+        self.rgb_output_flag = rgb_output_flag
+
+
+    #==========================================================================
+    def normalize_image_size_in_grayscale(self, image, annotations, face_size):
+        """
+        This function crops the face in the input Gray-scale image given annotations
+        defining the face bounding box. The size of the face is also normalized to the
+        pre-defined dimensions.
+
+        The cropping algorithm is identical to the one used in the following paper:
+        "On the Effectiveness of Local Binary Patterns in Face Anti-spoofing"
+
+        **Parameters:**
+
+        ``image`` : 2D :py:class:`numpy.ndarray`
+            Gray-scale input image.
+
+        ``annotations`` : :py:class:`dict`
+            A dictionary containing annotations of the face bounding box.
+            Dictionary must be as follows ``{'topleft': (row, col), 'bottomright': (row, col)}``
+
+        ``face_size`` : :py:class:`int`
+            The size of the face after normalization.
+
+        **Returns:**
+
+        ``normbbx`` : 2D :py:class:`numpy.ndarray`
+            An image of the cropped face of the size (self.face_size, self.face_size).
+        """
+
+        cutframe = image[annotations['topleft'][0]:annotations['bottomright'][0],
+                 annotations['topleft'][1]:annotations['bottomright'][1]]
+
+        tempbbx = np.ndarray((face_size, face_size), 'float64')
+        bob.ip.base.scale(cutframe, tempbbx) # rescale the cropped face to the pre-defined size
+        tempbbx_ = np.floor(tempbbx + 0.5) # round to the nearest integer
+        normbbx = tempbbx_.astype('uint8')
+
+        return normbbx
+
+
+    #==========================================================================
+    def normalize_image_size(self, image, annotations, face_size, rgb_output_flag):
+        """
+        This function crops the face in the input image given annotations defining
+        the face bounding box. The size of the face is also normalized to the
+        pre-defined dimensions. For RGB inputs it is possible to return both
+        color and gray-scale outputs. This option is controlled by ``rgb_output_flag``.
+
+        The cropping algorithm is identical to the one used in the following paper:
+        "On the Effectiveness of Local Binary Patterns in Face Anti-spoofing"
+
+        **Parameters:**
+
+        ``image`` : 2D or 3D :py:class:`numpy.ndarray`
+            Input image (RGB or gray-scale).
+
+        ``annotations`` : :py:class:`dict`
+            A dictionary containing annotations of the face bounding box.
+            Dictionary must be as follows ``{'topleft': (row, col), 'bottomright': (row, col)}``
+
+        ``face_size`` : :py:class:`int`
+            The size of the face after normalization.
+
+        ``rgb_output_flag`` : :py:class:`bool`
+            Return RGB cropped face if ``True``, otherwise a gray-scale image is
+            returned. Default: ``False``.
+
+        **Returns:**
+
+        ``face`` : 2D or 3D :py:class:`numpy.ndarray`
+            An image of the cropped face of the size (self.face_size, self.face_size),
+            rgb 3D or gray-scale 2D.
+        """
+
+        if len(image.shape) == 3:
+
+            if not(rgb_output_flag):
+
+                image = bob.ip.color.rgb_to_gray(image)
+
+        if len(image.shape) == 2:
+
+            image = [image] # make gray-scale image an iterable
+
+        result = []
+
+        for image_channel in image: # for all color channels in the input image
+
+            cropped_face = self.normalize_image_size_in_grayscale(image_channel, annotations, face_size)
+
+            result.append(cropped_face)
+
+        face = np.stack(result, axis=0)
+
+        face = np.squeeze(face) # squeeze 1-st dimension for gray-scale images
+
+        return face
+
+
+    #==========================================================================
+    def __call__(self, image, annotations):
+        """
+        Call the ``normalize_image_size()`` method of this class.
+
+        **Parameters:**
+
+        ``image`` : 2D or 3D :py:class:`numpy.ndarray`
+            Input image (RGB or gray-scale).
+
+        ``annotations`` : :py:class:`dict`
+            A dictionary containing annotations of the face bounding box.
+            Dictionary must be as follows ``{'topleft': (row, col), 'bottomright': (row, col)}``
+
+        **Returns:**
+
+        ``norm_face_image`` : 2D or 3D :py:class:`numpy.ndarray`
+            An image of the cropped face of the size (self.face_size, self.face_size),
+            rgb 3D or gray-scale 2D.
+        """
+
+        norm_face_image = self.normalize_image_size(image, annotations, self.face_size, self.rgb_output_flag)
+
+        return norm_face_image
+
+
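+# A minimal usage sketch (``image`` is an RGB or gray-scale ndarray and
+# ``annotations`` the bounding-box dictionary described above; face_size=64 is
+# an illustrative value):
+#
+#   cropper = ImageFaceCrop(face_size=64, rgb_output_flag=False)
+#   face = cropper(image, annotations)  # 64x64 gray-scale face (3x64x64 if RGB output)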
diff --git a/bob/pad/face/preprocessor/VideoFaceCrop.py b/bob/pad/face/preprocessor/VideoFaceCrop.py
index 0c7aa5efe6329c866b967713c18a3e8e4d2bb004..3ea266c98fece64d37d3d751c5c14ad894678ef1 100644
--- a/bob/pad/face/preprocessor/VideoFaceCrop.py
+++ b/bob/pad/face/preprocessor/VideoFaceCrop.py
@@ -3,7 +3,7 @@
 """
 Created on Fri May 12 14:14:23 2017
 
-@author: onikisins
+@author: Olegs Nikisins
 """
 #==============================================================================
 # Import what is needed here:
@@ -14,6 +14,10 @@ from bob.bio.face.preprocessor import FaceCrop
 
 import bob.bio.video
 
+import numpy as np
+
+from bob.pad.face.preprocessor.ImageFaceCrop import ImageFaceCrop
+
 #==============================================================================
 # Main body:
 
@@ -54,9 +58,26 @@ class VideoFaceCrop(Preprocessor, object):
           When run in parallel, the same random seed will be applied to all parallel processes.
           Hence, results of parallel execution will differ from the results in serial execution.
 
+    ``check_face_size_flag`` : :py:class:`bool`
+        If True, only return the frames containing faces of the size above the
+        specified threshold ``min_face_size``. Default: False.
+
+    ``min_face_size`` : :py:class:`int`
+        The minimal size of the face in pixels. Only valid when ``check_face_size_flag``
+        is set to True. Default: 50.
+
+    ``use_local_cropper_flag`` : :py:class:`bool`
+        If True, use the local ImageFaceCrop class to crop faces in the frames.
+        Otherwise, the FaceCrop preprocessor from bob.bio.face is used.
+        Default: False.
+
+    ``rgb_output_flag`` : :py:class:`bool`
+        Return RGB cropped face if ``True``, otherwise a gray-scale image is
+        returned. This flag is only valid when ``use_local_cropper_flag = True``.
+        Default: ``False``.
+
     ``kwargs``
         Remaining keyword parameters passed to the :py:class:`Base` constructor, such as ``color_channel`` or ``dtype``.
-
     """
 
     #==========================================================================
@@ -67,15 +88,23 @@ class VideoFaceCrop(Preprocessor, object):
                  mask_sigma = None,
                  mask_neighbors = 5,
                  mask_seed = None,
+                 check_face_size_flag = False,
+                 min_face_size = 50,
+                 use_local_cropper_flag = False,
+                 rgb_output_flag = False,
                  **kwargs):
 
         super(VideoFaceCrop, self).__init__(cropped_image_size = cropped_image_size,
-                                             cropped_positions = cropped_positions,
-                                             fixed_positions = fixed_positions,
-                                             mask_sigma = mask_sigma,
-                                             mask_neighbors = mask_neighbors,
-                                             mask_seed = mask_seed,
-                                             **kwargs)
+                                            cropped_positions = cropped_positions,
+                                            fixed_positions = fixed_positions,
+                                            mask_sigma = mask_sigma,
+                                            mask_neighbors = mask_neighbors,
+                                            mask_seed = mask_seed,
+                                            check_face_size_flag = check_face_size_flag,
+                                            min_face_size = min_face_size,
+                                            use_local_cropper_flag = use_local_cropper_flag,
+                                            rgb_output_flag = rgb_output_flag,
+                                            **kwargs)
 
         self.cropped_image_size = cropped_image_size
         self.cropped_positions = cropped_positions
@@ -83,18 +112,76 @@ class VideoFaceCrop(Preprocessor, object):
         self.mask_sigma = mask_sigma
         self.mask_neighbors = mask_neighbors
         self.mask_seed = mask_seed
+        self.check_face_size_flag = check_face_size_flag
+        self.min_face_size = min_face_size
+        self.use_local_cropper_flag = use_local_cropper_flag
+        self.rgb_output_flag = rgb_output_flag
 
         # Save also the data stored in the kwargs:
         for (k, v) in kwargs.items():
             setattr(self, k, v)
 
-        self.preprocessor = FaceCrop(cropped_image_size = cropped_image_size,
-                                     cropped_positions = cropped_positions,
-                                     fixed_positions = fixed_positions,
-                                     mask_sigma = mask_sigma,
-                                     mask_neighbors = mask_neighbors,
-                                     mask_seed = mask_seed,
-                                     **kwargs)
+        if self.use_local_cropper_flag:
+
+            preprocessor = ImageFaceCrop(face_size = self.cropped_image_size[0],
+                                         rgb_output_flag = self.rgb_output_flag)
+
+        else:
+
+            preprocessor = FaceCrop(cropped_image_size = self.cropped_image_size,
+                                    cropped_positions = self.cropped_positions,
+                                    fixed_positions = self.fixed_positions,
+                                    mask_sigma = self.mask_sigma,
+                                    mask_neighbors = self.mask_neighbors,
+                                    mask_seed = self.mask_seed,
+                                    **kwargs)
+
+        self.video_preprocessor = bob.bio.video.preprocessor.Wrapper(preprocessor)
+
+
+    #==========================================================================
+    def check_face_size(self, frame_container, annotations, min_face_size):
+        """
+        Return the FrameContainer containing the frames with faces of the
+        size overcoming the specified threshold.
+
+        **Parameters:**
+
+        ``frame_container`` : FrameContainer
+            Video data stored in the FrameContainer, see ``bob.bio.video.utils.FrameContainer``
+            for further details.
+
+        ``annotations`` : :py:class:`dict`
+            A dictionary containing the annotations for each frame in the video.
+            Dictionary structure: ``annotations = {'1': frame1_dict, '2': frame2_dict, ...}``.
+            Where ``frameN_dict = {'topleft': (row, col), 'bottomright': (row, col)}``
+            is the dictionary defining the coordinates of the face bounding box in frame N.
+
+        ``min_face_size`` : :py:class:`int`
+            The minimal size of the face in pixels.
+        """
+
+        cleaned_frame_container = bob.bio.video.FrameContainer() # initialize the FrameContainer
+
+        selected_frame_idx = 0
+
+        for idx in range(0, len(annotations)): # idx - frame index
+
+            frame_annotations = annotations[str(idx)] # annotations for particular frame
+
+            # size of current face
+            face_size = np.min(np.array(frame_annotations['bottomright']) - np.array(frame_annotations['topleft']))
+
+            if face_size >= min_face_size: # check if face size is above the threshold
+
+                selected_frame = frame_container[idx][1] # get current frame
+
+                cleaned_frame_container.add(selected_frame_idx, selected_frame) # add current frame to FrameContainer
+
+                selected_frame_idx = selected_frame_idx + 1
+
+        return cleaned_frame_container
+
 
     #==========================================================================
     def __call__(self, frames, annotations):
@@ -103,7 +190,7 @@ class VideoFaceCrop(Preprocessor, object):
 
         **Parameters:**
 
-        ``image`` : FrameContainer
+        ``frames`` : FrameContainer
             Video data stored in the FrameContainer, see ``bob.bio.video.utils.FrameContainer``
             for further details.
 
@@ -119,12 +206,15 @@ class VideoFaceCrop(Preprocessor, object):
             Cropped faces stored in the FrameContainer.
         """
 
-        video_preprocessor = bob.bio.video.preprocessor.Wrapper(self.preprocessor)
+        preprocessed_video = self.video_preprocessor(frames = frames, annotations = annotations)
+
+        if self.check_face_size_flag:
 
-        preprocessed_video = video_preprocessor(frames = frames, annotations = annotations)
+            preprocessed_video = self.check_face_size(preprocessed_video, annotations, self.min_face_size)
 
         return preprocessed_video
 
+
     #==========================================================================
     def write_data( self, frames, file_name ):
         """
@@ -140,13 +230,14 @@ class VideoFaceCrop(Preprocessor, object):
             name of the file.
         """
 
-        bob.bio.video.preprocessor.Wrapper.write_data(frames, file_name)
+        self.video_preprocessor.write_data(frames, file_name)
+
 
     #==========================================================================
     def read_data( self, file_name ):
         """
         Reads the preprocessed data from file.
-        his method overwrites the read_data() method of the Preprocessor class.
+        This method overwrites the read_data() method of the Preprocessor class.
 
         **Parameters:**
 
@@ -159,7 +250,7 @@ class VideoFaceCrop(Preprocessor, object):
             Frames stored in the frame container.
         """
 
-        frames = bob.bio.video.preprocessor.Wrapper.read_data(file_name)
+        frames = self.video_preprocessor.read_data(file_name)
 
         return frames
 
diff --git a/bob/pad/face/preprocessor/__init__.py b/bob/pad/face/preprocessor/__init__.py
index 60586054834555c02d0e06f3964dc5272657e023..c9c5a4225d2dfff5065d2559ecea9c6ebed03b0f 100644
--- a/bob/pad/face/preprocessor/__init__.py
+++ b/bob/pad/face/preprocessor/__init__.py
@@ -1,4 +1,6 @@
 from .VideoFaceCrop import VideoFaceCrop
+from .ImageFaceCrop import ImageFaceCrop
+from .FrameDifference import FrameDifference
 
 
 def __appropriate__(*args):
@@ -21,5 +23,7 @@ def __appropriate__(*args):
 
 __appropriate__(
     VideoFaceCrop,
+    ImageFaceCrop,
+    FrameDifference,
 )
 __all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/develop.cfg b/develop.cfg
new file mode 100644
index 0000000000000000000000000000000000000000..f8617f19c93663008158d1b270e9c09b0cdd219a
--- /dev/null
+++ b/develop.cfg
@@ -0,0 +1,110 @@
+; vim: set fileencoding=utf-8 :
+; Thu Oct  9 16:51:06 CEST 2014
+
+[buildout]
+parts = scripts
+eggs = bob.pad.face
+       bob.extension
+       bob.blitz
+       bob.core
+       bob.sp
+       bob.math
+       bob.io.base
+       bob.ip.gabor
+       bob.measure
+       bob.ip.base
+       bob.learn.boosting
+       bob.io.image
+       bob.ip.draw
+       bob.ip.color
+       bob.io.video
+       bob.io.matlab
+       bob.learn.activation
+       bob.ip.flandmark
+       bob.ip.facedetect
+       bob.ip.qualitymeasure
+       bob.learn.linear
+       bob.db.base
+       bob.learn.em
+       bob.db.atnt
+       bob.bio.base
+       bob.bio.face
+       bob.learn.libsvm
+       bob.bio.video
+       bob.pad.base
+
+extensions = bob.buildout
+             mr.developer
+
+auto-checkout = *
+
+develop = src/bob.extension
+          src/bob.blitz
+          src/bob.core
+          src/bob.sp
+          src/bob.math
+          src/bob.io.base
+          src/bob.ip.gabor
+          src/bob.measure
+          src/bob.ip.base
+          src/bob.learn.boosting
+          src/bob.io.image
+          src/bob.ip.draw
+          src/bob.ip.color
+          src/bob.io.video
+          src/bob.io.matlab
+          src/bob.learn.activation
+          src/bob.ip.flandmark
+          src/bob.ip.facedetect
+          src/bob.ip.qualitymeasure
+          src/bob.learn.linear
+          src/bob.db.base
+          src/bob.learn.em
+          src/bob.db.atnt
+          src/bob.bio.base
+          src/bob.bio.face
+          src/bob.learn.libsvm
+          src/bob.bio.video
+          src/bob.pad.base
+          .
+
+; options for bob.buildout
+debug = true
+verbose = true
+newest = false
+
+[sources]
+bob.extension = git git@gitlab.idiap.ch:bob/bob.extension
+bob.blitz = git git@gitlab.idiap.ch:bob/bob.blitz
+bob.core = git git@gitlab.idiap.ch:bob/bob.core
+bob.sp = git git@gitlab.idiap.ch:bob/bob.sp
+bob.math = git git@gitlab.idiap.ch:bob/bob.math
+bob.io.base = git git@gitlab.idiap.ch:bob/bob.io.base
+bob.ip.gabor = git git@gitlab.idiap.ch:bob/bob.ip.gabor
+bob.measure = git git@gitlab.idiap.ch:bob/bob.measure
+bob.ip.base = git git@gitlab.idiap.ch:bob/bob.ip.base
+bob.learn.boosting = git git@gitlab.idiap.ch:bob/bob.learn.boosting
+bob.io.image = git git@gitlab.idiap.ch:bob/bob.io.image
+bob.ip.draw = git git@gitlab.idiap.ch:bob/bob.ip.draw
+bob.ip.color = git git@gitlab.idiap.ch:bob/bob.ip.color
+bob.io.video = git git@gitlab.idiap.ch:bob/bob.io.video
+bob.io.matlab = git git@gitlab.idiap.ch:bob/bob.io.matlab
+bob.learn.activation = git git@gitlab.idiap.ch:bob/bob.learn.activation
+bob.ip.flandmark = git git@gitlab.idiap.ch:bob/bob.ip.flandmark
+bob.ip.facedetect = git git@gitlab.idiap.ch:bob/bob.ip.facedetect
+bob.ip.qualitymeasure = git git@gitlab.idiap.ch:bob/bob.ip.qualitymeasure
+bob.learn.linear = git git@gitlab.idiap.ch:bob/bob.learn.linear
+bob.db.base = git git@gitlab.idiap.ch:bob/bob.db.base
+bob.learn.em = git git@gitlab.idiap.ch:bob/bob.learn.em
+bob.db.atnt = git git@gitlab.idiap.ch:bob/bob.db.atnt
+bob.bio.base = git git@gitlab.idiap.ch:bob/bob.bio.base
+bob.bio.face = git git@gitlab.idiap.ch:bob/bob.bio.face
+bob.learn.libsvm = git git@gitlab.idiap.ch:bob/bob.learn.libsvm
+bob.bio.video = git git@gitlab.idiap.ch:bob/bob.bio.video
+bob.pad.base = git git@gitlab.idiap.ch:bob/bob.pad.base
+
+
+[scripts]
+recipe = bob.buildout:scripts
+dependent-scripts = true
+
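+; A typical way of using this configuration (a sketch, assuming zc.buildout and
+; bob.buildout are available in the environment):
+;
+;   $ buildout -c develop.cfg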
diff --git a/doc/baselines.rst b/doc/baselines.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7569495f0bd047988573b14191e8ef71ef49a4d2
--- /dev/null
+++ b/doc/baselines.rst
@@ -0,0 +1,196 @@
+
+
+.. _bob.pad.face.baselines:
+
+
+===============================
+ Executing Baseline Algorithms
+===============================
+
+This section explains how to execute face presentation attack detection (PAD) algorithms implemented
+in ``bob.pad.face``.
+
+
+Running Baseline Experiments
+----------------------------
+
+To run the baseline PAD experiments, the ``spoof.py`` script located in the ``bin`` directory is used.
+To see the description of the script, you can type the following in the console:
+
+.. code-block:: sh
+
+   $ ./bin/spoof.py --help
+
+This script is explained in more detail in :ref:`bob.pad.base.experiments`.
+
+Usually it is a good idea to have at least verbose level 2 (i.e., calling
+``spoof.py --verbose --verbose``, or the short version ``spoof.py
+-vv``).
+
+.. note:: **Running in Parallel**
+
+   To run the experiments in parallel, you can define an SGE grid or local host
+   (multi-processing) configurations as explained in
+   :ref:`running_in_parallel`.
+
+   In short, to run in the Idiap SGE grid, you can simply add the ``--grid``
+   command line option, with grid configuration parameters. To run experiments in parallel on
+   the local machine, simply add a ``--parallel <N>`` option, where ``<N>``
+   specifies the number of parallel jobs you want to execute.
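+
+   For instance, a hypothetical local run with four parallel jobs could look
+   like this (the resource name and paths are the same as in the examples
+   below):
+
+   .. code-block:: sh
+
+      $ ./bin/spoof.py lbp-svm \
+      --database replay --protocol grandtest --groups train dev eval \
+      --sub-directory <PATH_TO_STORE_THE_RESULTS> \
+      --parallel 4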
+
+
+Database setups and baselines are encoded using
+:ref:`bob.bio.base.configuration-files`, all stored inside the package root, in
+the directory ``bob/pad/face/config``. Documentation for each resource
+is available in the section :ref:`bob.pad.face.resources`.
+
+.. warning::
+
+   You **cannot** run experiments just by executing the command line
+   instructions described in this guide. You **need first** to procure yourself
+   the raw data files that correspond to *each* database used here in order to
+   correctly run experiments with those data. Biometric data is considered
+   private data and, under EU regulations, cannot be distributed without
+   consent or a license. You may consult our
+   :ref:`bob.pad.face.resources.databases` resources section for checking
+   currently supported databases and accessing download links for the raw data
+   files.
+
+   Once the raw data files have been downloaded, particular attention should be
+   given to the directory locations of those. Unpack the databases carefully
+   and annotate the root directory where they have been unpacked.
+
+   Then, carefully read the *Databases* section of
+   :ref:`bob.pad.base.installation` on how to correctly setup the
+   ``~/.bob_bio_databases.txt`` file.
+
+   Use the following keywords on the left side of the assignment (see
+   :ref:`bob.pad.face.resources.databases`):
+
+   .. code-block:: text
+
+      [YOUR_REPLAY_ATTACK_DIRECTORY] = /complete/path/to/replayattack-database/
+
+   Notice it is rather important to use the strings as described above,
+   otherwise ``bob.pad.base`` will not be able to correctly load your images.
+
+   Once this step is done, you can proceed with the instructions below.
+
+
+.. _bob.pad.face.baselines.replay_attack:
+
+
+------------
+
+Baselines on REPLAY-ATTACK database
+--------------------------------------
+
+This section summarizes the results of baseline face PAD experiments on the REPLAY-ATTACK (`replayattack`_) database.
+
+
+LBP features of facial region + SVM classifier
+===================================================
+
+A detailed description of this PAD pipeline is given in :ref:`bob.pad.face.resources.face_pad.lbp_svm_replayattack`.
+
+To run this baseline on the `replayattack`_ database, using the ``grandtest`` protocol, execute the following:
+
+.. code-block:: sh
+
+    $ ./bin/spoof.py lbp-svm \
+    --database replay --protocol grandtest --groups train dev eval \
+    --sub-directory <PATH_TO_STORE_THE_RESULTS>
+
+.. tip::
+
+    If you are in `idiap`_ you can use SGE grid to speed-up the calculations.
+    Simply add ``--grid idiap`` argument to the above command. For example:
+
+    .. code-block:: sh
+
+        $ ./bin/spoof.py lbp-svm \
+        --database replay --protocol grandtest --groups train dev eval \
+        --sub-directory <PATH_TO_STORE_THE_RESULTS> \
+        --grid idiap
+
+To understand the settings of this baseline PAD experiment, you can check the
+corresponding configuration file: ``bob/pad/face/config/lbp_svm.py``.
+
+To evaluate the results, computing the EER and HTER and plotting the ROC curve,
+you can use the following command:
+
+.. code-block:: sh
+
+    ./bin/evaluate.py \
+    --dev-files  <PATH_TO_STORE_THE_RESULTS>/grandtest/scores/scores-dev  \
+    --eval-files <PATH_TO_STORE_THE_RESULTS>/grandtest/scores/scores-eval \
+    --legends "LBP features of facial region + SVM classifier + REPLAY-ATTACK database" \
+    -F 7 \
+    --criterion EER \
+    --roc <PATH_TO_STORE_THE_RESULTS>/ROC.pdf
+
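+For reference, the reported metrics are defined as follows: the equal error
+rate (EER) is the operating point on the development set where the false
+acceptance rate (FAR) equals the false rejection rate (FRR); the half total
+error rate (HTER) is then computed on the evaluation set at the threshold
+:math:`\tau^*` selected on the development set:
+
+.. math::
+
+   \tau^* = \underset{\tau}{\arg\min}\,\bigl|\mathrm{FAR}_{dev}(\tau) - \mathrm{FRR}_{dev}(\tau)\bigr|,
+   \qquad
+   \mathrm{HTER}_{eval} = \frac{\mathrm{FAR}_{eval}(\tau^*) + \mathrm{FRR}_{eval}(\tau^*)}{2}.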
+
+The EER/HTER error rates for the `replayattack`_ database are summarized in the table below:
+
++-------------------+----------+----------+
+|      Protocol     |  EER (%) | HTER (%) |
++===================+==========+==========+
+|   ``grandtest``   |  15.117  |  15.609  |
++-------------------+----------+----------+
+
+The ROC curve for this particular experiment can be downloaded from here:
+
+:download:`ROC curve <img/ROC_lbp_svm_replay_attack.pdf>`
+
+
+------------
+
+Image Quality Measures as features of facial region + SVM classifier
+========================================================================
+
+A detailed description of this PAD pipeline is given at :ref:`bob.pad.face.resources.face_pad.qm_svm_replayattack`.
+
+To run this baseline on the `replayattack`_ database, using the ``grandtest`` protocol, execute the following:
+
+.. code-block:: sh
+
+    $ ./bin/spoof.py qm-svm \
+    --database replay --protocol grandtest --groups train dev eval \
+    --sub-directory <PATH_TO_STORE_THE_RESULTS>
+
+.. tip::
+
+    Similarly to the tip above, you can run this baseline in parallel on the SGE grid by adding the ``--grid idiap`` argument.
+
+To understand the settings of this baseline PAD experiment, you can check the
+corresponding configuration file: ``bob/pad/face/config/qm_svm.py``.
+
+To evaluate the results, computing the EER and HTER and plotting the ROC curve,
+you can use the following command:
+
+.. code-block:: sh
+
+    ./bin/evaluate.py \
+    --dev-files  <PATH_TO_STORE_THE_RESULTS>/grandtest/scores/scores-dev  \
+    --eval-files <PATH_TO_STORE_THE_RESULTS>/grandtest/scores/scores-eval \
+    --legends "IQM features of facial region + SVM classifier + REPLAY-ATTACK database" \
+    -F 7 \
+    --criterion EER \
+    --roc <PATH_TO_STORE_THE_RESULTS>/ROC.pdf
+
+The EER/HTER error rates for the `replayattack`_ database are summarized in the table below:
+
++-------------------+----------+----------+
+|      Protocol     |  EER (%) | HTER (%) |
++===================+==========+==========+
+|   ``grandtest``   |  4.321   |  4.570   |
++-------------------+----------+----------+
+
+The ROC curve for this particular experiment can be downloaded from here:
+
+:download:`ROC curve <img/ROC_iqm_svm_replay_attack.pdf>`
+
+------------
+
+
+.. include:: links.rst
diff --git a/doc/guide.rst b/doc/guide.rst
deleted file mode 100644
index e115bff942d3a1d0986d247572d8d933edb4a95b..0000000000000000000000000000000000000000
--- a/doc/guide.rst
+++ /dev/null
@@ -1,16 +0,0 @@
-.. py:currentmodule:: bob.pad.face
-
-.. testsetup:: *
-
-   from __future__ import print_function
-   import pkg_resources
-
-============================================================
- Presentation Attack Detection in Face Biometrics in Python
-============================================================
-
-MFCC Extraction
----------------
-
-Two funct
-
diff --git a/doc/img/ROC_iqm_svm_replay_attack.pdf b/doc/img/ROC_iqm_svm_replay_attack.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..a4524065a5d3f1e41e547ae51f63859db6c505d9
Binary files /dev/null and b/doc/img/ROC_iqm_svm_replay_attack.pdf differ
diff --git a/doc/img/ROC_lbp_svm_replay_attack.pdf b/doc/img/ROC_lbp_svm_replay_attack.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..700f2f2ff4f617b49ba18a9cb52863c679caaa82
Binary files /dev/null and b/doc/img/ROC_lbp_svm_replay_attack.pdf differ
diff --git a/doc/index.rst b/doc/index.rst
index 8e68816b872bc0d554ec5313d5aede1f0bc92a75..a260d13f553315f56fd1407ed957d81afaa60135 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -2,20 +2,32 @@
 
 .. _bob.pad.face:
 
-==================================================
- Presentation Attack Detection in Face Biometrics
-==================================================
+========================================================
+Library for Facial Presentation Attack Detection (PAD)
+========================================================
 
-.. todolist::
+The Facial Presentation Attack Detection Library is an open-source tool consisting of a
+series of plugins for bob.pad.base_, our open-source presentation attack detection
+platform. As a result, it is fully extensible using the types and techniques
+documented in bob.pad.base_. Please refer to the manual of that package for a
+thorough introduction. In this guide, we focus on details concerning
+face PAD experiments using our plugins.
 
-Package Documentation
----------------------
+=============
+User's Guide
+=============
 
 .. toctree::
    :maxdepth: 2
 
-   guide
-   refrences
+   installation
+   baselines
+   references
+   resources
    py_api
 
+.. todolist::
+
+.. include:: links.rst
+
 
diff --git a/doc/installation.rst b/doc/installation.rst
new file mode 100644
index 0000000000000000000000000000000000000000..504281c7491a85b880760b82dcfd635b4ebe7a9f
--- /dev/null
+++ b/doc/installation.rst
@@ -0,0 +1,94 @@
+
+
+.. _bob.pad.face.installation:
+
+==============
+ Installation
+==============
+
+The installation of this package is divided into two parts: the installation of
+the package and its software dependencies, and the installation of the databases.
+
+
+Package Installation
+--------------------
+
+To install this package, first follow our `installation`_ instructions. Then,
+using the Python interpreter provided by the distribution, bootstrap and
+buildout this package:
+
+
+.. code-block:: sh
+
+  $ buildout
+
+
+Sphinx Documentation Building
+-----------------------------
+
+Once the package is installed, you may re-build this documentation locally by
+running:
+
+.. code-block:: sh
+
+  $ sphinx-build doc html
+
+The resulting HTML documentation will be written to the ``html`` directory.
+
+
+Setting up Databases
+--------------------
+
+In order to run face PAD algorithms using this package, you will need to
+download the raw files corresponding to the databases you would like
+to process. The raw files are not distributed with Bob_ software, as biometric
+data is, in most countries, considered sensitive data that cannot be obtained
+without explicit licensing from a data controller. You must visit the websites
+below, sign the license agreements and download the data before trying
+to run the baselines.
+
+.. note::
+
+   If you're at the Idiap Research Institute in Switzerland, the datasets used
+   in the baselines mentioned in this guide are already downloaded and
+   pre-installed on our shared file system. You don't need to re-download
+   databases or create a ``~/.bob_bio_databases.txt`` file.
+
+
+The current system readily supports the following freely available datasets:
+
+* ``replay-attack``: `replayattack`_
+
+
+After downloading the databases, note the base directories in which they
+are installed. Then, follow the instructions in
+:ref:`bob.pad.base.installation` to let this framework know where databases are
+located on your system.
+
+
+Development
+-----------
+
+If you're developing this package, you may automatically clone all necessary
+Bob_ repositories into your local package installation. This allows you to build
+against an environment which contains all of our dependencies_, but no
+previously installed Bob_ packages. To do so, use the buildout recipe in
+``develop.cfg`` just after bootstrapping:
+
+.. code-block:: sh
+
+  $ buildout -c develop.cfg
+
+Database SQL support files
+==========================
+
+If you installed all packages from scratch like above, you'll need to download
+the SQL support files of some of the database front-ends available in this
+package. This operation can be easily done like this:
+
+.. code-block:: sh
+
+  $ bob_dbmanage.py all download
+
+
+.. include:: links.rst
diff --git a/doc/links.rst b/doc/links.rst
new file mode 100644
index 0000000000000000000000000000000000000000..56b4ead8c53eb5600ebee52aae0fed9e71f0354a
--- /dev/null
+++ b/doc/links.rst
@@ -0,0 +1,14 @@
+
+
+.. This file contains all links we use for documentation in a centralized place
+
+
+.. _idiap: http://www.idiap.ch
+.. _bob: http://www.idiap.ch/software/bob
+.. _buildout: http://www.buildout.org
+.. _pypi: http://pypi.python.org
+.. _installation: https://www.idiap.ch/software/bob/install
+.. _bob.pad.base: https://pypi.python.org/pypi/bob.pad.base
+.. _replayattack: https://www.idiap.ch/dataset/replayattack
+.. _dependencies: https://gitlab.idiap.ch/bob/bob/wikis/Dependencies
+
diff --git a/doc/py_api.rst b/doc/py_api.rst
index 127fc0eef1ee96689f8d2b56d0a832181a541038..be29aac297535990753e19e6adb5ae437289a09c 100644
--- a/doc/py_api.rst
+++ b/doc/py_api.rst
@@ -1,38 +1,29 @@
 .. vim: set fileencoding=utf-8 :
 
+.. _bob.pad.face.py_api:
+
 ===================================
  Tools implemented in bob.pad.face
 ===================================
 
-Summary
--------
+This section lists all the functionality available in this library for running face PAD experiments.
 
 Databases
-~~~~~~~~~
+===================================
 
 Image Preprocessors
-~~~~~~~~~~~~~~~~~~~
+===================================
 
 Video Preprocessors
-~~~~~~~~~~~~~~~~~~~
+===================================
 
 Image Extractors
-~~~~~~~~~~~~~~~~
-
-.. autosummary::
-
-	bob.pad.face.extractor.LBPHistogram
+===================================
 
 Video Extractors
-~~~~~~~~~~~~~~~~
-
-
-
-.. automodule:: bob.pad.face
+===================================
 
 Image Extractors
-----------------
+===================================
+
 
-.. automodule:: bob.pad.face.extractor
-	:special-members: __call__
-	:no-inherited-members:
diff --git a/doc/references.rst b/doc/references.rst
new file mode 100644
index 0000000000000000000000000000000000000000..911101849170b2dd52a7cd6d1a885887bf7b9166
--- /dev/null
+++ b/doc/references.rst
@@ -0,0 +1,14 @@
+.. vim: set fileencoding=utf-8 :
+
+===========
+References
+===========
+
+.. [CAM12]  *I. Chingovska, A. Anjos and S. Marcel*, **On the effectiveness of local binary patterns in face anti-spoofing**,
+            in: Proceedings of the International Conference of the Biometrics Special Interest Group (BIOSIG), 2012, pp. 1-7.
+
+.. [WHJ15]  *D. Wen, H. Han and A. K. Jain*, **Face Spoof Detection with Image Distortion Analysis**,
+            in: IEEE Transactions on Information Forensics and Security, 2015.
+
+.. [CBVM16] *A. Costa-Pazo, S. Bhattacharjee, E. Vazquez-Fernandez and S. Marcel*, **The Replay-Mobile Face Presentation-Attack Database**,
+            in: Proceedings of the International Conference of the Biometrics Special Interest Group (BIOSIG), 2016, pp. 1-7.
diff --git a/doc/refrences.rst b/doc/refrences.rst
deleted file mode 100644
index c1233b034a51840d4a554bf4b6c831e61781e0fe..0000000000000000000000000000000000000000
--- a/doc/refrences.rst
+++ /dev/null
@@ -1,10 +0,0 @@
-.. vim: set fileencoding=utf-8 :
-
-==========
-References
-==========
-
-.. [ChingovskaEffectivnes12]  I. Chingovska, A. Anjos, and S. Marcel, ''On the
-	effectiveness of local binary patterns in face anti- spoofing,'' in
-	Biometrics Special Interest Group (BIOSIG), 2012 BIOSIG- Proceedings of the
-	International Conference of the, 2012, pp. 1-7.
diff --git a/doc/resources.rst b/doc/resources.rst
new file mode 100644
index 0000000000000000000000000000000000000000..7b9d574bc3e99c013c3cac8dc84f8d436745dbec
--- /dev/null
+++ b/doc/resources.rst
@@ -0,0 +1,57 @@
+
+
+.. _bob.pad.face.resources:
+
+===========
+ Resources
+===========
+
+This section contains a listing of all ready-to-use resources you can find in
+this package.
+
+
+
+.. _bob.pad.face.resources.databases:
+
+Databases
+------------
+
+
+.. _bob.pad.face.resources.face_pad:
+
+Available face PAD systems
+------------------------------
+
+These configuration files/resources contain the parameters of the available face PAD systems/algorithms.
+Each configuration file defines at least the following arguments of the ``spoof.py`` script
+(a minimal structural sketch is given after the list):
+
+    * ``sub_directory``
+    * ``preprocessor``
+    * ``extractor``
+    * ``algorithm``
+
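+As a rough illustration, the sketch below shows the overall structure of such a
+configuration module; the names and values are placeholders only, and the
+concrete preprocessor/extractor/algorithm instances used by each baseline are
+defined in the modules documented below:
+
+.. code-block:: python
+
+    # Structural sketch of a face PAD configuration file (placeholders only).
+    sub_directory = 'my_baseline'  # results go to <--sub-directory>/my_baseline
+
+    preprocessor = ...             # a bob.pad preprocessor instance (e.g. video face cropping)
+    extractor = ...                # a bob.pad extractor instance (e.g. LBP histograms per frame)
+    algorithm = ...                # a bob.pad algorithm instance (e.g. an SVM-based classifier)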
+
+.. _bob.pad.face.resources.face_pad.lbp_svm_replayattack:
+
+LBP features of facial region + SVM for REPLAY-ATTACK
+======================================================
+
+.. automodule:: bob.pad.face.config.lbp_svm
+   :members:
+
+
+.. _bob.pad.face.resources.face_pad.qm_svm_replayattack:
+
+Image Quality Measures as features of facial region + SVM for REPLAY-ATTACK
+================================================================================
+
+.. automodule:: bob.pad.face.config.qm_svm
+   :members:
\ No newline at end of file
diff --git a/requirements.txt b/requirements.txt
index 2daa4cd6696ddb812d78afa8888da245a3d56e06..fba91180c7a495b9d0c05130f390777816d35fdc 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,5 +1,16 @@
+setuptools
+six
+sphinx
+nose
+numpy
 bob.extension
+bob.bio.base
 bob.io.base
+bob.ip.base
+bob.pad.base
 bob.bio.face
 bob.bio.video
-bob.pad.base
+bob.io.image
+bob.ip.color
+bob.ip.qualitymeasure
+bob.learn.libsvm
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 3ccff59989c4aa43e4f921a72c89f461513a1b32..338b4dcfe4b03fda511137b5ae065a7604822587 100644
--- a/setup.py
+++ b/setup.py
@@ -93,10 +93,49 @@ setup(
             'version.py = bob.pad.face.script.version:main',
             ],
 
+        # registered databases:
         'bob.pad.database': [
             'replay = bob.pad.face.config.database.replay:database',
             ],
 
+        # registered configurations:
+        'bob.bio.config': [
+
+            # baselines:
+            'lbp-svm = bob.pad.face.config.lbp_svm',
+            'qm-svm = bob.pad.face.config.qm_svm',
+            ],
+
+        # registered preprocessors:
+        'bob.pad.preprocessor': [
+            'video-face-crop-preproc-64 = bob.pad.face.config.preprocessor.video_face_crop:video_face_crop_preproc_64_64',
+            'video-face-crop-preproc-64-face-50 = bob.pad.face.config.preprocessor.video_face_crop:video_face_crop_preproc_64_64_face_50',
+            'video-face-crop-preproc-64-face-50-local-cropper = bob.pad.face.config.preprocessor.video_face_crop:video_face_crop_preproc_64_64_face_50_local_cropper',
+            'video-face-crop-preproc-64-face-50-local-cropper-rgb = bob.pad.face.config.preprocessor.video_face_crop:video_face_crop_preproc_64_64_face_50_local_cropper_rgb',
+            'empty-preprocessor = bob.pad.face.config.preprocessor.filename:empty_preprocessor', # no preprocessing
+            'frame-diff-min-size-50-200-frames = bob.pad.face.config.preprocessor.frame_difference:frame_diff_min_size_50_200_frames',
+            ],
+
+        # registered extractors:
+        'bob.pad.extractor': [
+            'video-lbp-histogram-extractor-n8r1-uniform = bob.pad.face.config.extractor.video_lbp_histogram:video_lbp_histogram_extractor_n8r1_uniform',
+            'video-quality-measure-galbally-msu = bob.pad.face.config.extractor.video_quality_measure:video_quality_measure_galbally_msu',
+            ],
+
+        # registered algorithms:
+        'bob.pad.algorithm': [
+            'video-svm-pad-algorithm-10k-grid = bob.pad.face.config.algorithm.video_svm_pad_algorithm:video_svm_pad_algorithm_10k_grid',
+            'video-svm-pad-algorithm-10k-grid-mean-std = bob.pad.face.config.algorithm.video_svm_pad_algorithm:video_svm_pad_algorithm_10k_grid_mean_std',
+            'video-svm-pad-algorithm-10k-grid-mean-std-frame-level = bob.pad.face.config.algorithm.video_svm_pad_algorithm:video_svm_pad_algorithm_10k_grid_mean_std_frame_level',
+            'video-svm-pad-algorithm-default-svm-param-mean-std-frame-level = bob.pad.face.config.algorithm.video_svm_pad_algorithm:video_svm_pad_algorithm_default_svm_param_mean_std_frame_level',
+            ],
+
+        # registered grid configurations:
+        'bob.pad.grid': [
+            'idiap = bob.pad.face.config.grid:idiap',
+            'idiap-user-machines = bob.pad.face.config.grid:idiap_user_machines',
+            ],
+
     },
 
     # Classifiers are important if you plan to distribute this package through