diff --git a/bob/pad/face/config/extractor/video_hist_of_sparse_codes.py b/bob/pad/face/config/extractor/video_hist_of_sparse_codes.py
new file mode 100644
index 0000000000000000000000000000000000000000..6cce7e451642d3a04f685236775313250430127d
--- /dev/null
+++ b/bob/pad/face/config/extractor/video_hist_of_sparse_codes.py
@@ -0,0 +1,16 @@
+#!/usr/bin/env python
+
+from bob.pad.face.extractor import VideoHistOfSparseCodes
+
+
+#=======================================================================================
+# Define instances here:
+
+METHOD = "mean"
+extractor_mean = VideoHistOfSparseCodes(method = METHOD)
+
+
+METHOD = "hist"
+extractor_hist = VideoHistOfSparseCodes(method = METHOD)
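+
+
+# A minimal usage sketch (illustrative only): assuming ``frames`` is a
+# FrameContainer of sparse codes produced by the VideoSparseCoding
+# preprocessor, the instances above can be applied directly:
+#
+#     features_mean = extractor_mean(frames) # averaged sparse codes per frame
+#     features_hist = extractor_hist(frames) # histograms of non-zero codes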
+
+
diff --git a/bob/pad/face/config/preprocessor/video_sparse_coding.py b/bob/pad/face/config/preprocessor/video_sparse_coding.py
new file mode 100644
index 0000000000000000000000000000000000000000..905dd7b01edaddd01584eec16ebe839e890c65ca
--- /dev/null
+++ b/bob/pad/face/config/preprocessor/video_sparse_coding.py
@@ -0,0 +1,106 @@
+#!/usr/bin/env python
+
+from bob.pad.face.preprocessor import VideoSparseCoding
+
+
+#=======================================================================================
+# Define instances here:
+
+BLOCK_SIZE = 5
+BLOCK_LENGTH = 10
+MIN_FACE_SIZE = 50
+NORM_FACE_SIZE = 64
+DICTIONARY_FILE_NAMES = ["/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_front.hdf5",
+                         "/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_hor.hdf5",
+                         "/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_vert.hdf5"]
+FRAME_STEP = 10
+
+preprocessor = VideoSparseCoding(block_size = BLOCK_SIZE,
+                                 block_length = BLOCK_LENGTH,
+                                 min_face_size = MIN_FACE_SIZE,
+                                 norm_face_size = NORM_FACE_SIZE,
+                                 dictionary_file_names = DICTIONARY_FILE_NAMES,
+                                 frame_step = FRAME_STEP)
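+
+# A minimal usage sketch (illustrative only): given a color video in a
+# FrameContainer ``frames`` and per-frame face ``annotations``, the instance
+# above returns a FrameContainer of sparse codes:
+#
+#     sparse_codes = preprocessor(frames, annotations)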
+
+
+#=======================================================================================
+
+
+BLOCK_SIZE = 5
+BLOCK_LENGTH = 10
+MIN_FACE_SIZE = 50
+NORM_FACE_SIZE = 64
+DICTIONARY_FILE_NAMES = ["/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_front_10_5_16.hdf5",
+                         "/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_hor_10_5_16.hdf5",
+                         "/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_vert_10_5_16.hdf5"]
+FRAME_STEP = 2
+EXTRACT_HISTOGRAMS_FLAG = True
+
+preprocessor_10_5_16 = VideoSparseCoding(block_size = BLOCK_SIZE,
+                                         block_length = BLOCK_LENGTH,
+                                         min_face_size = MIN_FACE_SIZE,
+                                         norm_face_size = NORM_FACE_SIZE,
+                                         dictionary_file_names = DICTIONARY_FILE_NAMES,
+                                         frame_step = FRAME_STEP,
+                                         extract_histograms_flag = EXTRACT_HISTOGRAMS_FLAG)
+
+BLOCK_SIZE = 5
+BLOCK_LENGTH = 10
+MIN_FACE_SIZE = 50
+NORM_FACE_SIZE = 64
+DICTIONARY_FILE_NAMES = ["/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_front_10_5_32.hdf5",
+                         "/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_hor_10_5_32.hdf5",
+                         "/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_vert_10_5_32.hdf5"]
+FRAME_STEP = 2
+EXTRACT_HISTOGRAMS_FLAG = True
+
+preprocessor_10_5_32 = VideoSparseCoding(block_size = BLOCK_SIZE,
+                                         block_length = BLOCK_LENGTH,
+                                         min_face_size = MIN_FACE_SIZE,
+                                         norm_face_size = NORM_FACE_SIZE,
+                                         dictionary_file_names = DICTIONARY_FILE_NAMES,
+                                         frame_step = FRAME_STEP,
+                                         extract_histograms_flag = EXTRACT_HISTOGRAMS_FLAG)
+
+BLOCK_SIZE = 5
+BLOCK_LENGTH = 10
+MIN_FACE_SIZE = 50
+NORM_FACE_SIZE = 64
+DICTIONARY_FILE_NAMES = ["/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_front_10_5_64.hdf5",
+                         "/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_hor_10_5_64.hdf5",
+                         "/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_vert_10_5_64.hdf5"]
+FRAME_STEP = 2
+EXTRACT_HISTOGRAMS_FLAG = True
+
+preprocessor_10_5_64 = VideoSparseCoding(block_size = BLOCK_SIZE,
+                                         block_length = BLOCK_LENGTH,
+                                         min_face_size = MIN_FACE_SIZE,
+                                         norm_face_size = NORM_FACE_SIZE,
+                                         dictionary_file_names = DICTIONARY_FILE_NAMES,
+                                         frame_step = FRAME_STEP,
+                                         extract_histograms_flag = EXTRACT_HISTOGRAMS_FLAG)
+
+
+BLOCK_SIZE = 5
+BLOCK_LENGTH = 10
+MIN_FACE_SIZE = 50
+NORM_FACE_SIZE = 64
+DICTIONARY_FILE_NAMES = ["/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_front_10_5_128.hdf5",
+                         "/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_hor_10_5_128.hdf5",
+                         "/idiap/user/onikisins/Projects/ODIN/Python/scripts/test_scripts/data/dictionary_vert_10_5_128.hdf5"]
+FRAME_STEP = 2
+EXTRACT_HISTOGRAMS_FLAG = True
+
+preprocessor_10_5_128 = VideoSparseCoding(block_size = BLOCK_SIZE,
+                                          block_length = BLOCK_LENGTH,
+                                          min_face_size = MIN_FACE_SIZE,
+                                          norm_face_size = NORM_FACE_SIZE,
+                                          dictionary_file_names = DICTIONARY_FILE_NAMES,
+                                          frame_step = FRAME_STEP,
+                                          extract_histograms_flag = EXTRACT_HISTOGRAMS_FLAG)
+
diff --git a/bob/pad/face/extractor/VideoHistOfSparseCodes.py b/bob/pad/face/extractor/VideoHistOfSparseCodes.py
new file mode 100644
index 0000000000000000000000000000000000000000..61747358cf625ba3043c521c46bcd78d800f0d3d
--- /dev/null
+++ b/bob/pad/face/extractor/VideoHistOfSparseCodes.py
@@ -0,0 +1,166 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""
+Created on Wed Sep 27 11:33:45 2017
+
+@author: Olegs Nikisins
+"""
+
+#==============================================================================
+# Import what is needed here:
+
+from bob.bio.base.extractor import Extractor
+
+import numpy as np
+
+import bob.bio.video
+
+from bob.pad.face.extractor import ImageQualityMeasure
+
+#==============================================================================
+# Main body:
+
+class VideoHistOfSparseCodes(Extractor, object):
+    """
+    This class is designed to extract histograms of sparse codes.
+
+    **Parameters:**
+
+    ``method`` : :py:class:`str`
+        A method to use in the histogram computation. Two options are available:
+        "mean" and "hist". Default: "mean".
+    """
+
+    #==========================================================================
+    def __init__(self,
+                 method = "mean"):
+
+        super(VideoHistOfSparseCodes, self).__init__(method = method)
+
+        self.method = method
+
+        # An auxiliary extractor for a single image/frame. It is used only to
+        # write and read the feature vectors through the video wrapper:
+        extractor = ImageQualityMeasure()
+
+        self.video_extractor = bob.bio.video.extractor.Wrapper(extractor)
+
+
+    #==========================================================================
+    def comp_hist_of_sparse_codes(self, frames, method):
+        """
+        Compute the histograms of sparse codes.
+        """
+
+        histograms = []
+
+        for frame_data in frames:
+
+            frame = frame_data[1]
+
+            if method == "mean":
+
+                frame_codes = np.mean(frame, axis=1)
+
+            if method == "hist":
+
+                frame_codes = np.mean(frame!=0, axis=1)
+
+            for idx, row in enumerate(frame_codes):
+
+                frame_codes[idx, :] = row / np.sum(row) # normalize each row to sum to one
+
+            hist = frame_codes.flatten()
+
+            histograms.append(hist)
+
+        return histograms
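+
+    # Toy illustration of the two methods, for a single frame of sparse codes
+    # of shape (n_channels, n_samples, n_words):
+    #
+    #     frame = np.array([[[0., 2.], [0., 4.]]])   # shape (1, 2, 2)
+    #     np.mean(frame, axis=1)                     # -> [[0., 3.]]  ("mean")
+    #     np.mean(frame != 0, axis=1)                # -> [[0., 1.]]  ("hist")
+    #
+    # Each resulting row is then normalized to sum to one and flattened.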
+
+
+    #==========================================================================
+    def convert_sparse_codes_to_frame_container(self, list_of_arrays):
+        """
+        Convert an input list of arrays into Frame Container.
+
+        **Parameters:**
+
+        ``list_of_arrays`` : [:py:class:`numpy.ndarray`]
+            A list of arrays.
+
+        **Returns:**
+
+        ``frame_container`` : FrameContainer
+            FrameContainer containing the feature vectors.
+        """
+
+        frame_container = bob.bio.video.FrameContainer() # initialize the FrameContainer
+
+        for idx, item in enumerate(list_of_arrays):
+
+            frame_container.add(idx, item) # add frame to FrameContainer
+
+        return frame_container
+
+
+    #==========================================================================
+    def __call__(self, frames):
+        """
+        Extract feature vectors.
+
+        **Parameters:**
+
+        ``frames`` : FrameContainer or string.
+            Data stored in the FrameContainer,
+            see ``bob.bio.video.utils.FrameContainer`` for further details.
+
+        **Returns:**
+
+        ``frame_container`` : FrameContainer
+            Histograms of sparse codes stored in the FrameContainer.
+        """
+
+        histograms = self.comp_hist_of_sparse_codes(frames, self.method)
+
+        frame_container = self.convert_sparse_codes_to_frame_container(histograms)
+
+        return frame_container
+
+
+    #==========================================================================
+    def write_feature(self, frames, file_name):
+        """
+        Writes the given data (that has been generated using the __call__ function of this class) to file.
+        This method overwrites the write_feature() method of the Extractor class.
+
+        **Parameters:**
+
+        ``frames`` :
+            Data returned by the __call__ method of the class.
+
+        ``file_name`` : :py:class:`str`
+            Name of the file.
+        """
+
+        self.video_extractor.write_feature(frames, file_name)
+
+
+    #==========================================================================
+    def read_feature(self, file_name):
+        """
+        Reads the extracted features from file.
+        This method overwrites the read_feature() method of the Extractor class.
+
+        **Parameters:**
+
+        ``file_name`` : :py:class:`str`
+            Name of the file.
+
+        **Returns:**
+
+        ``frames`` : :py:class:`bob.bio.video.FrameContainer`
+            Frames stored in the frame container.
+        """
+
+        frames = self.video_extractor.read_feature(file_name)
+
+        return frames
diff --git a/bob/pad/face/extractor/__init__.py b/bob/pad/face/extractor/__init__.py
index 310989b454bdd1ebb73ef43b6c4f4a9fd56a72e6..1699d53449c3badf1c1f665dbb9a2c680f6e95f9 100644
--- a/bob/pad/face/extractor/__init__.py
+++ b/bob/pad/face/extractor/__init__.py
@@ -4,6 +4,7 @@ from .ImageQualityMeasure import ImageQualityMeasure
 from .VideoDataLoader import VideoDataLoader
 from .VideoQualityMeasure import VideoQualityMeasure
 from .FrameDiffFeatures import FrameDiffFeatures
+from .VideoHistOfSparseCodes import VideoHistOfSparseCodes
 
 def __appropriate__(*args):
     """Says object was actually declared here, and not in the import module.
@@ -30,5 +31,6 @@ __appropriate__(
     VideoQualityMeasure,
     VideoDataLoader,
     FrameDiffFeatures,
+    VideoHistOfSparseCodes,
 )
 __all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/bob/pad/face/preprocessor/VideoSparseCoding.py b/bob/pad/face/preprocessor/VideoSparseCoding.py
new file mode 100644
index 0000000000000000000000000000000000000000..944ca82e30ae792dd269d8ae927b61bbbaa01ce5
--- /dev/null
+++ b/bob/pad/face/preprocessor/VideoSparseCoding.py
@@ -0,0 +1,924 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Sep 22 2017
+
+@author: Olegs Nikisins
+"""
+
+#==============================================================================
+# Import what is needed here:
+
+from bob.bio.base.preprocessor import Preprocessor
+
+import bob.bio.video
+
+import numpy as np
+
+import random
+random.seed(7)
+
+from sklearn.decomposition import SparseCoder
+
+import bob.io.base
+
+import bob.ip.base
+
+import bob.ip.color
+
+
+#==============================================================================
+class VideoSparseCoding(Preprocessor, object):
+    """
+    This class is designed to compute sparse codes for spatial frontal,
+    spatio-temporal horizontal, and spatio-temporal vertical patches.
+    The codes are computed for all possible stacks of facial images.
+    The maximum possible number of stacks is:
+    (``num_of_frames_in_video`` - ``block_length``).
+    However, this number can be smaller, and is controlled by two arguments
+    of this class: ``min_face_size`` and ``frame_step``.
+
+    **Parameters:**
+
+    ``block_size`` : :py:class:`int`
+        The spatial size of facial patches. Default: 5.
+
+    ``block_length`` : :py:class:`int`
+        The temporal length of the stack of facial images / number of frames
+        per stack. Default: 10.
+
+    ``min_face_size`` : :py:class:`int`
+        Discard frames with a face smaller than ``min_face_size``.
+        Default: 50.
+
+    ``norm_face_size`` : :py:class:`int`
+        The size of the face after normalization. Default: 64.
+
+    ``dictionary_file_names`` : [:py:class:`str`]
+        A list of filenames containing the dictionaries. The filenames must be
+        listed in the following order:
+        [file_name_pointing_to_frontal_dictionary,
+        file_name_pointing_to_horizontal_dictionary,
+        file_name_pointing_to_vertical_dictionary]
+
+    ``frame_step`` : :py:class:`int`
+        Select frames for processing with this step. If set to 1, all frames
+        will be processed. Used to speed up the experiments.
+        Default: 1.
+
+    ``extract_histograms_flag`` : :py:class:`bool`
+        If this flag is set to ``True``, the histograms of sparse codes will be
+        computed for all stacks of facial images / samples. In this case an
+        empty feature extractor must be used, because the feature vectors
+        (histograms) are already extracted in the preprocessing step.
+
+        NOTE: set this flag to ``True`` if you want to reduce the amount of
+        memory required to store temporary files.
+        Default: ``False``.
+
+    ``method`` : :py:class:`str`
+        A method to use in the histogram computation. Two options are available:
+        "mean" and "hist". This argument is valid only if ``extract_histograms_flag``
+        is set to ``True``.
+        Default: "hist".
+    """
+
+    #==========================================================================
+    def __init__(self,
+                 block_size = 5,
+                 block_length = 10,
+                 min_face_size = 50,
+                 norm_face_size = 64,
+                 dictionary_file_names = [],
+                 frame_step = 1,
+                 extract_histograms_flag = False,
+                 method = "hist",
+                 **kwargs):
+
+        super(VideoSparseCoding, self).__init__(block_size = block_size,
+                                                block_length = block_length,
+                                                min_face_size = min_face_size,
+                                                norm_face_size = norm_face_size,
+                                                dictionary_file_names = dictionary_file_names,
+                                                frame_step = frame_step,
+                                                extract_histograms_flag = extract_histograms_flag,
+                                                method = method)
+
+        self.block_size = block_size
+        self.block_length = block_length
+        self.min_face_size = min_face_size
+        self.norm_face_size = norm_face_size
+        self.dictionary_file_names = dictionary_file_names
+        self.frame_step = frame_step
+        self.extract_histograms_flag = extract_histograms_flag
+        self.method = method
+
+        self.video_preprocessor = bob.bio.video.preprocessor.Wrapper()
+
+
+    #==========================================================================
+    def crop_norm_face_grayscale(self, image, annotations, face_size):
+        """
+        This function crops the face in the input gray-scale image given
+        annotations defining the face bounding box. The size of the face is
+        also normalized to the pre-defined dimensions.
+
+        The algorithm is identical to the one used in the following paper:
+        "On the Effectiveness of Local Binary Patterns in Face Anti-spoofing"
+
+        **Parameters:**
+
+        ``image`` : 2D :py:class:`numpy.ndarray`
+            Gray-scale input image.
+
+        ``annotations`` : :py:class:`dict`
+            A dictionary containing annotations of the face bounding box.
+            Dictionary must be as follows:
+            ``{'topleft': (row, col), 'bottomright': (row, col)}``
+
+        ``face_size`` : :py:class:`int`
+            The size of the face after normalization.
+
+        **Returns:**
+
+        ``normbbx`` : 2D :py:class:`numpy.ndarray`
+            Cropped facial image of the size (``face_size``, ``face_size``).
+        """
+
+        cutframe = image[annotations['topleft'][0]:annotations['bottomright'][0],
+                         annotations['topleft'][1]:annotations['bottomright'][1]]
+
+        tempbbx = np.ndarray((face_size, face_size), 'float64')
+        bob.ip.base.scale(cutframe, tempbbx) # scale the crop to the normalized size
+        normbbx = np.floor(tempbbx + 0.5).astype('uint8') # round to the nearest integer
+
+        return normbbx
+
+
+    #==========================================================================
+    def crop_norm_faces_grayscale(self, images, annotations, face_size):
+        """
+        This function crops and normalizes faces in a stack of images given
+        annotations of the face bounding box for the first image in the stack.
+
+        **Parameters:**
+
+        ``images`` : 3D :py:class:`numpy.ndarray`
+            A stack of gray-scale input images. The size of the array is
+            (n_images x n_rows x n_cols).
+
+        ``annotations`` : :py:class:`dict`
+            A dictionary containing annotations of the face bounding box.
+            Dictionary must be as follows:
+            ``{'topleft': (row, col), 'bottomright': (row, col)}``
+
+        ``face_size`` : :py:class:`int`
+            The size of the face after normalization.
+
+        **Returns:**
+
+        ``normbbx`` : 3D :py:class:`numpy.ndarray`
+            A stack of normalized faces.
+        """
+
+        normbbx = []
+
+        for image in images:
+
+            normbbx.append( self.crop_norm_face_grayscale(image, annotations, face_size) )
+
+        normbbx = np.stack(normbbx)
+
+        return normbbx
+
+
+    #==========================================================================
+    def select_all_blocks(self, images, block_size):
+        """
+        Extract all possible 3D blocks from a stack of images.
+
+        ``images`` : 3D :py:class:`numpy.ndarray`
+            A stack of gray-scale input images. The size of the array is
+            (``n_images`` x ``n_rows`` x ``n_cols``).
+
+        ``block_size`` : :py:class:`int`
+            The spatial size of patches. The size of extracted 3D blocks is:
+            (``n_images`` x ``block_size`` x ``block_size``).
+        """
+
+        (_, row_num, col_num) = images.shape
+
+        all_blocks = []
+
+        for row in range(row_num - block_size):
+
+            for col in range(col_num - block_size):
+
+                block = images[:, row:row+block_size, col:col+block_size]
+
+                all_blocks.append( block )
+
+        return all_blocks
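+
+    # Note: for a stack of shape (10, 64, 64) and block_size = 5, the loops
+    # above yield (64 - 5)^2 = 3481 overlapping blocks of shape (10, 5, 5) each.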
+
+
+    #==========================================================================
+    def convert_frame_cont_to_grayscale_array(self, frame_cont):
+        """
+        Convert color video stored in the frame container into 3D array storing
+        gray-scale frames. The dimensions of the output array are:
+        (n_frames x n_rows x n_cols).
+
+        **Parameters:**
+
+        ``frame_cont`` : FrameContainer
+            Video data stored in the FrameContainer, see
+            ``bob.bio.video.utils.FrameContainer`` for further details.
+
+        **Returns:**
+
+        ``result_array`` : 3D :py:class:`numpy.ndarray`
+            A stack of gray-scale frames. The size of the array is
+            (n_frames x n_rows x n_cols).
+        """
+
+        result_array = []
+
+        for frame in frame_cont:
+
+            image = frame[1]
+
+            result_array.append( bob.ip.color.rgb_to_gray(image) )
+
+        result_array = np.stack(result_array)
+
+        return result_array
+
+
+    #==========================================================================
+    def get_all_blocks_from_color_channel(self, video, annotations, block_size, block_length, min_face_size, norm_face_size):
+        """
+        Extract all 3D blocks from facial region of the input 3D array.
+        Input 3D array represents one color channel of the video or a gray-scale
+        video. Blocks are extracted from all 3D facial volumes. Facial volumes
+        overlap with a shift of one frame.
+
+        The size of the facial volume is:
+        (``block_length`` x ``norm_face_size`` x ``norm_face_size``).
+
+        The maximum number of available facial volumes in the video:
+        (``num_of_frames_in_video`` - ``block_length``).
+        However the final number of facial volumes might be less than above,
+        because frames with small faces ( < min_face_size ) are discarded.
+
+        **Parameters:**
+
+        ``video`` : 3D :py:class:`numpy.ndarray`
+            A stack of gray-scale input images. The size of the array is
+            (n_images x n_rows x n_cols).
+
+        ``annotations`` : :py:class:`dict`
+            A dictionary containing the annotations for each frame in the video.
+            Dictionary structure:
+            ``annotations = {'1': frame1_dict, '2': frame2_dict, ...}``,
+            where
+            ``frameN_dict = {'topleft': (row, col), 'bottomright': (row, col)}``
+            is the dictionary defining the coordinates of the face bounding
+            box in frame N.
+
+        ``block_size`` : :py:class:`int`
+            The spatial size of facial patches.
+
+        ``block_length`` : :py:class:`int`
+            The temporal length of the stack of facial images / number of frames
+            per stack.
+
+        ``min_face_size`` : :py:class:`int`
+            Discard frames with face of the size less than ``min_face_size``.
+
+        ``norm_face_size`` : :py:class:`int`
+            The size of the face after normalization.
+
+        **Returns:**
+
+        ``all_blocks`` : [[3D :py:class:`numpy.ndarray`]]
+            Internal list contains all possible 3D blocks/volumes extracted from
+            a particular stack of facial images. The dimensions of each 3D block:
+            (block_length x block_size x block_size).
+            The number of possible blocks is: (norm_face_size - block_size)^2.
+
+            The length of the outer list is equal to the number of possible
+            facial stacks in the input video:
+            (``num_of_frames_in_video`` - ``block_length``).
+            However, the final number of facial volumes might be less than above,
+            because frames with small faces ( < min_face_size ) are discarded.
+        """
+
+        annotated_frames = annotations.keys()
+
+        all_blocks = []
+
+        for fn in range(len(video)-block_length):
+
+            if str(fn) in annotated_frames: # process if frame is annotated
+
+                frame_annotations = annotations[str(fn)]
+
+                face_size = np.min(np.array(frame_annotations['bottomright']) - np.array(frame_annotations['topleft']))
+
+                if face_size >= min_face_size: # process if the face is large enough
+
+                    # Select a 3D stack of images. Each stack has ``block_length`` frames.
+                    stack_of_images = video[fn:fn + block_length, :, :]
+
+                    # 3D stacks of normalized face images.
+                    faces = self.crop_norm_faces_grayscale(stack_of_images, frame_annotations, norm_face_size)
+
+                    # A list with all blocks per stack of facial images.
+                    list_all_blocks_per_stack = self.select_all_blocks(faces, block_size)
+
+                    all_blocks.append( list_all_blocks_per_stack )
+
+        return all_blocks
+
+
+    #==========================================================================
+    def extract_patches_from_blocks(self, all_blocks):
+        """
+        Extract frontal, central-horizontal and central-vertical patches from
+        all blocks returned by ``get_all_blocks_from_color_channel``
+        method of this class. The patches are returned in a vectorized form.
+
+        **Parameters:**
+
+        ``all_blocks`` : [[3D :py:class:`numpy.ndarray`]]
+            Internal list contains all possible 3D blocks/volumes extracted from
+            a particular stack of facial images. The dimensions of each 3D block:
+            (block_length x block_size x block_size).
+            The number of possible blocks is: (norm_face_size - block_size)^2.
+
+            The length of the outer list is equal to the number of possible
+            facial stacks in the input video:
+            (``num_of_frames_in_video`` - ``block_length``).
+            However, the final number of facial volumes might be less than above,
+            because frames with small faces ( < min_face_size ) are discarded.
+
+        **Returns:**
+
+        ``frontal_patches`` : [2D :py:class:`numpy.ndarray`]
+            Each element in the list contains an array of vectorized frontal
+            patches for the particular stack of facial images.
+            The size of each array is:
+            ( (``norm_face_size`` - ``block_size``)^2 x ``block_size``^2 ).
+            The maximum length of the list is:
+            (``num_of_frames_in_video`` - ``block_length``)
+
+        ``horizontal_patches`` : [2D :py:class:`numpy.ndarray`]
+            Each element in the list contains an array of vectorized horizontal
+            patches for the particular stack of facial images.
+            The size of each array is:
+            ( (``norm_face_size`` - ``block_size``)^2 x ``block_length``*``block_size`` ).
+            The maximum length of the list is:
+            (``num_of_frames_in_video`` - ``block_length``)
+
+        ``vertical_patches`` : [2D :py:class:`numpy.ndarray`]
+            Each element in the list contains an array of vectorized vertical
+            patches for the particular stack of facial images.
+            The size of each array is:
+            ( (``norm_face_size`` - ``block_size``)^2 x ``block_length``*``block_size`` ).
+            The maximum length of the list is:
+            (``num_of_frames_in_video`` - ``block_length``)
+        """
+
+        length, row_num, col_num = all_blocks[0][0].shape
+
+        selected_row = int(row_num / 2)
+
+        selected_col = int(col_num / 2)
+
+        frontal_patches = []
+        horizontal_patches = []
+        vertical_patches = []
+
+        # volume - is a list of 3D blocks for a particular stack of facial images.
+        for volume in all_blocks:
+
+            volume_frontal_patches = []
+            volume_horizontal_patches = []
+            volume_vertical_patches = []
+
+            for block in volume:
+
+                frontal_patch = block[0, :, :] # the frontal patch of a block. Size: (row_num x col_num)
+                volume_frontal_patches.append(frontal_patch.flatten())
+
+                horizontal_patch = block[:, selected_row, :] # the central-horizontal patch of a block. Size: (length x col_num), where
+                # length = block_length and col_num = block_size.
+                volume_horizontal_patches.append(horizontal_patch.flatten())
+
+                vertical_patch = block[:, :, selected_col] # the central-vertical patch of a block. Size: (length x row_num)
+                volume_vertical_patches.append(vertical_patch.flatten())
+
+            frontal_patches.append( np.stack(volume_frontal_patches) )
+
+            horizontal_patches.append( np.stack(volume_horizontal_patches) )
+
+            vertical_patches.append( np.stack(volume_vertical_patches) )
+
+        return frontal_patches, horizontal_patches, vertical_patches
+
+
+    #==========================================================================
+    def __select_random_patches_single_list(self, patches, n_patches):
+        """
+        This method is called by ``select_random_patches`` method to process
+        all lists of patches.
+
+        **Parameters:**
+
+        ``patches`` : [2D :py:class:`numpy.ndarray`]
+            Each element in the list contains an array of vectorized
+            patches for the particular stack of facial images.
+            The size of each array is:
+            ( (``norm_face_size`` - ``block_size``)^2 x ``block_size``^2 ).
+            The maximum length of the list is:
+            (``num_of_frames_in_video`` - ``block_length``)
+
+        ``n_patches`` : :py:class:`int`
+            Number of randomly selected patches.
+
+        **Returns:**
+
+        ``selected_patches`` : [2D :py:class:`numpy.ndarray`]
+            An array of selected patches. The dimensionality of the array:
+            (``n_patches`` x ``number_of_features``).
+        """
+
+        all_patches = np.vstack(patches)
+
+        idx = [random.randint( 0, len(all_patches) - 1 ) for _ in range(n_patches)]
+
+        selected_patches = all_patches[idx, :]
+
+        return selected_patches
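+
+    # Note: ``random.randint`` samples the indices with replacement, so the
+    # same patch may be selected more than once.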
+
+
+    #==========================================================================
+    def select_random_patches(self, frontal_patches, horizontal_patches, vertical_patches, n_patches):
+        """
+        Select random patches given lists of frontal, central-horizontal and
+        central-vertical patches, as returned by ``extract_patches_from_blocks``
+        method of this class.
+
+        **Parameters:**
+
+        ``frontal_patches`` : [2D :py:class:`numpy.ndarray`]
+            Each element in the list contains an array of vectorized frontal
+            patches for the particular stack of facial images.
+            The size of each array is:
+            ( (``norm_face_size`` - ``block_size``)^2 x ``block_size``^2 ).
+            The maximum length of the list is:
+            (``num_of_frames_in_video`` - ``block_length``)
+
+        ``horizontal_patches`` : [2D :py:class:`numpy.ndarray`]
+            Each element in the list contains an array of vectorized horizontal
+            patches for the particular stack of facial images.
+            The size of each array is:
+            ( (``norm_face_size`` - ``block_size``)^2 x ``block_length``*``block_size`` ).
+            The maximum length of the list is:
+            (``num_of_frames_in_video`` - ``block_length``)
+
+        ``vertical_patches`` : [2D :py:class:`numpy.ndarray`]
+            Each element in the list contains an array of vectorized vertical
+            patches for the particular stack of facial images.
+            The size of each array is:
+            ( (``norm_face_size`` - ``block_size``)^2 x ``block_length``*``block_size`` ).
+            The maximum length of the list is:
+            (``num_of_frames_in_video`` - ``block_length``)
+
+        ``n_patches`` : :py:class:`int`
+            Number of randomly selected patches.
+
+        **Returns:**
+
+        ``selected_frontal_patches`` : [2D :py:class:`numpy.ndarray`]
+            An array of selected frontal patches.
+            The dimensionality of the array:
+            (``n_patches`` x ``number_of_features``).
+
+        ``selected_horizontal_patches`` : [2D :py:class:`numpy.ndarray`]
+            An array of selected horizontal patches.
+            The dimensionality of the array:
+            (``n_patches`` x ``number_of_features``).
+
+        ``selected_vertical_patches`` : [2D :py:class:`numpy.ndarray`]
+            An array of selected vertical patches.
+            The dimensionality of the array:
+            (``n_patches`` x ``number_of_features``).
+        """
+
+        selected_frontal_patches = self.__select_random_patches_single_list(frontal_patches, n_patches)
+
+        selected_horizontal_patches = self.__select_random_patches_single_list(horizontal_patches, n_patches)
+
+        selected_vertical_patches = self.__select_random_patches_single_list(vertical_patches, n_patches)
+
+        return selected_frontal_patches, selected_horizontal_patches, selected_vertical_patches
+
+
+    #==========================================================================
+    def get_sparse_codes_for_patches(self, patches, dictionary):
+        """
+        This function computes reconstruction sparse codes for a set of patches
+        given a dictionary to reconstruct the patches from. The OMP sparse
+        coding algorithm is used for that.
+        The maximum number of non-zero entries in the sparse code is:
+        ``num_of_features/5``.
+
+        **Parameters:**
+
+        ``patches`` : 2D :py:class:`numpy.ndarray`
+            Vectorized patches to be reconstructed. The dimensionality is:
+            (``n_samples`` x ``n_features``).
+
+        ``dictionary`` : 2D :py:class:`numpy.ndarray`
+            A dictionary to use for patch reconstruction. The dimensions are:
+            (n_words_in_dictionary x n_features)
+
+        **Returns:**
+
+        ``codes`` : 2D :py:class:`numpy.ndarray`
+            An array of reconstruction sparse codes for each patch.
+            The dimensionality is:
+            (``n_samples`` x ``n_words_in_the_dictionary``).
+        """
+
+        algo = 'omp'
+
+        n_nonzero = int(dictionary.shape[1] / 5.)
+
+        alpha = n_nonzero
+
+        coder = SparseCoder(dictionary=dictionary, transform_n_nonzero_coefs=n_nonzero,
+                            transform_alpha=alpha, transform_algorithm=algo)
+
+        # If a single patch of shape (n_features,) is given, convert it to the shape (1, n_features):
+
+        if len(patches.shape) == 1:
+
+            patches = patches.reshape(1, -1)
+
+        codes = coder.transform(patches)
+
+        return codes
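+
+    # Illustrative call on toy data (assuming a unit-norm random dictionary):
+    #
+    #     dictionary = np.random.randn(128, 25)                     # 128 words, 25 features
+    #     dictionary /= np.linalg.norm(dictionary, axis=1)[:, None] # normalize the atoms
+    #     patches = np.random.randn(10, 25)                         # 10 vectorized patches
+    #     codes = self.get_sparse_codes_for_patches(patches, dictionary) # shape: (10, 128)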
+
+
+    #==========================================================================
+    def get_sparse_codes_for_list_of_patches(self, list_of_patches, dictionary):
+        """
+        Compute sparse codes for each array of vectorized patches in the list.
+        This function just calls ``get_sparse_codes_for_patches`` method
+        for each element of the input list.
+
+        **Parameters:**
+
+        ``list_of_patches`` : [2D :py:class:`numpy.ndarray`]
+            A list of vectorized patches to be reconstructed.
+            The dimensionality of each array in the list:
+            (``n_samples`` x ``n_features``).
+
+        ``dictionary`` : 2D :py:class:`numpy.ndarray`
+            A dictionary to use for patch reconstruction. The dimensions are:
+            (n_words_in_dictionary x n_features)
+
+        **Returns:**
+
+        ``video_codes`` : [2D :py:class:`numpy.ndarray`]
+            A list of arrays with reconstruction sparse codes for each patch.
+            The dimensionality of each array in the list is:
+            (``n_samples`` x ``n_words_in_the_dictionary``).
+        """
+
+        video_codes = []
+
+        for patches in list_of_patches:
+
+            codes = self.get_sparse_codes_for_patches(patches, dictionary)
+
+            video_codes.append(codes)
+
+        return video_codes
+
+
+    #==========================================================================
+    def load_array_from_hdf5(self, file_name):
+        """
+        Load an array from the hdf5 file, given the name of the file.
+
+        **Parameters:**
+
+        ``file_name`` : :py:class:`str`
+            Name of the file.
+
+        **Returns:**
+
+        ``data`` : :py:class:`numpy.ndarray`
+            Array read from the file.
+        """
+
+        f = bob.io.base.HDF5File(file_name) # open the file for reading
+
+        data = f.read('data') # read the array stored under the 'data' key
+
+        del f
+
+        return data
+
+
+    #==========================================================================
+    def load_the_dictionaries(self, dictionary_file_names):
+        """
+        Load the dictionaries, given the names of the files containing them. The
+        dictionaries are precomputed.
+
+        **Parameters:**
+
+        ``dictionary_file_names`` : [:py:class:`str`]
+            A list of filenames containing the dictionaries. The filenames must be
+            listed in the following order:
+            [file_name_pointing_to_frontal_dictionary,
+            file_name_pointing_to_horizontal_dictionary,
+            file_name_pointing_to_vertical_dictionary]
+
+        **Returns:**
+
+        ``dictionary_frontal`` : 2D :py:class:`numpy.ndarray`
+            A dictionary to use for reconstruction of frontal patches.
+            The dimensions are: (n_words_in_dictionary x n_features_front)
+
+        ``dictionary_horizontal`` : 2D :py:class:`numpy.ndarray`
+            A dictionary to use for reconstruction of horizontal patches.
+            The dimensions are: (n_words_in_dictionary x n_features_horizont)
+
+        ``dictionary_vertical`` : 2D :py:class:`numpy.ndarray`
+            A dictionary to use for reconstruction of vertical patches.
+            The dimensions are: (n_words_in_dictionary x n_features_vert)
+        """
+
+        dictionary_frontal = self.load_array_from_hdf5(dictionary_file_names[0])
+
+        dictionary_horizontal = self.load_array_from_hdf5(dictionary_file_names[1])
+
+        dictionary_vertical = self.load_array_from_hdf5(dictionary_file_names[2])
+
+        return dictionary_frontal, dictionary_horizontal, dictionary_vertical
+
+
+    #==========================================================================
+    def convert_sparse_codes_to_frame_container(self, sparse_codes):
+        """
+        Convert an input list of lists of 2D arrays / sparse codes into a Frame
+        Container. Each frame in the output Frame Container is a 3D array which
+        stacks the three 2D arrays of codes of a particular frame / stack of
+        facial images.
+
+        **Parameters:**
+
+        ``sparse_codes`` : [[2D :py:class:`numpy.ndarray`]]
+            A list of lists of 2D arrays. Each 2D array contains sparse codes
+            of a particular stack of facial images. The length of internal lists
+            is equal to the number of processed frames. The outer list contains
+            the codes for frontal, horizontal and vertical patches, thus the
+            length of an outer list in the context of this class is 3.
+
+        **Returns:**
+
+        ``frame_container`` : FrameContainer
+            FrameContainer containing the frames with sparse codes for the
+            frontal, horizontal and vertical patches. Each frame is a 3D array.
+            The dimensionality of the array is:
+            (``3`` x ``n_samples`` x ``n_words_in_the_dictionary``).
+        """
+
+        frame_container = bob.bio.video.FrameContainer() # initialize the FrameContainer
+
+        zipped_codes = zip(sparse_codes[0], sparse_codes[1], sparse_codes[2])
+
+        for idx, (frontal_codes, horizontal_codes, vertical_codes) in enumerate(zipped_codes):
+
+            frame_3d = np.stack([frontal_codes, horizontal_codes, vertical_codes])
+
+            frame_container.add(idx, frame_3d) # add frame to FrameContainer
+
+        return frame_container
+
+
+    #==========================================================================
+    def comp_hist_of_sparse_codes(self, frames, method):
+        """
+        Compute the histograms of sparse codes.
+
+        **Parameters:**
+
+        ``frames`` : FrameContainer
+            FrameContainer containing the frames with sparse codes for the
+            frontal, horizontal and vertical patches. Each frame is a 3D array.
+            The dimensionality of the array is:
+            (``3`` x ``n_samples`` x ``n_words_in_the_dictionary``).
+            First array [0,:,:] contains frontal sparse codes.
+            Second array [1,:,:] contains horizontal sparse codes.
+            Third array [2,:,:] contains vertical sparse codes.
+
+        ``method`` : :py:class:`str`
+            Name of the method to be used for combining the sparse codes into
+            a single feature vector. Two options are possible: "mean" and
+            "hist". If "mean" is selected the mean for ``n_samples`` dimension
+            is first computed. The resulting vectors for various types of
+            patches are then concatenated into a single feature vector.
+            If "hist" is selected, the values in the input array are first
+            binarized setting all non-zero elements to one. The rest of the
+            process is similar to the "mean" combination method.
+
+        **Returns:**
+
+        ``histograms`` : [1D :py:class:`numpy.ndarray`]
+            A list of histograms of sparse codes, one histogram per frame.
+            The dimensionality of each histogram is:
+            (3*``n_words_in_the_dictionary``, ).
+        """
+
+        histograms = []
+
+        for frame_data in frames:
+
+            frame = frame_data[1]
+
+            if method == "mean":
+
+                frame_codes = np.mean(frame, axis=1)
+
+            if method == "hist":
+
+                frame_codes = np.mean(frame!=0, axis=1)
+
+            for idx, row in enumerate(frame_codes):
+
+                frame_codes[idx, :] = row / np.sum(row) # normalize each row to sum to one
+
+            hist = frame_codes.flatten()
+
+            histograms.append(hist)
+
+        return histograms
+
+
+    #==========================================================================
+    def convert_arrays_to_frame_container(self, list_of_arrays):
+        """
+        Convert an input list of arrays into Frame Container.
+
+        **Parameters:**
+
+        ``list_of_arrays`` : [:py:class:`numpy.ndarray`]
+            A list of arrays.
+
+        **Returns:**
+
+        ``frame_container`` : FrameContainer
+            FrameContainer containing the feature vectors.
+        """
+
+        frame_container = bob.bio.video.FrameContainer() # initialize the FrameContainer
+
+        for idx, item in enumerate(list_of_arrays):
+
+            frame_container.add(idx, item) # add frame to FrameContainer
+
+        return frame_container
+
+
+    #==========================================================================
+    def __call__(self, frames, annotations):
+        """
+        Compute sparse codes for spatial frontal, spatio-temporal horizontal,
+        and spatio-temporal vertical patches. The codes are computed for all
+        possible stacks of facial images. The maximum possible number of stacks
+        is: (``num_of_frames_in_video`` - ``block_length``).
+        However, this number can be smaller, and is controlled by two arguments
+        of this class: ``min_face_size`` and ``frame_step``.
+
+        If the ``self.extract_histograms_flag`` flag is set to ``True``, the
+        histograms of sparse codes will be computed for all possible stacks of
+        facial images.
+
+        **Parameters:**
+
+        ``frames`` : FrameContainer
+            Video data stored in the FrameContainer, see
+            ``bob.bio.video.utils.FrameContainer`` for further details.
+
+        ``annotations`` : :py:class:`dict`
+            A dictionary containing the annotations for each frame in the video.
+            Dictionary structure:
+            ``annotations = {'1': frame1_dict, '2': frame2_dict, ...}``,
+            where
+            ``frameN_dict = {'topleft': (row, col), 'bottomright': (row, col)}``
+            is the dictionary defining the coordinates of the face bounding
+            box in frame N.
+
+        **Returns:**
+
+        ``frame_container`` : FrameContainer
+            If the ``self.extract_histograms_flag`` flag is set to ``False``:
+            FrameContainer containing the frames with sparse codes for the
+            frontal, horizontal and vertical patches. Each frame is a 3D array.
+            The dimensionality of each array is:
+            (``3`` x ``n_samples`` x ``n_words_in_the_dictionary``).
+            The first slice in the 3D arrays corresponds to frontal sparse codes,
+            second slice to horizontal, and third to vertical codes.
+
+            If the ``self.extract_histograms_flag`` flag is set to ``True``, the
+            histograms of sparse codes will be computed. In this case each
+            frame is a 1D array with dimensionality:
+            (3*``n_words_in_the_dictionary``, )
+        """
+
+        # Convert frame container to 3D array:
+        video = self.convert_frame_cont_to_grayscale_array(frames)
+
+        # Get all blocks from all possible facial stacks:
+        all_blocks = self.get_all_blocks_from_color_channel(video, annotations,
+                                                            self.block_size, self.block_length,
+                                                            self.min_face_size, self.norm_face_size)
+
+        # Extract three sets of patches per each stack of facial images:
+        frontal_patches, horizontal_patches, vertical_patches = self.extract_patches_from_blocks(all_blocks)
+
+        # Load the dictionaries:
+        dictionary_frontal, dictionary_horizontal, dictionary_vertical = self.load_the_dictionaries(self.dictionary_file_names)
+
+        # Compute sparse codes for all patches of all types:
+        frontal_video_codes = self.get_sparse_codes_for_list_of_patches(frontal_patches[::self.frame_step], dictionary_frontal)
+        horizontal_video_codes = self.get_sparse_codes_for_list_of_patches(horizontal_patches[::self.frame_step], dictionary_horizontal)
+        vertical_video_codes = self.get_sparse_codes_for_list_of_patches(vertical_patches[::self.frame_step], dictionary_vertical)
+
+        frame_container = self.convert_sparse_codes_to_frame_container([frontal_video_codes, horizontal_video_codes, vertical_video_codes])
+
+        if self.extract_histograms_flag: # histograms are extracted in the preprocessor; no feature extraction is needed in this case
+
+            histograms = self.comp_hist_of_sparse_codes(frame_container, self.method)
+
+            frame_container = self.convert_arrays_to_frame_container(histograms)
+
+        return frame_container
+
+
+    #==========================================================================
+    def write_data(self, frames, file_name):
+        """
+        Writes the given data (that has been generated using the __call__
+        function of this class) to file. This method overwrites the write_data()
+        method of the Preprocessor class.
+
+        **Parameters:**
+
+        ``frames`` :
+            Data returned by the __call__ method of the class.
+
+        ``file_name`` : :py:class:`str`
+            Name of the file.
+        """
+
+        self.video_preprocessor.write_data(frames, file_name)
+
+
+    #==========================================================================
+    def read_data(self, file_name):
+        """
+        Reads the preprocessed data from file.
+        This method overwrites the read_data() method of the Preprocessor class.
+
+        **Parameters:**
+
+        ``file_name`` : :py:class:`str`
+            Name of the file.
+
+        **Returns:**
+
+        ``frames`` : :py:class:`bob.bio.video.FrameContainer`
+            Frames stored in the frame container.
+        """
+
+        frames = self.video_preprocessor.read_data(file_name)
+
+        return frames
+
diff --git a/bob/pad/face/preprocessor/__init__.py b/bob/pad/face/preprocessor/__init__.py
index c9c5a4225d2dfff5065d2559ecea9c6ebed03b0f..dc5b3685e5595b51cb5c8dd3a72bd59bd2c74927 100644
--- a/bob/pad/face/preprocessor/__init__.py
+++ b/bob/pad/face/preprocessor/__init__.py
@@ -1,6 +1,7 @@
 from .VideoFaceCrop import VideoFaceCrop
 from .ImageFaceCrop import ImageFaceCrop
 from .FrameDifference import FrameDifference
+from .VideoSparseCoding import VideoSparseCoding
 
 
 def __appropriate__(*args):
@@ -25,5 +26,7 @@ __appropriate__(
     VideoFaceCrop,
     ImageFaceCrop,
     FrameDifference,
+    VideoSparseCoding,
 )
 __all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/doc/img/ROC_iqm_anomaly_detection_aggr_db_grandtest.pdf b/doc/img/ROC_iqm_anomaly_detection_aggr_db_grandtest.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..2175b5b169aadd33c6e78a66654f0c0bde764724
Binary files /dev/null and b/doc/img/ROC_iqm_anomaly_detection_aggr_db_grandtest.pdf differ
diff --git a/doc/img/ROC_iqm_anomaly_detection_aggr_db_ph_ph_vid.pdf b/doc/img/ROC_iqm_anomaly_detection_aggr_db_ph_ph_vid.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..802a0369db76fbc2d0facb533f4d4ea46707bddd
Binary files /dev/null and b/doc/img/ROC_iqm_anomaly_detection_aggr_db_ph_ph_vid.pdf differ
diff --git a/doc/img/ROC_iqm_anomaly_detection_aggr_db_vid_vid_ph.pdf b/doc/img/ROC_iqm_anomaly_detection_aggr_db_vid_vid_ph.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..52bfea975f7d4b3070df1c63c8d5bd27008629dc
Binary files /dev/null and b/doc/img/ROC_iqm_anomaly_detection_aggr_db_vid_vid_ph.pdf differ
diff --git a/doc/index.rst b/doc/index.rst
index fda461eacc018b1a0541c6b1dcb084586916eee3..f06c8df23a7899233e2853025cc9db3ea7f2bd9a 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -22,6 +22,7 @@ Users Guide
 
    installation
    baselines
+   other_pad_algorithms
    references
    resources
    api
diff --git a/doc/other_pad_algorithms.rst b/doc/other_pad_algorithms.rst
new file mode 100644
index 0000000000000000000000000000000000000000..ac7efc11849958c9ad2a2881e70910325625206e
--- /dev/null
+++ b/doc/other_pad_algorithms.rst
@@ -0,0 +1,366 @@
+
+
+.. _bob.pad.face.other_pad_algorithms:
+
+
+===============================
+ Executing Other Algorithms
+===============================
+
+This section explains how to execute face presentation attack detection (PAD) algorithms implemented
+in ``bob.pad.face``.
+
+.. warning::
+
+   Algorithms introduced in this section might be in the process of publication. It is therefore not
+   allowed to publish results from this section without the permission of the owner of the package.
+   If you are planning to use the results from this section, please contact the owner of the package first.
+   Please check the ``setup.py`` for contact information.
+
+
+Running face PAD Experiments
+------------------------------
+
+To run the PAD experiments, use the ``spoof.py`` script located in the ``bin`` directory.
+To see a description of the script, type in the console:
+
+.. code-block:: sh
+
+   $ spoof.py --help
+
+This script is explained in more detail in :ref:`bob.pad.base.experiments`.
+
+Usually it is a good idea to have at least verbose level 2 (i.e., calling
+``spoof.py --verbose --verbose``, or the short version ``spoof.py
+-vv``).
+
+.. note:: **Running in Parallel**
+
+   To run the experiments in parallel, you can define an SGE grid or local host
+   (multi-processing) configurations as explained in
+   :ref:`running_in_parallel`.
+
+   In short, to run in the Idiap SGE grid, you can simply add the ``--grid``
+   command line option, with grid configuration parameters. To run experiments in parallel on
+   the local machine, simply add a ``--parallel <N>`` option, where ``<N>``
+   specifies the number of parallel jobs you want to execute.
+
+
+Database setups and face PAD algorithms are encoded using
+:ref:`bob.bio.base.configuration-files`, all stored inside the package root, in
+the directory ``bob/pad/face/config``. Documentation for each resource
+is available in the section :ref:`bob.pad.face.resources`.
+
+.. warning::
+
+   You **cannot** run experiments just by executing the command line
+   instructions described in this guide. You first **need** to procure
+   the raw data files that correspond to *each* database used here in order to
+   correctly run experiments with those data. Biometric data is considered
+   private data and, under EU regulations, cannot be distributed without
+   consent or a license. You may consult our
+   :ref:`bob.pad.face.resources.databases` resources section for checking
+   currently supported databases and accessing download links for the raw data
+   files.
+
+   Once the raw data files have been downloaded, particular attention should be
+   given to their directory locations. Unpack the databases carefully
+   and note the root directory where they have been unpacked.
+
+   Then, carefully read the *Databases* section of
+   :ref:`bob.pad.base.installation` on how to correctly setup the
+   ``~/.bob_bio_databases.txt`` file.
+
+   Use the following keywords on the left side of the assignment (see
+   :ref:`bob.pad.face.resources.databases`):
+
+   .. code-block:: text
+
+      [YOUR_REPLAY_ATTACK_DIRECTORY] = /complete/path/to/replayattack-database/
+
+   Notice it is rather important to use the strings as described above,
+   otherwise ``bob.pad.base`` will not be able to correctly load your images.
+
+   Once this step is done, you can proceed with the instructions below.
+
+
+------------
+
+
+.. _bob.pad.face.other_pad_algorithms.aggregated_db:
+
+Anomaly detection based PAD on Aggregated Database
+--------------------------------------------------------
+
+This section summarizes the results of *anomaly detection* based face PAD experiments on the Aggregated Database.
+The description of the database-related settings used to run face PAD algorithms on the Aggregated Db is given here: :ref:`bob.pad.face.resources.databases.aggregated_db`. To understand the settings in more detail you can check the corresponding configuration file: ``bob/pad/face/config/aggregated_db.py``.
+
+------------
+
+
+Results for *grandtest* protocol
+========================================================================
+
+This section summarizes the evaluation results on the **grandtest** protocol of the Aggregated database for the following face PAD algorithms (for more details click on the corresponding algorithm):
+
+- :ref:`bob.pad.face.resources.face_pad.qm_one_class_gmm`,
+- :ref:`bob.pad.face.resources.face_pad.qm_one_class_svm_aggregated_db`,
+- :ref:`bob.pad.face.resources.face_pad.qm_lr`,
+- :ref:`bob.pad.face.resources.face_pad.qm_svm_aggregated_db`.
+
+For a more detailed understanding of the above pipelines you can also check the corresponding configuration files:
+
+- ``bob/pad/face/config/qm_one_class_gmm.py``,
+- ``bob/pad/face/config/qm_one_class_svm_aggregated_db.py``,
+- ``bob/pad/face/config/qm_lr.py``,
+- ``bob/pad/face/config/qm_svm_aggregated_db.py``.
+
+To run the above algorithms on the :ref:`bob.pad.face.resources.databases.aggregated_db` database, using the ``grandtest`` protocol, execute the following:
+
+.. code-block:: sh
+
+    $ spoof.py aggregated-db qm-one-class-gmm \
+    --sub-directory <PATH_TO_STORE_THE_RESULTS_1>
+
+    $ spoof.py aggregated-db qm-one-class-svm-aggregated-db \
+    --sub-directory <PATH_TO_STORE_THE_RESULTS_2>
+
+    $ spoof.py aggregated-db qm-lr \
+    --sub-directory <PATH_TO_STORE_THE_RESULTS_3>
+
+    $ spoof.py aggregated-db qm-svm-aggregated-db \
+    --sub-directory <PATH_TO_STORE_THE_RESULTS_4>
+
+.. tip::
+
+    If you are at `idiap`_ you can use the SGE grid to speed up the
+    calculations. Simply add the ``--grid idiap`` argument to the above
+    commands. For example:
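+
+    .. code-block:: sh
+
+        $ spoof.py aggregated-db qm-one-class-gmm \
+        --sub-directory <PATH_TO_STORE_THE_RESULTS_1> \
+        --grid idiap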
+
+
+To evaluate the results, computing the EER and HTER and plotting the ROC
+curve, you can use the following command:
+
+.. code-block:: sh
+
+    ./bin/evaluate.py \
+    --dev-files \
+    <PATH_TO_STORE_THE_RESULTS_1>/grandtest/scores/scores-dev  \
+    <PATH_TO_STORE_THE_RESULTS_2>/grandtest/scores/scores-dev  \
+    <PATH_TO_STORE_THE_RESULTS_3>/grandtest/scores/scores-dev  \
+    <PATH_TO_STORE_THE_RESULTS_4>/grandtest/scores/scores-dev  \
+    --eval-files \
+    <PATH_TO_STORE_THE_RESULTS_1>/grandtest/scores/scores-eval \
+    <PATH_TO_STORE_THE_RESULTS_2>/grandtest/scores/scores-eval \
+    <PATH_TO_STORE_THE_RESULTS_3>/grandtest/scores/scores-eval \
+    <PATH_TO_STORE_THE_RESULTS_4>/grandtest/scores/scores-eval \
+    --legends \
+    "IQM + one-class GMM + Aggregated Db" \
+    "IQM + one-class SVM + Aggregated Db" \
+    "IQM + two-class LR  + Aggregated Db" \
+    "IQM + two-class SVM + Aggregated Db" \
+    -F 7 \
+    --criterion EER \
+    --roc <PATH_TO_STORE_THE_RESULTS>/ROC.pdf
+
+The EER/HTER error rates for the :ref:`bob.pad.face.resources.databases.aggregated_db` database are summarized in the table below:
+
++------------------------+----------+----------+
+|      Algorithm         |  EER,\%  |  HTER,\% |
++========================+==========+==========+
+|   IQM + one-class GMM  |  19.336  |  20.769  |
++------------------------+----------+----------+
+|   IQM + one-class SVM  |  28.137  |  34.776  |
++------------------------+----------+----------+
+|   IQM + two-class LR   |  10.354  |  11.856  |
++------------------------+----------+----------+
+|   IQM + two-class SVM  |  12.710  |  15.253  |
++------------------------+----------+----------+
+
+The ROC curves for this particular experiment can be downloaded here:
+
+:download:`ROC curve <img/ROC_iqm_anomaly_detection_aggr_db_grandtest.pdf>`
+
+------------
+
+
+Results for *photo-photo-video* protocol
+========================================================================
+
+This section summarizes the evaluation results on the **photo-photo-video** protocol of the Aggregated Database for the following face PAD algorithms (for more details, click on the corresponding algorithm):
+
+- :ref:`bob.pad.face.resources.face_pad.qm_one_class_gmm`,
+- :ref:`bob.pad.face.resources.face_pad.qm_one_class_svm_aggregated_db`,
+- :ref:`bob.pad.face.resources.face_pad.qm_lr`,
+- :ref:`bob.pad.face.resources.face_pad.qm_svm_aggregated_db`.
+
+For a more detailed understanding of the above pipelines you can also check the corresponding configuration files:
+
+- ``bob/pad/face/config/qm_one_class_gmm.py``,
+- ``bob/pad/face/config/qm_one_class_svm_aggregated_db.py``,
+- ``bob/pad/face/config/qm_lr.py``,
+- ``bob/pad/face/config/qm_svm_aggregated_db.py``.
+
+To run the above algorithms on the :ref:`bob.pad.face.resources.databases.aggregated_db` database, using the ``photo-photo-video`` protocol, execute the following:
+
+.. code-block:: sh
+
+    $ spoof.py aggregated-db qm-one-class-gmm \
+    --protocol photo-photo-video \
+    --sub-directory <PATH_TO_STORE_THE_RESULTS_1>
+
+    $ spoof.py aggregated-db qm-one-class-svm-aggregated-db \
+    --protocol photo-photo-video \
+    --sub-directory <PATH_TO_STORE_THE_RESULTS_2>
+
+    $ spoof.py aggregated-db qm-lr \
+    --protocol photo-photo-video \
+    --sub-directory <PATH_TO_STORE_THE_RESULTS_3>
+
+    $ spoof.py aggregated-db qm-svm-aggregated-db \
+    --protocol photo-photo-video \
+    --sub-directory <PATH_TO_STORE_THE_RESULTS_4>
+
+.. tip::
+
+    If you are at `idiap`_ you can use the SGE grid to speed up the
+    calculations. Simply add the ``--grid idiap`` argument to the above
+    commands. For example:
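+
+    .. code-block:: sh
+
+        $ spoof.py aggregated-db qm-one-class-gmm \
+        --protocol photo-photo-video \
+        --sub-directory <PATH_TO_STORE_THE_RESULTS_1> \
+        --grid idiap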
+
+
+To evaluate the results, computing the EER and HTER and plotting the ROC
+curve, you can use the following command:
+
+.. code-block:: sh
+
+    ./bin/evaluate.py \
+    --dev-files \
+    <PATH_TO_STORE_THE_RESULTS_1>/photo-photo-video/scores/scores-dev  \
+    <PATH_TO_STORE_THE_RESULTS_2>/photo-photo-video/scores/scores-dev  \
+    <PATH_TO_STORE_THE_RESULTS_3>/photo-photo-video/scores/scores-dev  \
+    <PATH_TO_STORE_THE_RESULTS_4>/photo-photo-video/scores/scores-dev  \
+    --eval-files \
+    <PATH_TO_STORE_THE_RESULTS_1>/photo-photo-video/scores/scores-eval \
+    <PATH_TO_STORE_THE_RESULTS_2>/photo-photo-video/scores/scores-eval \
+    <PATH_TO_STORE_THE_RESULTS_3>/photo-photo-video/scores/scores-eval \
+    <PATH_TO_STORE_THE_RESULTS_4>/photo-photo-video/scores/scores-eval \
+    --legends \
+    "IQM + one-class GMM + Aggregated Db" \
+    "IQM + one-class SVM + Aggregated Db" \
+    "IQM + two-class LR  + Aggregated Db" \
+    "IQM + two-class SVM + Aggregated Db" \
+    -F 7 \
+    --criterion EER \
+    --roc <PATH_TO_STORE_THE_RESULTS>/ROC.pdf
+
+The EER/HTER error rates for the :ref:`bob.pad.face.resources.databases.aggregated_db` database are summarized in the table below:
+
++------------------------+----------+----------+
+|      Algorithm         |  EER,\%  |  HTER,\% |
++========================+==========+==========+
+|   IQM + one-class GMM  |  22.075  |  14.470  |
++------------------------+----------+----------+
+|   IQM + one-class SVM  |  35.537  |  24.317  |
++------------------------+----------+----------+
+|   IQM + two-class LR   |  10.184  |  30.132  |
++------------------------+----------+----------+
+|   IQM + two-class SVM  |  10.527  |  21.926  |
++------------------------+----------+----------+
+
+The ROC curves for this particular experiment can be downloaded here:
+
+:download:`ROC curve <img/ROC_iqm_anomaly_detection_aggr_db_ph_ph_vid.pdf>`
+
+------------
+
+
+Results for *video-video-photo* protocol
+========================================================================
+
+This section summarizes the evaluation results on the **video-video-photo** protocol of the Aggregated Database for the following face PAD algorithms (for more details, click on the corresponding algorithm):
+
+- :ref:`bob.pad.face.resources.face_pad.qm_one_class_gmm`,
+- :ref:`bob.pad.face.resources.face_pad.qm_one_class_svm_aggregated_db`,
+- :ref:`bob.pad.face.resources.face_pad.qm_lr`,
+- :ref:`bob.pad.face.resources.face_pad.qm_svm_aggregated_db`.
+
+For a more detailed understanding of the above pipelines you can also check the corresponding configuration files:
+
+- ``bob/pad/face/config/qm_one_class_gmm.py``,
+- ``bob/pad/face/config/qm_one_class_svm_aggregated_db.py``,
+- ``bob/pad/face/config/qm_lr.py``,
+- ``bob/pad/face/config/qm_svm_aggregated_db.py``.
+
+To run the above algorithms on the :ref:`bob.pad.face.resources.databases.aggregated_db` database, using the ``video-video-photo`` protocol, execute the following:
+
+.. code-block:: sh
+
+    $ spoof.py aggregated-db qm-one-class-gmm \
+    --protocol video-video-photo \
+    --sub-directory <PATH_TO_STORE_THE_RESULTS_1>
+
+    $ spoof.py aggregated-db qm-one-class-svm-aggregated-db \
+    --protocol video-video-photo \
+    --sub-directory <PATH_TO_STORE_THE_RESULTS_2>
+
+    $ spoof.py aggregated-db qm-lr \
+    --protocol video-video-photo \
+    --sub-directory <PATH_TO_STORE_THE_RESULTS_3>
+
+    $ spoof.py aggregated-db qm-svm-aggregated-db \
+    --protocol video-video-photo \
+    --sub-directory <PATH_TO_STORE_THE_RESULTS_4>
+
+.. tip::
+
+    If you are at `idiap`_ you can use the SGE grid to speed up the
+    calculations. Simply add the ``--grid idiap`` argument to the above
+    commands. For example:
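+
+    .. code-block:: sh
+
+        $ spoof.py aggregated-db qm-one-class-gmm \
+        --protocol video-video-photo \
+        --sub-directory <PATH_TO_STORE_THE_RESULTS_1> \
+        --grid idiap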
+
+
+To evaluate the results, computing the EER and HTER and plotting the ROC
+curve, you can use the following command:
+
+.. code-block:: sh
+
+    ./bin/evaluate.py \
+    --dev-files \
+    <PATH_TO_STORE_THE_RESULTS_1>/video-video-photo/scores/scores-dev  \
+    <PATH_TO_STORE_THE_RESULTS_2>/video-video-photo/scores/scores-dev  \
+    <PATH_TO_STORE_THE_RESULTS_3>/video-video-photo/scores/scores-dev  \
+    <PATH_TO_STORE_THE_RESULTS_4>/video-video-photo/scores/scores-dev  \
+    --eval-files \
+    <PATH_TO_STORE_THE_RESULTS_1>/video-video-photo/scores/scores-eval \
+    <PATH_TO_STORE_THE_RESULTS_2>/video-video-photo/scores/scores-eval \
+    <PATH_TO_STORE_THE_RESULTS_3>/video-video-photo/scores/scores-eval \
+    <PATH_TO_STORE_THE_RESULTS_4>/video-video-photo/scores/scores-eval \
+    --legends \
+    "IQM + one-class GMM + Aggregated Db" \
+    "IQM + one-class SVM + Aggregated Db" \
+    "IQM + two-class LR  + Aggregated Db" \
+    "IQM + two-class SVM + Aggregated Db" \
+    -F 7 \
+    --criterion EER \
+    --roc <PATH_TO_STORE_THE_RESULTS>/ROC.pdf
+
+The EER/HTER error rates for the :ref:`bob.pad.face.resources.databases.aggregated_db` database are summarized in the table below:
+
++------------------------+----------+----------+
+|      Algorithm         |  EER,\%  |  HTER,\% |
++========================+==========+==========+
+|   IQM + one-class GMM  |  13.503  |  29.794  |
++------------------------+----------+----------+
+|   IQM + one-class SVM  |  18.234  |  39.502  |
++------------------------+----------+----------+
+|   IQM + two-class LR   |  1.499   |  30.268  |
++------------------------+----------+----------+
+|   IQM + two-class SVM  |  1.422   |  24.901  |
++------------------------+----------+----------+
+
+The ROC curves for this particular experiment can be downloaded here:
+
+:download:`ROC curve <img/ROC_iqm_anomaly_detection_aggr_db_vid_vid_ph.pdf>`
+
+------------
+
+
+.. include:: links.rst
+
+
diff --git a/doc/resources.rst b/doc/resources.rst
index ca93b11a2ae35163884c7109e014a2909e274cb0..971e96dea904df398e70508f8866ada22efe82b0 100644
--- a/doc/resources.rst
+++ b/doc/resources.rst
@@ -135,3 +135,32 @@ Frame differences based features (motion analysis) + SVM for Aggregated Database
 
 .. automodule:: bob.pad.face.config.frame_diff_svm_aggregated_db
    :members:
+
+
+.. _bob.pad.face.resources.face_pad.qm_lr:
+
+Image Quality Measures as features of facial region + Logistic Regression
+============================================================================================================================
+
+.. automodule:: bob.pad.face.config.qm_lr
+   :members:
+
+
+.. _bob.pad.face.resources.face_pad.qm_one_class_gmm:
+
+Image Quality Measures as features of facial region + GMM-based one-class classifier (anomaly detector)
+============================================================================================================================
+
+.. automodule:: bob.pad.face.config.qm_one_class_gmm
+   :members:
+
+
+.. _bob.pad.face.resources.face_pad.qm_one_class_svm_aggregated_db:
+
+Image Quality Measures as features of facial region + one-class SVM classifier (anomaly detector) for Aggregated Database
+============================================================================================================================
+
+.. automodule:: bob.pad.face.config.qm_one_class_svm_aggregated_db
+   :members:
+
+
diff --git a/setup.py b/setup.py
index 7ca82566e5123e8dae0406fbcbf9f910df699780..474dcb0598e14cd18370fa74410b80362e8592e5 100644
--- a/setup.py
+++ b/setup.py
@@ -103,13 +103,24 @@ setup(
         # registered preprocessors:
         'bob.pad.preprocessor': [
             'empty-preprocessor = bob.pad.face.config.preprocessor.filename:empty_preprocessor', # no preprocessing
+
+            # The sparse coding based preprocessors
+            'sparse-coding-preprocessor = bob.pad.face.config.preprocessor.video_sparse_coding:preprocessor',
+            'sparse-coding-preprocessor-10-5-16 = bob.pad.face.config.preprocessor.video_sparse_coding:preprocessor_10_5_16',
+            'sparse-coding-preprocessor-10-5-32 = bob.pad.face.config.preprocessor.video_sparse_coding:preprocessor_10_5_32',
+            'sparse-coding-preprocessor-10-5-64 = bob.pad.face.config.preprocessor.video_sparse_coding:preprocessor_10_5_64',
+            'sparse-coding-preprocessor-10-5-128 = bob.pad.face.config.preprocessor.video_sparse_coding:preprocessor_10_5_128',
             ],
 
-        # registered preprocessors:
+        # registered extractors:
         'bob.pad.extractor': [
             'video-lbp-histogram-extractor-n8r1-uniform = bob.pad.face.config.extractor.video_lbp_histogram:video_lbp_histogram_extractor_n8r1_uniform',
             'video-quality-measure-galbally-msu = bob.pad.face.config.extractor.video_quality_measure:video_quality_measure_galbally_msu',
             'frame-diff-feat-extr-w20-over0 = bob.pad.face.config.extractor.frame_diff_features:frame_diff_feat_extr_w20_over0',
+
+            # extractors for sparse coding:
+            'hist-of-sparse-codes-mean = bob.pad.face.config.extractor.video_hist_of_sparse_codes:extractor_mean',
+            'hist-of-sparse-codes-hist = bob.pad.face.config.extractor.video_hist_of_sparse_codes:extractor_hist',
             ],
 
         # registered algorithms: