diff --git a/.gitignore b/.gitignore
index 722dfac6807f6eb07f51e93d4f486817af4a5e9b..936a57230138b1ba6b0803aa8bd4cfed13ab69c8 100644
--- a/.gitignore
+++ b/.gitignore
@@ -14,5 +14,4 @@ dist
 record.txt
 results
 submitted.sql3
-config
 temp*
diff --git a/bob/pad/face/config/preprocessor/face_feature_crop_quality_check.py b/bob/pad/face/config/preprocessor/face_feature_crop_quality_check.py
new file mode 100644
index 0000000000000000000000000000000000000000..3dfc53f477c6caf0f0729c2321d4589becabf428
--- /dev/null
+++ b/bob/pad/face/config/preprocessor/face_feature_crop_quality_check.py
@@ -0,0 +1,490 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Tue Oct  9 13:53:58 2018
+
+@author: Olegs Nikisins
+"""
+
+# =============================================================================
+# Import here:
+
+from bob.bio.base.preprocessor import Preprocessor
+
+from bob.bio.video.preprocessor import Wrapper
+
+import os
+
+import importlib
+
+from bob.pad.face.utils.patch_utils import reshape_flat_patches
+
+from bob.bio.video.utils import FrameSelector
+
+from bob.pad.face.preprocessor import BlockPatch
+
+
+# =============================================================================
+# define preprocessor class:
+
+class _Preprocessor(Preprocessor):
+    """
+    The following steps are performed:
+
+    1. Detect and align the face.
+
+    2. Assess the quality of the face image.
+
+    3. Extract patch / patches from the face.
+
+    **Parameters:**
+
+    ``face_crop_align`` : object
+        An instance of the FaceCropAlign preprocessor to be used in step 1.
+
+    ``config_file`` : str
+        Relative name of the config file containing the
+        quality assessment function.
+        Example: ``celeb_a/quality_assessment_config.py``.
+
+    ``config_group`` : str
+        Group/package name containing the configuration file.
+        Example: ``bob.pad.face.config.quality_assessment``.
+
+    ``block_patch`` : object
+        An instance of the BlockPatch preprocessor to be used in step 3.
+
+    ``patch_reshape_parameters`` : [int] or None
+        The parameters to be used for patch reshaping. The patch is
+        vectorized. Example:
+        ``patch_reshape_parameters = [4, 8, 8]``, then the patch of the
+        size (256,) will be reshaped to (4,8,8) dimensions. Only 2D and 3D
+        patches are supported.
+        Default: None.
+
+    ``patch_num`` : int or None
+        An index of the patch to be selected from all extracted patches.
+        Default: None.
+    """
+
+    def __init__(self,
+                 face_crop_align,
+                 config_file,
+                 config_group,
+                 block_patch,
+                 patch_reshape_parameters = None,
+                 patch_num = None):
+
+        super(_Preprocessor, self).__init__()
+
+        self.face_crop_align = face_crop_align
+        self.config_file = config_file
+        self.config_group = config_group
+        self.block_patch = block_patch
+        self.patch_reshape_parameters = patch_reshape_parameters
+        self.patch_num = patch_num
+
+
+    def __call__(self, data, annotations):
+        """
+        **Parameters:**
+
+        ``data`` : 2D or 3D :py:class:`numpy.ndarray`
+            Input image (RGB or gray-scale) or None.
+
+        ``annotations`` : :py:class:`dict` or None
+            A dictionary containing annotations of the face bounding box.
+            Dictionary must be as follows:
+            ``{'topleft': (row, col), 'bottomright': (row, col)}``
+            Default: None.
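+
+        **Returns:**
+
+        ``patches`` : :py:class:`numpy.ndarray` or None
+            The extracted patch / patches, or ``None`` if the face is not
+            detected, or the quality of the face image is low.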
+        """
+
+        face_data = self.face_crop_align(data, annotations)
+
+        if face_data is None:
+
+            return None
+
+        relative_mod_name = '.' + os.path.splitext(self.config_file)[0].replace(os.path.sep, '.')
+
+        config_module = importlib.import_module(relative_mod_name, self.config_group)
+
+        quality_flag = config_module.assess_quality(face_data, **config_module.assess_quality_kwargs)
+
+        if quality_flag:
+
+            print ("Good quality data.")
+
+            patches = self.block_patch(face_data, annotations=None)
+
+            if self.patch_reshape_parameters is not None:
+
+                patches = reshape_flat_patches(patches, self.patch_reshape_parameters)
+
+            if self.patch_num is not None:
+
+                patches = patches[self.patch_num]
+
+        else:
+
+            print ("Bad quality data.")
+            return None
+
+        return patches
+
+
+# =============================================================================
+# define instance of the preprocessor:
+
+"""
+Preprocessor to be used for Color channel.
+"""
+
+from bob.pad.face.preprocessor import FaceCropAlign
+
+FACE_SIZE = 128  # The size of the resulting face
+RGB_OUTPUT_FLAG = True  # RGB output
+USE_FACE_ALIGNMENT = True  # use annotations
+MAX_IMAGE_SIZE = 1920  # the maximum size of the input image to be processed
+FACE_DETECTION_METHOD = "mtcnn"  # detect faces with MTCNN instead of using database annotations; valid for CelebA only
+MIN_FACE_SIZE = 50  # skip small faces
+
+_face_crop_align = FaceCropAlign(face_size = FACE_SIZE,
+                                 rgb_output_flag = RGB_OUTPUT_FLAG,
+                                 use_face_alignment = USE_FACE_ALIGNMENT,
+                                 max_image_size = MAX_IMAGE_SIZE,
+                                 face_detection_method = FACE_DETECTION_METHOD,
+                                 min_face_size = MIN_FACE_SIZE)
+
+"""
+Parameters to be used for quality assessment.
+"""
+
+CONFIG_FILE = "celeb_a/quality_assessment_config_128.py"
+
+CONFIG_GROUP = "bob.pad.face.config.quality_assessment"
+
+"""
+Define an instance of the BlockPatch preprocessor.
+"""
+
+PATCH_SIZE = 64
+STEP = 32
+
+_block_patch = BlockPatch(patch_size = PATCH_SIZE,
+                          step = STEP,
+                          use_annotations_flag = False)
+
+"""
+define an instance of the _Preprocessor class.
+"""
+
+_frame_selector = FrameSelector(selection_style = "all")
+
+_image_extractor_0 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 0)
+
+face_feature_0_crop_rgb = Wrapper(preprocessor = _image_extractor_0,
+                                  frame_selector = _frame_selector)
+
+
+# =============================================================================
+_image_extractor_1 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 1)
+
+face_feature_1_crop_rgb = Wrapper(preprocessor = _image_extractor_1,
+                                  frame_selector = _frame_selector)
+
+# =============================================================================
+_image_extractor_2 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 2)
+
+face_feature_2_crop_rgb = Wrapper(preprocessor = _image_extractor_2,
+                                  frame_selector = _frame_selector)
+
+# =============================================================================
+_image_extractor_3 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 3)
+
+face_feature_3_crop_rgb = Wrapper(preprocessor = _image_extractor_3,
+                                  frame_selector = _frame_selector)
+
+# =============================================================================
+_image_extractor_4 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 4)
+
+face_feature_4_crop_rgb = Wrapper(preprocessor = _image_extractor_4,
+                                  frame_selector = _frame_selector)
+
+# =============================================================================
+_image_extractor_5 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 5)
+
+face_feature_5_crop_rgb = Wrapper(preprocessor = _image_extractor_5,
+                                  frame_selector = _frame_selector)
+
+# =============================================================================
+_image_extractor_6 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 6)
+
+face_feature_6_crop_rgb = Wrapper(preprocessor = _image_extractor_6,
+                                  frame_selector = _frame_selector)
+
+# =============================================================================
+_image_extractor_7 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 7)
+
+face_feature_7_crop_rgb = Wrapper(preprocessor = _image_extractor_7,
+                                  frame_selector = _frame_selector)
+
+# =============================================================================
+_image_extractor_8 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 8)
+
+face_feature_8_crop_rgb = Wrapper(preprocessor = _image_extractor_8,
+                                  frame_selector = _frame_selector)
+
+# =============================================================================
+# Extractors for obtaining RGB patches of the size 3x32x32
+
+PATCH_SIZE = 32
+STEP = 32
+
+_block_patch_32x32 = BlockPatch(patch_size = PATCH_SIZE,
+                                step = STEP,
+                                use_annotations_flag = False)
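+
+# For a 128x128 face, 32x32 patches with a step of 32 pixels form a 4x4 grid,
+# i.e. 16 patches in total, hence the patch_num values 0 to 15 used below.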
+
+_image_extractor_0_32x32 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch_32x32,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 0)
+
+face_feature_0_32x32_crop_rgb = Wrapper(preprocessor = _image_extractor_0_32x32,
+                                  frame_selector = _frame_selector)
+
+
+_image_extractor_1_32x32 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch_32x32,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 1)
+
+face_feature_1_32x32_crop_rgb = Wrapper(preprocessor = _image_extractor_1_32x32,
+                                  frame_selector = _frame_selector)
+
+
+_image_extractor_2_32x32 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch_32x32,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 2)
+
+face_feature_2_32x32_crop_rgb = Wrapper(preprocessor = _image_extractor_2_32x32,
+                                  frame_selector = _frame_selector)
+
+
+_image_extractor_3_32x32 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch_32x32,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 3)
+
+face_feature_3_32x32_crop_rgb = Wrapper(preprocessor = _image_extractor_3_32x32,
+                                  frame_selector = _frame_selector)
+
+
+_image_extractor_4_32x32 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch_32x32,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 4)
+
+face_feature_4_32x32_crop_rgb = Wrapper(preprocessor = _image_extractor_4_32x32,
+                                  frame_selector = _frame_selector)
+
+
+_image_extractor_5_32x32 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch_32x32,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 5)
+
+face_feature_5_32x32_crop_rgb = Wrapper(preprocessor = _image_extractor_5_32x32,
+                                  frame_selector = _frame_selector)
+
+
+_image_extractor_6_32x32 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch_32x32,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 6)
+
+face_feature_6_32x32_crop_rgb = Wrapper(preprocessor = _image_extractor_6_32x32,
+                                  frame_selector = _frame_selector)
+
+
+_image_extractor_7_32x32 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch_32x32,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 7)
+
+face_feature_7_32x32_crop_rgb = Wrapper(preprocessor = _image_extractor_7_32x32,
+                                  frame_selector = _frame_selector)
+
+
+_image_extractor_8_32x32 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch_32x32,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 8)
+
+face_feature_8_32x32_crop_rgb = Wrapper(preprocessor = _image_extractor_8_32x32,
+                                  frame_selector = _frame_selector)
+
+
+_image_extractor_9_32x32 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch_32x32,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 9)
+
+face_feature_9_32x32_crop_rgb = Wrapper(preprocessor = _image_extractor_9_32x32,
+                                  frame_selector = _frame_selector)
+
+
+_image_extractor_10_32x32 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch_32x32,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 10)
+
+face_feature_10_32x32_crop_rgb = Wrapper(preprocessor = _image_extractor_10_32x32,
+                                  frame_selector = _frame_selector)
+
+
+_image_extractor_11_32x32 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch_32x32,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 11)
+
+face_feature_11_32x32_crop_rgb = Wrapper(preprocessor = _image_extractor_11_32x32,
+                                  frame_selector = _frame_selector)
+
+
+_image_extractor_12_32x32 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch_32x32,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 12)
+
+face_feature_12_32x32_crop_rgb = Wrapper(preprocessor = _image_extractor_12_32x32,
+                                  frame_selector = _frame_selector)
+
+
+_image_extractor_13_32x32 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch_32x32,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 13)
+
+face_feature_13_32x32_crop_rgb = Wrapper(preprocessor = _image_extractor_13_32x32,
+                                  frame_selector = _frame_selector)
+
+
+_image_extractor_14_32x32 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch_32x32,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 14)
+
+face_feature_14_32x32_crop_rgb = Wrapper(preprocessor = _image_extractor_14_32x32,
+                                  frame_selector = _frame_selector)
+
+
+_image_extractor_15_32x32 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch_32x32,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 15)
+
+face_feature_15_32x32_crop_rgb = Wrapper(preprocessor = _image_extractor_15_32x32,
+                                  frame_selector = _frame_selector)
+
+
+# =============================================================================
+# Extractors for obtaining RGB patches (the patch is the entire face in this case) of the size 3x128x128
+
+PATCH_SIZE = 128
+STEP = 1
+
+_block_patch_128x128 = BlockPatch(patch_size = PATCH_SIZE,
+                                  step = STEP,
+                                  use_annotations_flag = False)
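+
+# With a 128x128 patch and a step of 1 pixel on a 128x128 face, a single patch
+# covering the entire face is extracted, hence only patch_num = 0 is used below.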
+
+_image_extractor_0_128x128 = _Preprocessor(face_crop_align = _face_crop_align,
+                                   config_file = CONFIG_FILE,
+                                   config_group = CONFIG_GROUP,
+                                   block_patch = _block_patch_128x128,
+                                   patch_reshape_parameters = [3, PATCH_SIZE, PATCH_SIZE],
+                                   patch_num = 0)
+
+face_feature_0_128x128_crop_rgb = Wrapper(preprocessor = _image_extractor_0_128x128,
+                                  frame_selector = _frame_selector)
+
diff --git a/bob/pad/face/config/quality_assessment/celeb_a/quality_assessment_config.py b/bob/pad/face/config/quality_assessment/celeb_a/quality_assessment_config.py
index 70420d5ef8f40d634459af86e79af3c1b9489b75..cdf772f2f63e5fb331e40b1222373ad2259ded5a 100644
--- a/bob/pad/face/config/quality_assessment/celeb_a/quality_assessment_config.py
+++ b/bob/pad/face/config/quality_assessment/celeb_a/quality_assessment_config.py
@@ -33,57 +33,7 @@ from bob.bio.video.preprocessor import Wrapper
 
 import numpy as np
 
-# =============================================================================
-def detect_eyes_in_bw_image(image):
-    """
-    Detect eyes in the image using OpenCV.
-
-    **Parameters:**
-
-    ``image`` : 2D :py:class:`numpy.ndarray`
-        A BW image to detect the eyes in.
-
-    **Returns:**
-
-    ``eyes`` : 2D :py:class:`numpy.ndarray`
-        An array containing coordinates of the bounding boxes of detected eyes.
-        The dimensionality of the array:
-        ``num_of_detected_eyes x coordinates_of_bbx``
-    """
-
-    eye_model = pkg_resources.resource_filename('bob.pad.face.config',
-                                                'quality_assessment/models/eye_detector.xml')
-
-    eye_cascade = cv2.CascadeClassifier(eye_model)
-
-    eyes = eye_cascade.detectMultiScale(image)
-
-    return eyes
-
-
-# =============================================================================
-def load_datafile(file_name):
-    """
-    Load data from file given filename. Here the data file is an hdf5 file
-    containing a framecontainer with one frame. The data in the frame is
-    a BW image of the facial region.
-
-    **Parameters:**
-
-    ``file_name`` : str
-        Absolute name of the file.
-
-    **Returns:**
-
-    ``data`` : 2D :py:class:`numpy.ndarray`
-        Data array containing the image of the facial region.
-    """
-
-    frame_container = Wrapper().read_data(file_name)
-
-    data = frame_container[0][1]
-
-    return data
+from bob.pad.face.config.quality_assessment.celeb_a.quality_assessment_config_128 import detect_eyes_in_bw_image, load_datafile, assess_quality
 
 
 # =============================================================================
@@ -98,59 +48,3 @@ assess_quality_kwargs = {}
 assess_quality_kwargs["eyes_expected"] = eyes_expected
 assess_quality_kwargs["threshold"] = 7
 
-
-# =============================================================================
-def assess_quality(data, eyes_expected, threshold):
-    """
-    Assess the quality of the data sample, which in this case is an image of
-    the face of the size 64x64 pixels. The quality assessment is based on the
-    eye detection. If two eyes are detected, and they are located in the
-    pre-defined positions, then quality is good, otherwise the quality is low.
-
-    **Parameters:**
-
-    ``data`` : 2D :py:class:`numpy.ndarray`
-        Data array containing the image of the facial region. The size of the
-        image is 64x64.
-
-    ``eyes_expected`` : list
-        A list containing expected coordinates of the eyes. The format is
-        as follows:
-        [ [left_y, left_x], [right_y, right_x] ]
-
-    ``threshold`` : int
-        A maximum allowed distance between expected and detected centers of
-        the eyes.
-
-    **Returns:**
-
-    ``quality_flag`` : bool
-        ``True`` for good quality data, ``False`` otherwise.
-    """
-
-    quality_flag = False
-
-    eyes = detect_eyes_in_bw_image(data)
-
-    if isinstance(eyes, np.ndarray):
-
-        if eyes.shape[0] == 2: # only consider the images with two eyes detected
-
-            # coordinates of detected centers of the eyes: [ [left_y, left_x], [right_y, right_x] ]:
-            eyes_detected = []
-            for (ex,ey,ew,eh) in eyes:
-                eyes_detected.append( [ey + eh/2., ex + ew/2.] )
-
-            dists = [] # dits between detected and expected:
-            for a, b in zip(eyes_detected, eyes_expected):
-                dists.append( np.linalg.norm(np.array(a)-np.array(b)) )
-
-            max_dist = np.max(dists)
-
-            if max_dist < threshold:
-
-                quality_flag = True
-
-    return quality_flag
-
-
diff --git a/bob/pad/face/config/quality_assessment/celeb_a/quality_assessment_config_128.py b/bob/pad/face/config/quality_assessment/celeb_a/quality_assessment_config_128.py
new file mode 100644
index 0000000000000000000000000000000000000000..3eb4e0879d14ff97905c451e560fbd2103c37271
--- /dev/null
+++ b/bob/pad/face/config/quality_assessment/celeb_a/quality_assessment_config_128.py
@@ -0,0 +1,162 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Quality assessment configuration file for the CelebA database to be used
+with quality assessment script.
+
+Note: this config checks the quality of the preprocessed(!) data. Here the
+preprocessed data is stored in ``.hdf5`` files, as a frame container with
+one frame. The frame contains a BW image of the facial region of the size
+128x128 pixels.
+
+The config file MUST contain at least the following functions:
+
+``load_datafile(file_name)`` - returns the ``data`` given ``file_name``, and
+
+``assess_quality(data, **assess_quality_kwargs)`` - returns ``True`` for good
+quality ``data``, and ``False`` for low quality data, and
+
+``assess_quality_kwargs`` - a dictionary with kwargs for ``assess_quality()``
+function.
+
+@author: Olegs Nikisins
+"""
+
+# =============================================================================
+# Import here:
+
+import pkg_resources
+
+import cv2
+
+from bob.bio.video.preprocessor import Wrapper
+
+import numpy as np
+
+import bob.ip.color
+
+# =============================================================================
+def detect_eyes_in_bw_image(image):
+    """
+    Detect eyes in the image using OpenCV.
+
+    **Parameters:**
+
+    ``image`` : 2D :py:class:`numpy.ndarray`
+        A BW image to detect the eyes in.
+
+    **Returns:**
+
+    ``eyes`` : 2D :py:class:`numpy.ndarray`
+        An array containing coordinates of the bounding boxes of detected eyes.
+        The dimensionality of the array:
+        ``num_of_detected_eyes x coordinates_of_bbx``
+    """
+
+    eye_model = pkg_resources.resource_filename('bob.pad.face.config',
+                                                'quality_assessment/models/eye_detector.xml')
+
+    eye_cascade = cv2.CascadeClassifier(eye_model)
+
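+    # convert color images to grayscale before running the eye detector: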
+    if len(image.shape) == 3:
+
+        image = bob.ip.color.rgb_to_gray(image)
+
+    eyes = eye_cascade.detectMultiScale(image)
+
+    return eyes
+
+
+# =============================================================================
+def load_datafile(file_name):
+    """
+    Load data from file given filename. Here the data file is an hdf5 file
+    containing a framecontainer with one frame. The data in the frame is
+    a BW image of the facial region.
+
+    **Parameters:**
+
+    ``file_name`` : str
+        Absolute name of the file.
+
+    **Returns:**
+
+    ``data`` : 2D :py:class:`numpy.ndarray`
+        Data array containing the image of the facial region.
+    """
+
+    frame_container = Wrapper().read_data(file_name)
+
+    data = frame_container[0][1]
+
+    return data
+
+
+# =============================================================================
+face_size = 128
+eyes_distance = (face_size + 1) / 2.
+eyes_center = (face_size / 4., (face_size - 0.5) / 2.)
+
+eyes_expected = [[eyes_center[0], eyes_center[1]-eyes_distance/2.],
+                 [eyes_center[0], eyes_center[1]+eyes_distance/2.]]
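+
+# For face_size = 128 the above evaluates to eyes_expected = [[32.0, 31.5], [32.0, 96.0]],
+# i.e. both eyes are expected in row 32, at roughly 1/4 and 3/4 of the image width.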
+
+assess_quality_kwargs = {}
+assess_quality_kwargs["eyes_expected"] = eyes_expected
+assess_quality_kwargs["threshold"] = 10
+
+
+# =============================================================================
+def assess_quality(data, eyes_expected, threshold):
+    """
+    Assess the quality of the data sample, which in this case is an image of
+    the face of the size (face_size x face_size) pixels. The quality assessment is based on the
+    eye detection. If two eyes are detected, and they are located in the
+    pre-defined positions, then quality is good, otherwise the quality is low.
+
+    **Parameters:**
+
+    ``data`` : 2D :py:class:`numpy.ndarray`
+        Data array containing the image of the facial region. The size of the
+        image is (face_size x face_size).
+
+    ``eyes_expected`` : list
+        A list containing expected coordinates of the eyes. The format is
+        as follows:
+        [ [left_y, left_x], [right_y, right_x] ]
+
+    ``threshold`` : int
+        A maximum allowed distance between expected and detected centers of
+        the eyes.
+
+    **Returns:**
+
+    ``quality_flag`` : bool
+        ``True`` for good quality data, ``False`` otherwise.
+    """
+
+    quality_flag = False
+
+    eyes = detect_eyes_in_bw_image(data)
+
+    if isinstance(eyes, np.ndarray):
+
+        if eyes.shape[0] == 2: # only consider the images with two eyes detected
+
+            # coordinates of detected centers of the eyes: [ [left_y, left_x], [right_y, right_x] ]:
+            eyes_detected = []
+            for (ex,ey,ew,eh) in eyes:
+                eyes_detected.append( [ey + eh/2., ex + ew/2.] )
+
+            dists = []  # distances between detected and expected eye centers:
+            for a, b in zip(eyes_detected, eyes_expected):
+                dists.append( np.linalg.norm(np.array(a)-np.array(b)) )
+
+            max_dist = np.max(dists)
+
+            if max_dist < threshold:
+
+                quality_flag = True
+
+    return quality_flag
+
+
diff --git a/bob/pad/face/test/test.py b/bob/pad/face/test/test.py
index f2dacebefb043d6f8b9738bd78165c97456392e0..cf10dcab76da3c08d8f8e27954b8a60babf8bc77 100644
--- a/bob/pad/face/test/test.py
+++ b/bob/pad/face/test/test.py
@@ -48,6 +48,10 @@ from bob.bio.video.utils import FrameSelector
 
 from ..preprocessor import BlockPatch
 
+from bob.pad.face.config.preprocessor.face_feature_crop_quality_check import face_feature_0_128x128_crop_rgb
+
+from bob.pad.face.utils.patch_utils import reshape_flat_patches
+
 
 def test_detect_face_landmarks_in_image_mtcnn():
 
@@ -309,6 +313,56 @@ def test_video_face_crop_align_block_patch():
     assert data_preprocessed[1][1].shape == (9, 12288)
 
 
+# =============================================================================
+def test_preproc_with_quality_check():
+    """
+    Test the _Preprocessor: crop and align the face, then check the quality of
+    the image by detecting the eyes and verifying that they are in the expected
+    positions.
+    """
+
+    # =========================================================================
+    # prepare the test data:
+    image = load(datafile('test_image.png', 'bob.pad.face.test'))
+
+    annotations = None
+
+    video, annotations = convert_image_to_video_data(image, annotations, 2)
+
+    # =========================================================================
+    # test the preprocessor:
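+    # the test image is not expected to pass the quality check, hence the
+    # preprocessor should return None: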
+    data_preprocessed = face_feature_0_128x128_crop_rgb(video)
+
+    assert data_preprocessed is None
+
+
+# =============================================================================
+def test_reshape_flat_patches():
+    """
+    Test reshape_flat_patches function.
+    """
+
+    image = load(datafile('test_image.png', 'bob.pad.face.test'))
+
+    patch1 = image[0,0:10,0:10]
+    patch2 = image[1,0:10,0:10]
+
+    patches = np.stack([patch1.flatten(), patch2.flatten()])
+    patches_3d = reshape_flat_patches(patches, (10, 10))
+
+    assert np.all(patch1 == patches_3d[0])
+    assert np.all(patch2 == patches_3d[1])
+
+    # =========================================================================
+    patch1 = image[:,0:10,0:10]
+    patch2 = image[:,1:11,1:11]
+
+    patches = np.stack([patch1.flatten(), patch2.flatten()])
+    patches_3d = reshape_flat_patches(patches, (3, 10, 10))
+
+    assert np.all(patch1 == patches_3d[0])
+    assert np.all(patch2 == patches_3d[1])
+
+
 #==============================================================================
 def test_frame_difference():
     """
diff --git a/bob/pad/face/utils/patch_utils.py b/bob/pad/face/utils/patch_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..13ac2aa24cfa4124ebbbd3070eb44b6ddac3c4e1
--- /dev/null
+++ b/bob/pad/face/utils/patch_utils.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+"""
+Created on Fri Jan 18 17:39:42 2019
+
+@author: Olegs Nikisins
+"""
+
+# =============================================================================
+# Import what is needed here:
+
+import numpy as np
+
+
+# =============================================================================
+# Main body:
+
+def reshape_flat_patches(patches, patch_reshape_parameters = None):
+    """
+    Reshape a set of flattened patches into their original 2D or 3D dimensions.
+
+    **Parameters:**
+
+    ``patches`` : 2D :py:class:`numpy.ndarray`
+        An array containing flattened patches. The dimensions are:
+        ``num_patches x len_of_flat_patch``
+
+    ``patch_reshape_parameters`` : [int] or None
+        The parameters to be used for patch reshaping. The loaded patch is
+        vectorized. Example:
+        ``patch_reshape_parameters = [4, 8, 8]``, then the patch of the
+        size (256,) will be reshaped to (4,8,8) dimensions. Only 2D and 3D
+        patches are supported.
+        Default: None.
+
+    **Returns:**
+
+    ``patches_3d`` : [2D or 3D :py:class:`numpy.ndarray`]
+        A list of patches converted to the original dimensions.
+    """
+
+    patches_3d = []
+
+    for patch in patches:
+
+        if patch_reshape_parameters is not None:
+
+            # The dimensionality of the reshaped patch; the leading dimension
+            # (e.g. the number of channels) is derived from the length of the
+            # flat patch and the two spatial dimensions:
+            n_channels = len(patch) // (patch_reshape_parameters[-2] * patch_reshape_parameters[-1])
+
+            new_shape = [n_channels] + list(patch_reshape_parameters[-2:])
+
+            patch = np.squeeze(patch.reshape(new_shape))
+
+        patches_3d.append(patch)
+
+    return patches_3d
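+
+
+# A minimal usage sketch (hypothetical data): two flattened 3x8x8 patches are
+# restored to their original dimensions:
+#
+#     patches = np.zeros((2, 192))  # 2 patches, 3*8*8 = 192 values each
+#     patches_3d = reshape_flat_patches(patches, [3, 8, 8])
+#     assert patches_3d[0].shape == (3, 8, 8)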
+
+
+# =============================================================================
+def mean_std_patch_norm(patches, channel_means = None, channel_stds = None):
+    """
+    Apply mean-std normalization to the patches channel-wise.
+
+    **Parameters:**
+
+    ``patches`` : [2D or 3D :py:class:`numpy.ndarray`]
+        A list of patches converted to the original dimensions.
+
+    ``channel_means`` : [float] or None
+        The channel-wise mean values to be used for mean-std normalization
+        of the patches. Only normalization of 3D patches is currently
+        supported.
+        Default: None.
+
+    ``channel_stds`` : [float] or None
+        The channel-wise std values to be used for mean-std normalization
+        of the patches. Only normalization of 3D patches is currently
+        supported.
+        Default: None.
+
+    **Returns:**
+
+    ``patches_norm_3d`` : [2D or 3D :py:class:`numpy.ndarray`]
+        A list of patches normalized channel-wise.
+    """
+
+    patches_norm_3d = []
+
+    for patch in patches:
+
+        if channel_means is not None: # if normalization parameters are given
+
+            patch = patch.astype(float)  # convert to float for normalization
+
+            if len(patch.shape) == 3: # Only normalization of 3D patches is currently handled
+
+                for idx, patch_channel in enumerate(patch): # for all channels
+
+                    patch[idx,:,:] = (patch_channel - channel_means[idx]) / channel_stds[idx]
+
+        patches_norm_3d.append(patch)
+
+    return patches_norm_3d
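+
+
+# A minimal usage sketch (hypothetical normalization constants): every channel
+# of a 3D patch is normalized as (channel - mean) / std:
+#
+#     patches_norm = mean_std_patch_norm(patches_3d,
+#                                        channel_means = [0.5, 0.5, 0.5],
+#                                        channel_stds = [0.2, 0.2, 0.2])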
+
+
diff --git a/doc/index.rst b/doc/index.rst
index 0fef703937fc291068066e827ef6df565ce6aca4..20747b6c704d09a5febf71732f3e9867d82e0eb7 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -23,6 +23,7 @@ Users Guide
    baselines
    other_pad_algorithms
    pulse
+   mc_autoencoder_pad
    references
    resources
    api
diff --git a/doc/mc_autoencoder_pad.rst b/doc/mc_autoencoder_pad.rst
new file mode 100644
index 0000000000000000000000000000000000000000..f59cbdf19bc1726e218e6b5eb81fdeb34f9c300c
--- /dev/null
+++ b/doc/mc_autoencoder_pad.rst
@@ -0,0 +1,88 @@
+
+
+.. _bob.pad.face.mc_autoencoder_pad:
+
+
+=============================================
+ Multi-channel face PAD using autoencoders
+=============================================
+
+This section explains how to run a complete face PAD experiment using a multi-channel autoencoder-based face PAD system, as well as the training work-flow.
+
+The system discussed in this section is introduced in the following publication [NGM19]_. It is **strongly recommended** to check the publication for a better understanding
+of the described work-flow.
+
+.. warning::
+
+   Algorithms introduced in this section may still be in the process of publication. Therefore, it is not
+   allowed to publish results from this section without the permission of the owner of the package.
+   If you are planning to use the results from this section, please contact the owner of the package first.
+   Please check the ``setup.py`` for contact information.
+
+
+Running face PAD Experiments
+------------------------------
+
+Please refer to the :ref:`bob.pad.face.baselines` section of the current documentation for more details on how to run the face PAD experiments and set up the databases.
+
+
+Training the multi-channel autoencoder-based face PAD system
+----------------------------------------------------------------
+
+As introduced in the paper [NGM19]_, the training of the system is composed of three main steps, which are summarized in the following table:
+
++----------------------+----------------------+---------------------+
+| Train step           | Training data        | DB, classes used    |
++======================+======================+=====================+
+| Train N AEs          | RGB face regions     | CelebA, BF          |
++----------------------+----------------------+---------------------+
+| Fine-tune N AEs      | MC face regions      | WMCA, BF            |
++----------------------+----------------------+---------------------+
+| Train an MLP         | MC latent encodings  | WMCA, BF and PA     |
++----------------------+----------------------+---------------------+
+
+In the above table, **BF** and **PA** stand for samples from the **bona-fide** and **presentation attack** classes.
+
+As one can conclude from the table, the CelebA and WMCA databases must be installed before the training can take place.
+See :ref:`bob.pad.face.baselines` for the database installation details.
+
+
+1. Train N AEs on RGB data from CelebA
+===========================================
+
+In [NGM19]_, N autoencoders are trained, one for each facial region. Here, for explanatory purposes, a system containing **one** autoencoder is considered, thus N=1.
+This autoencoder is first pre-trained using RGB images of the entire face, which are cropped from the CelebA database.
+
+To prepare the training data one can use the following command:
+
+
+.. code-block:: sh
+
+    # spoof.py runs the preprocessing; the lbp-svm algorithm is required by
+    # spoof.py, but is unused here. The --skip-* flags make sure that only the
+    # preprocessing step is executed, --groups train selects the training set
+    # of CelebA, and --preprocessor selects the preprocessor entry point.
+    # The --grid idiap option is for Idiap users only, remove it otherwise.
+    # Define your own path in --sub-directory.
+    ./bin/spoof.py \
+    celeb-a \
+    lbp-svm \
+    --skip-extractor-training --skip-extraction --skip-projector-training --skip-projection --skip-score-computation --allow-missing-files \
+    --grid idiap \
+    --groups train \
+    --preprocessor rgb-face-detect-check-quality-128x128 \
+    --sub-directory <PATH_TO_STORE_THE_RESULTS>
+
+Running the above command aligns and crops the RGB facial images from the training set of the CelebA database. Additionally, a quality assessment is applied to each facial image.
+More specifically, an eye detection algorithm is applied to the face images, ensuring that the deviation of the eye coordinates from the expected positions is not significant.
+See [NGM19]_ for more details.
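+
+The quality check used above can also be invoked directly. Below is a minimal
+sketch, assuming ``face_image`` is a 128x128 face crop:
+
+.. code-block:: py
+
+    from bob.pad.face.config.quality_assessment.celeb_a.quality_assessment_config_128 import (
+        assess_quality, assess_quality_kwargs)
+
+    # True if two eyes are detected close to the expected positions:
+    quality_flag = assess_quality(face_image, **assess_quality_kwargs)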
+
+Once the above script has completed, the data suitable for autoencoder training is located in the folder ``<PATH_TO_STORE_THE_RESULTS>/preprocessed/``. Now the autoencoder can be trained.
+The training procedure is explained in the **Convolutional autoencoder** section in the documentation of the ``bob.learn.pytorch`` package.
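+
+To verify the output, a single preprocessed sample can be loaded back. Below is a
+minimal sketch; the file name is hypothetical, any ``.hdf5`` file from the above
+folder can be used:
+
+.. code-block:: py
+
+    from bob.bio.video.preprocessor import Wrapper
+
+    # hypothetical absolute name of one preprocessed sample:
+    file_name = "<PATH_TO_STORE_THE_RESULTS>/preprocessed/sample.hdf5"
+
+    # the data is stored as a frame container with a single frame:
+    frame_container = Wrapper().read_data(file_name)
+
+    data = frame_container[0][1]  # a face patch of the size (3, 128, 128)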
+
+.. note::
+
+  The functionality of ``bob.pad.face`` is used to compute the training data. Install and follow the documentation of ``bob.learn.pytorch`` to train the autoencoders. This functional decoupling helps to avoid a dependency of
+  ``bob.pad.face`` on **PyTorch**.
+
+
+.. include:: links.rst
+
diff --git a/doc/references.rst b/doc/references.rst
index 12e00f65f4813e4ae8250b259190c816e464c7ac..6276842f362db6b84e741532568849eeef9b3645 100644
--- a/doc/references.rst
+++ b/doc/references.rst
@@ -18,3 +18,6 @@ References
 
 .. [CDSR17] *C. Chen, A. Dantcheva, T. Swearingen, A. Ross*, **Spoofing Faces Using Makeup: An Investigative Study**,
             in: Proc. of 3rd IEEE International Conference on Identity, Security and Behavior Analysis (ISBA), (New Delhi, India), February 2017.
+
+.. [NGM19] *O. Nikisins, A. George, S. Marcel*, **Domain Adaptation in Multi-Channel Autoencoder based Features for Robust Face Anti-Spoofing**,
+            in: Submitted to the 2019 International Conference on Biometrics (ICB), 2019.
diff --git a/setup.py b/setup.py
index 89ae3f14582501a01ad4513dc1f4fb186a12f3d3..b9b97d29d3ad99ddfac96f79354377505194ae15 100644
--- a/setup.py
+++ b/setup.py
@@ -117,6 +117,7 @@ setup(
             'rgb-face-detect-dlib = bob.pad.face.config.preprocessor.video_face_crop:rgb_face_detector_dlib',  # detect faces locally replacing database annotations
             'rgb-face-detect-mtcnn = bob.pad.face.config.preprocessor.video_face_crop:rgb_face_detector_mtcnn',  # detect faces locally replacing database annotations
             'bw-face-detect-mtcnn = bob.pad.face.config.preprocessor.video_face_crop:bw_face_detect_mtcnn',  # detect faces locally, return BW image
+            'rgb-face-detect-check-quality-128x128 = bob.pad.face.config.preprocessor.face_feature_crop_quality_check:face_feature_0_128x128_crop_rgb',  # detect faces locally replacing database annotations, also check the face quality by trying to detect the eyes in the cropped face.
         ],
 
         # registered extractors: