diff --git a/bob/paper/mccnn/tifs2018/config/haralick_svm.py b/bob/paper/mccnn/tifs2018/config/haralick_svm.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b9a04d3a838333f4f996e3c6e3720282c6d11f9
--- /dev/null
+++ b/bob/paper/mccnn/tifs2018/config/haralick_svm.py
@@ -0,0 +1,69 @@
+
+
+# =============================================================================
+# define instance of the preprocessor:
+
+from bob.pad.face.preprocessor import VideoFaceCropAlignBlockPatch
+
+from bob.pad.face.preprocessor import FaceCropAlign
+
+from bob.bio.video.preprocessor import Wrapper
+
+from bob.bio.video.utils import FrameSelector
+
+from bob.bio.base.preprocessor import Preprocessor
+
+import sys
+
+sys.path.append('/idiap/user/ageorge/WORK/MCCNN/bob.paper.mccnn.tifs2018/baseline_configs/extractor/')
+
+class DummyPreprocessor(Preprocessor):
+  """Pass-through preprocessor: the data is already preprocessed, so the input
+  image is returned unchanged."""
+
+  def __init__(self):
+
+    Preprocessor.__init__(self)
+
+  def __call__(self, image, annotations=None):
+    return image
+
+
+_preprocessor=DummyPreprocessor()
+
+preprocessor=Wrapper(_preprocessor)
+
+# ==================== end of dummy preprocessor; define the extractor:
+
+# 4x4 grids
+
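+# Note: the ``sys.path`` manipulation above points at a user-specific checkout; the same
+# extractor is also shipped with this package (see ``bob/paper/mccnn/tifs2018/extractor/__init__.py``
+# added in this change), so importing it from ``bob.paper.mccnn.tifs2018.extractor`` should
+# work as well.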
+from HaralickRDWT import HaralickRDWT
+from bob.bio.video.extractor import Wrapper
+
+DTYPE = None
+
+extractor = Wrapper(HaralickRDWT(dtype=DTYPE, n_hor=4, n_vert=4))
+"""
+In the feature extraction stage, RDWT+Haralick features are extracted from each frame of the
+preprocessed video: each frame is split into a 4x4 grid of blocks, and Haralick descriptors are
+computed on the undecimated wavelet (RDWT) sub-bands of every block as well as on the block
+itself (see ``HaralickRDWT``).
+"""
+
+#=======================================================================================
+# define algorithm:
+
+# Linear SVM 
+
+from bob.pad.base.algorithm import SVM
+
+MACHINE_TYPE = 'C_SVC'
+KERNEL_TYPE = 'LINEAR'
+N_SAMPLES = 10000
+
+MEAN_STD_NORM_FLAG = True  # enable mean-std normalization
+FRAME_LEVEL_SCORES_FLAG = True  # one score per frame(!) in this case
+
+algorithm = SVM(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    n_samples=N_SAMPLES,
+    mean_std_norm_flag=MEAN_STD_NORM_FLAG,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
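+"""
+A two-class SVM with a linear kernel is trained on the extracted features. Mean-std
+normalization of the features is enabled (``MEAN_STD_NORM_FLAG = True``) and one score
+per frame is produced (``FRAME_LEVEL_SCORES_FLAG = True``).
+"""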
\ No newline at end of file
diff --git a/bob/paper/mccnn/tifs2018/config/lbp_lr_batl_D_T_IR.py b/bob/paper/mccnn/tifs2018/config/lbp_lr_batl_D_T_IR.py
new file mode 100644
index 0000000000000000000000000000000000000000..4820be82ed465b9d56073ccadbdb42d749718c3f
--- /dev/null
+++ b/bob/paper/mccnn/tifs2018/config/lbp_lr_batl_D_T_IR.py
@@ -0,0 +1,105 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""
+This file contains configurations to run an LBP and LR (Logistic Regression) based face PAD baseline.
+The settings below are used for the WMCA (BATL) database experiments.
+The idea of the algorithm is introduced in the following paper: [CAM12]_.
+However, some settings differ from the ones introduced in the paper.
+"""
+
+#=======================================================================================
+sub_directory = 'lbp_lr'
+"""
+Sub-directory where results will be placed.
+
+You may change this setting using the ``--sub-directory`` command-line option
+or the attribute ``sub_directory`` in a configuration file loaded **after**
+this resource.
+"""
+
+#=======================================================================================
+# define preprocessor:
+
+from bob.pad.face.preprocessor import FaceCropAlign
+
+from bob.bio.video.preprocessor import Wrapper
+
+from bob.bio.video.utils import FrameSelector
+
+from bob.pad.face.preprocessor.FaceCropAlign import auto_norm_image as _norm_func
+
+FACE_SIZE = 128 # The size of the resulting face
+RGB_OUTPUT_FLAG = False # Gray-scale output
+USE_FACE_ALIGNMENT = True # use annotations
+MAX_IMAGE_SIZE = None # no limiting here
+FACE_DETECTION_METHOD = None # use annotations
+MIN_FACE_SIZE = 50 # skip small faces
+ALIGNMENT_TYPE='lightcnn'
+NORMALIZATION_FUNCTION = _norm_func
+# kwargs of the normalization function defined above (see ``auto_norm_image``):
+NORMALIZATION_FUNCTION_KWARGS = {'n_sigma': 3.0, 'norm_method': 'MAD'}
+
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                   rgb_output_flag = RGB_OUTPUT_FLAG,
+                                   use_face_alignment = USE_FACE_ALIGNMENT,
+                                   alignment_type=ALIGNMENT_TYPE,
+                                   max_image_size = MAX_IMAGE_SIZE,
+                                   face_detection_method = FACE_DETECTION_METHOD,
+                                   min_face_size = MIN_FACE_SIZE,
+                                   normalization_function = NORMALIZATION_FUNCTION,
+                                   normalization_function_kwargs = NORMALIZATION_FUNCTION_KWARGS)
+
+_frame_selector = FrameSelector(selection_style = "all")
+
+preprocessor = Wrapper(preprocessor = _image_preprocessor,
+                       frame_selector = _frame_selector)
+"""
+In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
+The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces with the size
+below ``MIN_FACE_SIZE`` threshold are discarded. The preprocessor is similar to the one introduced in
+[CAM12]_, which is defined by ``FACE_DETECTION_METHOD = None``.
+"""
+
+#=======================================================================================
+# define extractor:
+
+from bob.pad.face.extractor import LBPHistogram
+
+from bob.bio.video.extractor import Wrapper
+
+LBPTYPE = 'uniform'
+ELBPTYPE = 'regular'
+RAD = 1
+NEIGHBORS = 8
+CIRC = False
+DTYPE = None
+
+extractor = Wrapper(LBPHistogram(
+    lbptype=LBPTYPE,
+    elbptype=ELBPTYPE,
+    rad=RAD,
+    neighbors=NEIGHBORS,
+    circ=CIRC,
+    dtype=DTYPE))
+"""
+In the feature extraction stage the LBP histograms are extracted from each frame of the preprocessed video.
+
+The parameters are similar to the ones introduced in [CAM12]_.
+"""
+
+#=======================================================================================
+# define algorithm:
+
+from bob.pad.base.algorithm import LogRegr
+
+C = 1.  # The regularization parameter for the LR classifier
+FRAME_LEVEL_SCORES_FLAG = True  # Return one score per frame
+
+algorithm = LogRegr(
+    C=C, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
+"""
+The Logistic Regression is used to classify the data into *real* and *attack* classes.
+One score is produced for each frame of the input video, ``frame_level_scores_flag = True``.
+Sub-sampling of the training data is not used here; the sub-sampling flags keep their
+default ``False`` values.
+"""
diff --git a/bob/paper/mccnn/tifs2018/config/lbp_lr_depth.py b/bob/paper/mccnn/tifs2018/config/lbp_lr_depth.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ac1309056511c4327b40ca15d4b8a1c1254162a
--- /dev/null
+++ b/bob/paper/mccnn/tifs2018/config/lbp_lr_depth.py
@@ -0,0 +1,112 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""
+This file contains configurations to run an LBP and LR (Logistic Regression) based face PAD baseline.
+The settings below are used for the WMCA (BATL) database experiments.
+The idea of the algorithm is introduced in the following paper: [CAM12]_.
+However, some settings differ from the ones introduced in the paper.
+"""
+
+#=======================================================================================
+sub_directory = 'lbp_lr'
+"""
+Sub-directory where results will be placed.
+
+You may change this setting using the ``--sub-directory`` command-line option
+or the attribute ``sub_directory`` in a configuration file loaded **after**
+this resource.
+"""
+
+#=======================================================================================
+# define preprocessor:
+
+from bob.pad.face.preprocessor import FaceCropAlign
+
+from bob.bio.video.preprocessor import Wrapper
+
+from bob.bio.video.utils import FrameSelector
+
+from bob.pad.face.preprocessor.FaceCropAlign import auto_norm_image as _norm_func
+
+FACE_SIZE = 128 # The size of the resulting face
+RGB_OUTPUT_FLAG = False # Gray-scale output
+USE_FACE_ALIGNMENT = True # use annotations
+MAX_IMAGE_SIZE = None # no limiting here
+FACE_DETECTION_METHOD = None # use annotations
+MIN_FACE_SIZE = 50 # skip small faces
+ALIGNMENT_TYPE='lightcnn'
+NORMALIZATION_FUNCTION = _norm_func
+# kwargs of the normalization function defined above (see ``auto_norm_image``):
+NORMALIZATION_FUNCTION_KWARGS = {'n_sigma': 3.0, 'norm_method': 'MAD'}
+
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                   rgb_output_flag = RGB_OUTPUT_FLAG,
+                                   use_face_alignment = USE_FACE_ALIGNMENT,
+                                   alignment_type=ALIGNMENT_TYPE,
+                                   max_image_size = MAX_IMAGE_SIZE,
+                                   face_detection_method = FACE_DETECTION_METHOD,
+                                   min_face_size = MIN_FACE_SIZE,
+                                   normalization_function = NORMALIZATION_FUNCTION,
+                                   normalization_function_kwargs = NORMALIZATION_FUNCTION_KWARGS)
+
+_frame_selector = FrameSelector(selection_style = "all")
+
+preprocessor = Wrapper(preprocessor = _image_preprocessor,
+                       frame_selector = _frame_selector)
+"""
+In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
+The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces with the size
+below ``MIN_FACE_SIZE`` threshold are discarded. The preprocessor is similar to the one introduced in
+[CAM12]_, which is defined by ``FACE_DETECTION_METHOD = None``.
+"""
+
+#=======================================================================================
+# define extractor:
+
+from bob.pad.face.extractor import LBPHistogram
+
+from bob.bio.video.extractor import Wrapper
+LBPTYPE = 'uniform'
+ELBPTYPE = 'modified'
+RAD = 1
+NEIGHBORS = 8
+CIRC = False
+DTYPE = None
+N_HOR = 1
+N_VERT = 1
+
+
+#lbp_histogram_n8r1_modified_1x1 
+
+extractor= Wrapper(LBPHistogram(
+                                lbptype=LBPTYPE,
+                                elbptype=ELBPTYPE,
+                                rad=RAD,
+                                neighbors=NEIGHBORS,
+                                circ=CIRC,
+                                dtype=DTYPE,
+                                n_hor=N_HOR,
+                                n_vert=N_VERT))
+
+"""
+In the feature extraction stage the LBP histograms are extracted from each frame of the preprocessed video.
+
+The parameters are similar to the ones introduced in [CAM12]_.
+"""
+
+#=======================================================================================
+# define algorithm:
+
+from bob.pad.base.algorithm import LogRegr
+
+C = 1.  # The regularization parameter for the LR classifier
+FRAME_LEVEL_SCORES_FLAG = True  # Return one score per frame
+
+algorithm = LogRegr(
+    C=C, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
+"""
+The Logistic Regression is used to classify the data into *real* and *attack* classes.
+One score is produced for each frame of the input video, ``frame_level_scores_flag = True``.
+Sub-sampling of the training data is not used here; the sub-sampling flags keep their
+default ``False`` values.
+"""
diff --git a/bob/paper/mccnn/tifs2018/config/lbp_lr_infrared.py b/bob/paper/mccnn/tifs2018/config/lbp_lr_infrared.py
new file mode 100644
index 0000000000000000000000000000000000000000..c8f02579e4e9a19c8a28eade34a96cdb4c88fe3a
--- /dev/null
+++ b/bob/paper/mccnn/tifs2018/config/lbp_lr_infrared.py
@@ -0,0 +1,113 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""
+This file contains configurations to run an LBP and LR (Logistic Regression) based face PAD baseline.
+The settings below are used for the WMCA (BATL) database experiments.
+The idea of the algorithm is introduced in the following paper: [CAM12]_.
+However, some settings differ from the ones introduced in the paper.
+"""
+
+#=======================================================================================
+sub_directory = 'lbp_lr'
+"""
+Sub-directory where results will be placed.
+
+You may change this setting using the ``--sub-directory`` command-line option
+or the attribute ``sub_directory`` in a configuration file loaded **after**
+this resource.
+"""
+
+#=======================================================================================
+# define preprocessor:
+
+from bob.pad.face.preprocessor import FaceCropAlign
+
+from bob.bio.video.preprocessor import Wrapper
+
+from bob.bio.video.utils import FrameSelector
+
+from bob.pad.face.preprocessor.FaceCropAlign import auto_norm_image as _norm_func
+
+FACE_SIZE = 128 # The size of the resulting face
+RGB_OUTPUT_FLAG = False # Gray-scale output
+USE_FACE_ALIGNMENT = True # use annotations
+MAX_IMAGE_SIZE = None # no limiting here
+FACE_DETECTION_METHOD = None # use annotations
+MIN_FACE_SIZE = 50 # skip small faces
+ALIGNMENT_TYPE='lightcnn'
+NORMALIZATION_FUNCTION = _norm_func
+# kwargs of the normalization function defined above (see ``auto_norm_image``):
+NORMALIZATION_FUNCTION_KWARGS = {'n_sigma': 3.0, 'norm_method': 'MAD'}
+
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                   rgb_output_flag = RGB_OUTPUT_FLAG,
+                                   use_face_alignment = USE_FACE_ALIGNMENT,
+                                   alignment_type=ALIGNMENT_TYPE,
+                                   max_image_size = MAX_IMAGE_SIZE,
+                                   face_detection_method = FACE_DETECTION_METHOD,
+                                   min_face_size = MIN_FACE_SIZE,
+                                   normalization_function = NORMALIZATION_FUNCTION,
+                                   normalization_function_kwargs = NORMALIZATION_FUNCTION_KWARGS)
+
+_frame_selector = FrameSelector(selection_style = "all")
+
+preprocessor = Wrapper(preprocessor = _image_preprocessor,
+                       frame_selector = _frame_selector)
+"""
+In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
+The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces with the size
+below ``MIN_FACE_SIZE`` threshold are discarded. The preprocessor is similar to the one introduced in
+[CAM12]_, which is defined by ``FACE_DETECTION_METHOD = None``.
+"""
+
+#=======================================================================================
+# define extractor:
+
+from bob.pad.face.extractor import LBPHistogram
+
+from bob.bio.video.extractor import Wrapper
+
+LBPTYPE = 'uniform'
+ELBPTYPE = 'regular'
+RAD = 1
+NEIGHBORS = 8
+CIRC = False
+DTYPE = None
+N_HOR = 2
+N_VERT = 2
+
+#lbp_histogram_n8r1_uniform_2x2
+
+
+extractor= Wrapper(LBPHistogram(
+                               lbptype=LBPTYPE,
+                               elbptype=ELBPTYPE,
+                               rad=RAD,
+                               neighbors=NEIGHBORS,
+                               circ=CIRC,
+                               dtype=DTYPE,
+                               n_hor=N_HOR,
+                               n_vert=N_VERT))
+
+"""
+In the feature extraction stage the LBP histograms are extracted from each frame of the preprocessed video.
+
+The parameters are similar to the ones introduced in [CAM12]_.
+"""
+
+#=======================================================================================
+# define algorithm:
+
+from bob.pad.base.algorithm import LogRegr
+
+C = 1.  # The regularization parameter for the LR classifier
+FRAME_LEVEL_SCORES_FLAG = True  # Return one score per frame
+
+algorithm = LogRegr(
+    C=C, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
+"""
+The Logistic Regression is used to classify the data into *real* and *attack* classes.
+One score is produced for each frame of the input video, ``frame_level_scores_flag = True``.
+Sub-sampling of the training data is not used here; the sub-sampling flags keep their
+default ``False`` values.
+"""
diff --git a/bob/paper/mccnn/tifs2018/config/lbp_lr_thermal.py b/bob/paper/mccnn/tifs2018/config/lbp_lr_thermal.py
new file mode 100644
index 0000000000000000000000000000000000000000..c7d1dc9872bccf7cdc5e0c0b7f1186c741cc8c13
--- /dev/null
+++ b/bob/paper/mccnn/tifs2018/config/lbp_lr_thermal.py
@@ -0,0 +1,111 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""
+This file contains configurations to run an LBP and LR (Logistic Regression) based face PAD baseline.
+The settings below are used for the WMCA (BATL) database experiments.
+The idea of the algorithm is introduced in the following paper: [CAM12]_.
+However, some settings differ from the ones introduced in the paper.
+"""
+
+#=======================================================================================
+sub_directory = 'lbp_lr'
+"""
+Sub-directory where results will be placed.
+
+You may change this setting using the ``--sub-directory`` command-line option
+or the attribute ``sub_directory`` in a configuration file loaded **after**
+this resource.
+"""
+
+#=======================================================================================
+# define preprocessor:
+
+from bob.pad.face.preprocessor import FaceCropAlign
+
+from bob.bio.video.preprocessor import Wrapper
+
+from bob.bio.video.utils import FrameSelector
+
+from bob.pad.face.preprocessor.FaceCropAlign import auto_norm_image as _norm_func
+
+FACE_SIZE = 128 # The size of the resulting face
+RGB_OUTPUT_FLAG = False # Gray-scale output
+USE_FACE_ALIGNMENT = True # use annotations
+MAX_IMAGE_SIZE = None # no limiting here
+FACE_DETECTION_METHOD = None # use annotations
+MIN_FACE_SIZE = 50 # skip small faces
+ALIGNMENT_TYPE='lightcnn'
+NORMALIZATION_FUNCTION = _norm_func
+# kwargs of the normalization function defined above (see ``auto_norm_image``):
+NORMALIZATION_FUNCTION_KWARGS = {'n_sigma': 3.0, 'norm_method': 'MAD'}
+
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                   rgb_output_flag = RGB_OUTPUT_FLAG,
+                                   use_face_alignment = USE_FACE_ALIGNMENT,
+                                   alignment_type=ALIGNMENT_TYPE,
+                                   max_image_size = MAX_IMAGE_SIZE,
+                                   face_detection_method = FACE_DETECTION_METHOD,
+                                   min_face_size = MIN_FACE_SIZE,
+                                   normalization_function = NORMALIZATION_FUNCTION,
+                                   normalization_function_kwargs = NORMALIZATION_FUNCTION_KWARGS)
+
+_frame_selector = FrameSelector(selection_style = "all")
+
+preprocessor = Wrapper(preprocessor = _image_preprocessor,
+                       frame_selector = _frame_selector)
+"""
+In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
+The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces with the size
+below ``MIN_FACE_SIZE`` threshold are discarded. The preprocessor is similar to the one introduced in
+[CAM12]_, which is defined by ``FACE_DETECTION_METHOD = None``.
+"""
+
+#=======================================================================================
+# define extractor:
+
+from bob.pad.face.extractor import LBPHistogram
+
+from bob.bio.video.extractor import Wrapper
+
+LBPTYPE = 'uniform'
+RAD = 1
+NEIGHBORS = 8
+CIRC = False
+DTYPE = None
+N_HOR = 1
+N_VERT = 1
+ELBPTYPE = 'modified'
+
+
+#lbp_histogram_n8r1_modified_1x1
+extractor= Wrapper(LBPHistogram(
+                                lbptype=LBPTYPE,
+                                elbptype=ELBPTYPE,
+                                rad=RAD,
+                                neighbors=NEIGHBORS,
+                                circ=CIRC,
+                                dtype=DTYPE,
+                                n_hor=N_HOR,
+                                n_vert=N_VERT))
+"""
+In the feature extraction stage the LBP histograms are extracted from each frame of the preprocessed video.
+
+The parameters are similar to the ones introduced in [CAM12]_.
+"""
+
+#=======================================================================================
+# define algorithm:
+
+from bob.pad.base.algorithm import LogRegr
+
+C = 1.  # The regularization parameter for the LR classifier
+FRAME_LEVEL_SCORES_FLAG = True  # Return one score per frame
+
+algorithm = LogRegr(
+    C=C, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
+"""
+The Logistic Regression is used to classify the data into *real* and *attack* classes.
+One score is produced for each frame of the input video, ``frame_level_scores_flag = True``.
+Sub-sampling of the training data is not used here; the sub-sampling flags keep their
+default ``False`` values.
+"""
diff --git a/bob/paper/mccnn/tifs2018/config/qm_lr.py b/bob/paper/mccnn/tifs2018/config/qm_lr.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c337f617c0fcdefec4d43c1d3e7a7caafc4591b
--- /dev/null
+++ b/bob/paper/mccnn/tifs2018/config/qm_lr.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+"""
+This file contains configurations to run an Image Quality Measures (IQM) and LR based face PAD algorithm.
+The settings of the preprocessor and extractor below are used for the WMCA (BATL) database experiments.
+The IQM features used in this algorithm/resource are introduced in the following papers: [WHJ15]_ and [CBVM16]_.
+"""
+
+#=======================================================================================
+sub_directory = 'qm_lr'
+"""
+Sub-directory where results will be placed.
+
+You may change this setting using the ``--sub-directory`` command-line option
+or the attribute ``sub_directory`` in a configuration file loaded **after**
+this resource.
+"""
+
+#=======================================================================================
+# define preprocessor:
+
+from bob.pad.face.preprocessor import FaceCropAlign
+
+from bob.bio.video.preprocessor import Wrapper
+
+from bob.bio.video.utils import FrameSelector
+
+FACE_SIZE = 128 # The size of the resulting face
+RGB_OUTPUT_FLAG = True # RGB output
+USE_FACE_ALIGNMENT = True # use annotations
+MAX_IMAGE_SIZE = None # no limiting here
+FACE_DETECTION_METHOD = None # use annotations; set to 'mtcnn' if no annotations are available
+MIN_FACE_SIZE = 50 # skip small faces
+ALIGNMENT_TYPE='lightcnn'
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                   rgb_output_flag = RGB_OUTPUT_FLAG,
+                                   use_face_alignment = USE_FACE_ALIGNMENT,
+                                   alignment_type=ALIGNMENT_TYPE,
+                                   max_image_size = MAX_IMAGE_SIZE,
+                                   face_detection_method = FACE_DETECTION_METHOD,
+                                   min_face_size = MIN_FACE_SIZE)
+
+_frame_selector = FrameSelector(selection_style = "all")
+
+preprocessor = Wrapper(preprocessor = _image_preprocessor,
+                       frame_selector = _frame_selector)
+"""
+In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
+The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces of the size
+below ``MIN_FACE_SIZE`` threshold are discarded. The preprocessor is similar to the one introduced in
+[CAM12]_, which is defined by ``FACE_DETECTION_METHOD = None``. The preprocessed frame is the RGB
+facial image, which is defined by ``RGB_OUTPUT_FLAG = True``.
+"""
+
+#=======================================================================================
+# define extractor:
+
+from bob.pad.face.extractor import ImageQualityMeasure
+
+from bob.bio.video.extractor import Wrapper
+
+GALBALLY = True
+MSU = True
+DTYPE = None
+
+extractor = Wrapper(ImageQualityMeasure(galbally=GALBALLY, msu=MSU, dtype=DTYPE))
+"""
+In the feature extraction stage the Image Quality Measures are extracted from each frame of the preprocessed RGB video.
+The features to be computed are introduced in the following papers: [WHJ15]_ and [CBVM16]_.
+"""
+
+#=======================================================================================
+# define algorithm:
+
+from bob.pad.base.algorithm import LogRegr
+
+C = 1.  # The regularization parameter for the LR classifier
+FRAME_LEVEL_SCORES_FLAG = True  # Return one score per frame
+
+algorithm = LogRegr(
+    C=C, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
+"""
+The Logistic Regression is used to classify the data into *real* and *attack* classes.
+One score is produced for each frame of the input video, ``frame_level_scores_flag = True``.
+Sub-sampling of the training data is not used here; the sub-sampling flags keep their
+default ``False`` values.
+"""
diff --git a/bob/paper/mccnn/tifs2018/database/batl_db_color.py b/bob/paper/mccnn/tifs2018/database/batl_db_color.py
new file mode 100644
index 0000000000000000000000000000000000000000..09060cf6b9f5fe9dd3e5728d12d45b256b814a6b
--- /dev/null
+++ b/bob/paper/mccnn/tifs2018/database/batl_db_color.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+"""
+BATL Db is a database for face PAD experiments.
+"""
+
+from bob.pad.face.database import BatlPadDatabase
+
+# Directory where the data files are stored.
+# This directory is given in the .bob_bio_databases.txt file located in your home directory
+ORIGINAL_DIRECTORY = "[YOUR_WMCA_DB_DIRECTORY]"
+"""Value of ``~/.bob_bio_databases.txt`` for this database"""
+
+ORIGINAL_EXTENSION = ".h5"  # extension of the data files
+
+ANNOTATIONS_TEMP_DIR = "[YOUR_WMCA_ANNOTATIONS_DIRECTORY]"
+
+unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+
+PROTOCOL = 'grandtest-color-50'+unseen_protocols[0]
+
+database = BatlPadDatabase(
+    protocol=PROTOCOL,
+    original_directory=ORIGINAL_DIRECTORY,
+    original_extension=ORIGINAL_EXTENSION,
+    landmark_detect_method="mtcnn",  
+    exclude_attacks_list=['makeup'],
+    exclude_pai_all_sets=True, 
+    append_color_face_roi_annot=False) 
+
+"""The :py:class:`bob.pad.base.database.BatlPadDatabase` derivative with BATL Db
+database settings.
+
+.. warning::
+
+   This class only provides a programmatic interface to load data in an orderly
+   manner, respecting usage protocols. It does **not** contain the raw
+   data files. You should procure those yourself.
+
+Notice that ``original_directory`` is set to ``[YOUR_WMCA_DB_DIRECTORY]``.
+You must create the ``${HOME}/.bob_bio_databases.txt`` file, setting this
+value to the place where you actually installed the BATL (WMCA) database.
+"""
+
+protocol = PROTOCOL
+"""
+You may modify this at runtime by specifying the option ``--protocol`` on the
+command-line of ``spoof.py`` or using the keyword ``protocol`` on a
+configuration file that is loaded **after** this configuration resource.
+"""
+
+groups = ["train", "dev", "eval"]
+"""The default groups to use for reproducing the baselines.
+
+You may modify this at runtime by specifying the option ``--groups`` on the
+command-line of ``spoof.py`` or using the keyword ``groups`` on a
+configuration file that is loaded **after** this configuration resource.
+"""
diff --git a/bob/paper/mccnn/tifs2018/database/batl_db_depth.py b/bob/paper/mccnn/tifs2018/database/batl_db_depth.py
new file mode 100644
index 0000000000000000000000000000000000000000..5e982a6725850d237544a5ca700c90d435c66796
--- /dev/null
+++ b/bob/paper/mccnn/tifs2018/database/batl_db_depth.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+"""
+BATL Db is a database for face PAD experiments.
+"""
+
+from bob.pad.face.database import BatlPadDatabase
+
+# Directory where the data files are stored.
+# This directory is given in the .bob_bio_databases.txt file located in your home directory
+ORIGINAL_DIRECTORY = "[YOUR_WMCA_DB_DIRECTORY]"
+"""Value of ``~/.bob_bio_databases.txt`` for this database"""
+
+ORIGINAL_EXTENSION = ".h5"  # extension of the data files
+
+ANNOTATIONS_TEMP_DIR = "[YOUR_WMCA_ANNOTATIONS_DIRECTORY]"
+
+unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+
+PROTOCOL = 'grandtest-depth-50'+unseen_protocols[0]
+
+database = BatlPadDatabase(
+    protocol=PROTOCOL,
+    original_directory=ORIGINAL_DIRECTORY,
+    original_extension=ORIGINAL_EXTENSION,
+    landmark_detect_method="mtcnn",  
+    exclude_attacks_list=['makeup'],
+    exclude_pai_all_sets=True, 
+    append_color_face_roi_annot=False) 
+
+"""The :py:class:`bob.pad.base.database.BatlPadDatabase` derivative with BATL Db
+database settings.
+
+.. warning::
+
+   This class only provides a programmatic interface to load data in an orderly
+   manner, respecting usage protocols. It does **not** contain the raw
+   data files. You should procure those yourself.
+
+Notice that ``original_directory`` is set to ``[YOUR_WMCA_DB_DIRECTORY]``.
+You must create the ``${HOME}/.bob_bio_databases.txt`` file, setting this
+value to the place where you actually installed the BATL (WMCA) database.
+"""
+
+protocol = PROTOCOL
+"""
+You may modify this at runtime by specifying the option ``--protocol`` on the
+command-line of ``spoof.py`` or using the keyword ``protocol`` on a
+configuration file that is loaded **after** this configuration resource.
+"""
+
+groups = ["train", "dev", "eval"]
+"""The default groups to use for reproducing the baselines.
+
+You may modify this at runtime by specifying the option ``--groups`` on the
+command-line of ``spoof.py`` or using the keyword ``groups`` on a
+configuration file that is loaded **after** this configuration resource.
+"""
diff --git a/bob/paper/mccnn/tifs2018/database/batl_db_infrared.py b/bob/paper/mccnn/tifs2018/database/batl_db_infrared.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69e8c966f170db221f981fb19ffefe85d7351e6
--- /dev/null
+++ b/bob/paper/mccnn/tifs2018/database/batl_db_infrared.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+"""
+BATL Db is a database for face PAD experiments.
+"""
+
+from bob.pad.face.database import BatlPadDatabase
+
+# Directory where the data files are stored.
+# This directory is given in the .bob_bio_databases.txt file located in your home directory
+ORIGINAL_DIRECTORY = "[YOUR_WMCA_DB_DIRECTORY]"
+"""Value of ``~/.bob_bio_databases.txt`` for this database"""
+
+ORIGINAL_EXTENSION = ".h5"  # extension of the data files
+
+ANNOTATIONS_TEMP_DIR = "[YOUR_WMCA_ANNOTATIONS_DIRECTORY]"
+
+unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+
+PROTOCOL = 'grandtest-infrared-50'+unseen_protocols[0]
+
+database = BatlPadDatabase(
+    protocol=PROTOCOL,
+    original_directory=ORIGINAL_DIRECTORY,
+    original_extension=ORIGINAL_EXTENSION,
+    landmark_detect_method="mtcnn",  
+    exclude_attacks_list=['makeup'],
+    exclude_pai_all_sets=True, 
+    append_color_face_roi_annot=False) 
+
+"""The :py:class:`bob.pad.base.database.BatlPadDatabase` derivative with BATL Db
+database settings.
+
+.. warning::
+
+   This class only provides a programmatic interface to load data in an orderly
+   manner, respecting usage protocols. It does **not** contain the raw
+   data files. You should procure those yourself.
+
+Notice that ``original_directory`` is set to ``[YOUR_WMCA_DB_DIRECTORY]``.
+You must create the ``${HOME}/.bob_bio_databases.txt`` file, setting this
+value to the place where you actually installed the BATL (WMCA) database.
+"""
+
+protocol = PROTOCOL
+"""
+You may modify this at runtime by specifying the option ``--protocol`` on the
+command-line of ``spoof.py`` or using the keyword ``protocol`` on a
+configuration file that is loaded **after** this configuration resource.
+"""
+
+groups = ["train", "dev", "eval"]
+"""The default groups to use for reproducing the baselines.
+
+You may modify this at runtime by specifying the option ``--groups`` on the
+command-line of ``spoof.py`` or using the keyword ``groups`` on a
+configuration file that is loaded **after** this configuration resource.
+"""
diff --git a/bob/paper/mccnn/tifs2018/database/batl_db_rgb_ir_d_t_grandtest.py b/bob/paper/mccnn/tifs2018/database/batl_db_rgb_ir_d_t_grandtest.py
new file mode 100644
index 0000000000000000000000000000000000000000..aa05a0ec6dec25507e33e930affec234fa5791e7
--- /dev/null
+++ b/bob/paper/mccnn/tifs2018/database/batl_db_rgb_ir_d_t_grandtest.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+"""
+BATL Db is a database for face PAD experiments.
+"""
+
+from bob.pad.face.database import BatlPadDatabase
+
+# Directory where the data files are stored.
+# This directory is given in the .bob_bio_databases.txt file located in your home directory
+ORIGINAL_DIRECTORY = "[YOUR_WMCA_DB_DIRECTORY]"
+"""Value of ``~/.bob_bio_databases.txt`` for this database"""
+
+ORIGINAL_EXTENSION = ".h5"  # extension of the data files
+
+ANNOTATIONS_TEMP_DIR = "[YOUR_WMCA_ANNOTATIONS_DIRECTORY]"
+
+unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+
+PROTOCOL = 'grandtest-color*infrared*depth*thermal-50'+unseen_protocols[0]
+
+database = BatlPadDatabase(
+    protocol=PROTOCOL,
+    original_directory=ORIGINAL_DIRECTORY,
+    original_extension=ORIGINAL_EXTENSION,
+    landmark_detect_method="mtcnn",  
+    exclude_attacks_list=['makeup'],
+    exclude_pai_all_sets=True, 
+    append_color_face_roi_annot=False) 
+
+"""The :py:class:`bob.pad.base.database.BatlPadDatabase` derivative with BATL Db
+database settings.
+
+.. warning::
+
+   This class only provides a programmatic interface to load data in an orderly
+   manner, respecting usage protocols. It does **not** contain the raw
+   data files. You should procure those yourself.
+
+Notice that ``original_directory`` is set to ``[YOUR_WMCA_DB_DIRECTORY]``.
+You must create the ``${HOME}/.bob_bio_databases.txt`` file, setting this
+value to the place where you actually installed the BATL (WMCA) database.
+"""
+
+protocol = PROTOCOL
+"""
+You may modify this at runtime by specifying the option ``--protocol`` on the
+command-line of ``spoof.py`` or using the keyword ``protocol`` on a
+configuration file that is loaded **after** this configuration resource.
+"""
+
+groups = ["train", "dev", "eval"]
+"""The default groups to use for reproducing the baselines.
+
+You may modify this at runtime by specifying the option ``--groups`` on the
+command-line of ``spoof.py`` or using the keyword ``groups`` on a
+configuration file that is loaded **after** this configuration resource.
+"""
diff --git a/bob/paper/mccnn/tifs2018/database/batl_db_thermal.py b/bob/paper/mccnn/tifs2018/database/batl_db_thermal.py
new file mode 100644
index 0000000000000000000000000000000000000000..c04b715d228fb0ffe639f3657efbe26f8d2f6df6
--- /dev/null
+++ b/bob/paper/mccnn/tifs2018/database/batl_db_thermal.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+"""
+BATL Db is a database for face PAD experiments.
+"""
+
+from bob.pad.face.database import BatlPadDatabase
+
+# Directory where the data files are stored.
+# This directory is given in the .bob_bio_databases.txt file located in your home directory
+ORIGINAL_DIRECTORY = "[YOUR_WMCA_DB_DIRECTORY]"
+"""Value of ``~/.bob_bio_databases.txt`` for this database"""
+
+ORIGINAL_EXTENSION = ".h5"  # extension of the data files
+
+ANNOTATIONS_TEMP_DIR = "[YOUR_WMCA_ANNOTATIONS_DIRECTORY]"
+
+unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+
+PROTOCOL = 'grandtest-thermal-50'+unseen_protocols[0]
+
+database = BatlPadDatabase(
+    protocol=PROTOCOL,
+    original_directory=ORIGINAL_DIRECTORY,
+    original_extension=ORIGINAL_EXTENSION,
+    landmark_detect_method="mtcnn",  
+    exclude_attacks_list=['makeup'],
+    exclude_pai_all_sets=True, 
+    append_color_face_roi_annot=False) 
+
+"""The :py:class:`bob.pad.base.database.BatlPadDatabase` derivative with BATL Db
+database settings.
+
+.. warning::
+
+   This class only provides a programmatic interface to load data in an orderly
+   manner, respecting usage protocols. It does **not** contain the raw
+   data files. You should procure those yourself.
+
+Notice that ``original_directory`` is set to ``[YOUR_WMCA_DB_DIRECTORY]``.
+You must create the ``${HOME}/.bob_bio_databases.txt`` file, setting this
+value to the place where you actually installed the BATL (WMCA) database.
+"""
+
+protocol = PROTOCOL
+"""
+You may modify this at runtime by specifying the option ``--protocol`` on the
+command-line of ``spoof.py`` or using the keyword ``protocol`` on a
+configuration file that is loaded **after** this configuration resource.
+"""
+
+groups = ["train", "dev", "eval"]
+"""The default groups to use for reproducing the baselines.
+
+You may modify this at runtime by specifying the option ``--groups`` on the
+command-line of ``spoof.py`` or using the keyword ``groups`` on a
+configuration file that is loaded **after** this configuration resource.
+"""
diff --git a/bob/paper/mccnn/tifs2018/extractor/HaralickRDWT.py b/bob/paper/mccnn/tifs2018/extractor/HaralickRDWT.py
new file mode 100644
index 0000000000000000000000000000000000000000..08e2c9c82582f92287ca68c047a22674abb68a5a
--- /dev/null
+++ b/bob/paper/mccnn/tifs2018/extractor/HaralickRDWT.py
@@ -0,0 +1,163 @@
+from __future__ import division
+from bob.bio.base.extractor import Extractor
+import bob.bio.video
+import bob.ip.base
+import numpy as np
+
+#Extra packages
+import pywt
+import mahotas
+
+#TODO: Move to paper package, number of directions?, type casting, 
+
+
+class HaralickRDWT(Extractor):
+    """Calculates RDWT+Haralick feature descriptors for 2D and multi-channel images.
+
+    Parameters
+    ----------
+    wavelet : str
+        The wavelet family to use for the decomposition. Default: 'db1'
+    n_hor : int
+        Number of blocks horizontally for block-wise feature extraction.
+        Default: 1
+    n_vert : int
+        Number of blocks vertically for block-wise feature extraction.
+        Default: 1
+
+    Attributes
+    ----------
+    dtype : numpy.dtype
+        If a ``dtype`` is specified in the constructor, it is assured that the
+        resulting features have that dtype.
+    """
+
+    def __init__(self,
+                 dtype=None,
+                 wavelet='db1',
+                 n_hor=1,
+                 n_vert=1):
+
+        super(HaralickRDWT, self).__init__(
+            dtype=dtype,
+            wavelet=wavelet,
+            n_hor=n_hor,
+            n_vert=n_vert)
+
+    
+        self.dtype = dtype
+        self.wavelet=wavelet
+        self.n_hor = n_hor
+        self.n_vert = n_vert
+
+
+    def min_max_norm(self, img, do_norm):
+        """
+        Normalizes the image to the 0-255 range based on its min/max values and casts it to 'uint8'.
+        """
+
+        if do_norm:
+
+            t_min = np.min(img.flatten())
+            t_max = np.max(img.flatten())
+
+            # guard against division by zero for constant-valued inputs
+            if t_max == t_min:
+                data_n = np.zeros_like(img, dtype='float64')
+            else:
+                data_n = ((img - t_min) / (t_max - t_min)) * 255.0
+        else:
+            data_n = img.copy()
+
+        return data_n.astype('uint8')
+
+
+    def comp_block_rwdt_haralick(self, data):
+        """
+        Computes the RDWT decomposition of a gray-scale image/block and extracts Haralick
+        descriptors from each sub-band as well as from the original block.
+
+        Parameters
+        ----------
+        data : numpy.ndarray
+            The preprocessed data to be transformed into one vector.
+
+        Returns
+        -------
+        1D :py:class:`numpy.ndarray`
+            The extracted feature vector, of the desired ``dtype`` (if
+            specified)
+        """
+        assert isinstance(data, np.ndarray)
+
+        # 1 level SWT/ UDWT decomposition
+        
+        coeff=pywt.swt2(data, self.wavelet,1)
+        LL, (LH, HL, HH) = coeff[0]
+
+        decompositions=[LL,LH,HL,HH,data] # all four decompositions and the original data
+
+        features=[]
+
+        for decomposition in decompositions:
+
+            # Haralick needs uint8 input; ``return_mean=True`` averages the descriptors over
+            # the co-occurrence directions, giving one 13-element vector per decomposition.
+            feat = mahotas.features.haralick(f=self.min_max_norm(decomposition, True),
+                                             return_mean=True,
+                                             return_mean_ptp=False,
+                                             use_x_minus_y_variance=False)
+
+            features.append(feat)
+
+        # feature vector for the patch
+        comb_patch=np.array(features).reshape(1,-1)
+
+        return comb_patch
+
+
+
+
+    def __call__(self, mcdata):
+        """
+        Extracts RDWT+Haralick features from multi-channel images, block-wise.
+
+        Parameters
+        ----------
+        mcdata : numpy.ndarray (n x row x col)
+            The preprocessed data to be transformed into one vector; ``n`` is the number of channels.
+
+        Returns
+        -------
+        1D :py:class:`numpy.ndarray`
+            The extracted feature vector, of the desired ``dtype`` (if
+            specified)
+
+        """
+
+        assert isinstance(mcdata, np.ndarray)
+
+        if len(mcdata.shape)>2:
+            channels=mcdata.shape[0]
+        else:
+            channels=1
+            mcdata=np.expand_dims(mcdata,0)
+
+        haralick_feat=[]
+
+        for channel in range(channels):
+
+            data=mcdata[channel,:]  # 2D image
+
+            #print("data.shape",data.shape)
+
+            # Make sure the data can be split into equal blocks:
+            row_max = int(data.shape[0] / self.n_vert) * self.n_vert
+            col_max = int(data.shape[1] / self.n_hor) * self.n_hor
+            data = data[:row_max, :col_max]
+
+            blocks = [sub_block for block in np.hsplit(data, self.n_hor) for sub_block in np.vsplit(block, self.n_vert)]
+
+            patch_haralick_feat = [self.comp_block_rwdt_haralick(block) for block in blocks]
+
+            haralick_feat.append(np.array(patch_haralick_feat).flatten())
+
+        feat=np.array(haralick_feat).flatten() # flatten the features
+
+        return feat
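+
+# Illustrative usage (a sketch, not part of the extractor; the array below is a hypothetical
+# 4-channel 128x128 face stack):
+#
+#   import numpy as np
+#   mc_face = np.random.rand(4, 128, 128)
+#   feat = HaralickRDWT(wavelet='db1', n_hor=4, n_vert=4)(mc_face)
+#   # length: 4 channels * 16 blocks * 5 sub-bands (LL, LH, HL, HH, original) * 13 Haralick
+#   # features = 4160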
+
+       
\ No newline at end of file
diff --git a/bob/paper/mccnn/tifs2018/extractor/__init__.py b/bob/paper/mccnn/tifs2018/extractor/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b5af4d85ff11b98f7682fb83ddd448420604bc7
--- /dev/null
+++ b/bob/paper/mccnn/tifs2018/extractor/__init__.py
@@ -0,0 +1 @@
+from .HaralickRDWT import HaralickRDWT
\ No newline at end of file
diff --git a/bob/paper/mccnn/tifs2018/extractor/wmca_config_pytorch_extractor_v1.py b/bob/paper/mccnn/tifs2018/extractor/wmca_config_pytorch_extractor_v1.py
new file mode 100644
index 0000000000000000000000000000000000000000..d8b41f70b85ee4c39e3967d05e84a6d135832b64
--- /dev/null
+++ b/bob/paper/mccnn/tifs2018/extractor/wmca_config_pytorch_extractor_v1.py
@@ -0,0 +1,146 @@
+
+
+# =============================================================================
+# define instance of the preprocessor:
+
+from bob.pad.face.preprocessor import VideoFaceCropAlignBlockPatch
+
+from bob.pad.face.preprocessor import FaceCropAlign
+
+from bob.bio.video.preprocessor import Wrapper
+
+from bob.bio.video.utils import FrameSelector
+
+from bob.pad.face.preprocessor.FaceCropAlign import auto_norm_image as _norm_func
+
+from torchvision import transforms
+
+from bob.learn.pytorch.datasets import ChannelSelect
+
+# names of the channels to process:
+_channel_names = ['color','depth','infrared','thermal']
+
+# dictionary containing preprocessors for all channels:
+_preprocessors = {}
+
+"""
+Preprocessor to be used for Color channel.
+"""
+FACE_SIZE = 128  # The size of the resulting face
+RGB_OUTPUT_FLAG = False  # BW output
+USE_FACE_ALIGNMENT = True  # use annotations
+MAX_IMAGE_SIZE = None  # no limiting here
+FACE_DETECTION_METHOD = 'mtcnn'  # detect faces using MTCNN
+MIN_FACE_SIZE = 50  # skip small faces
+ALIGNMENT_TYPE = 'lightcnn'
+
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                    rgb_output_flag = RGB_OUTPUT_FLAG,
+                                    use_face_alignment = USE_FACE_ALIGNMENT,
+                                    alignment_type =ALIGNMENT_TYPE,
+                                    max_image_size = MAX_IMAGE_SIZE,
+                                    face_detection_method = FACE_DETECTION_METHOD,
+                                    min_face_size = MIN_FACE_SIZE)
+
+_frame_selector = FrameSelector(selection_style = "all")
+
+_preprocessor_rgb = Wrapper(preprocessor = _image_preprocessor,
+                            frame_selector = _frame_selector)
+
+_preprocessors[_channel_names[0]] = _preprocessor_rgb
+
+"""
+Preprocessor to be used for Depth, Infrared or Thermal channels:
+"""
+FACE_SIZE = 128  # The size of the resulting face
+RGB_OUTPUT_FLAG = False  # Gray-scale output
+USE_FACE_ALIGNMENT = True  # use annotations
+MAX_IMAGE_SIZE = None  # no limiting here
+FACE_DETECTION_METHOD = None  # use annotations
+MIN_FACE_SIZE = 50  # skip small faces
+NORMALIZATION_FUNCTION = _norm_func
+# kwargs of the normalization function defined above (see ``auto_norm_image``):
+NORMALIZATION_FUNCTION_KWARGS = {'n_sigma': 3.0, 'norm_method': 'MAD'}
+
+_image_preprocessor_ir = FaceCropAlign(face_size = FACE_SIZE,
+                                    rgb_output_flag = RGB_OUTPUT_FLAG,
+                                    use_face_alignment = USE_FACE_ALIGNMENT,
+                                    alignment_type =ALIGNMENT_TYPE,
+                                    max_image_size = MAX_IMAGE_SIZE,
+                                    face_detection_method = FACE_DETECTION_METHOD,
+                                    min_face_size = MIN_FACE_SIZE,
+                                    normalization_function = NORMALIZATION_FUNCTION,
+                                    normalization_function_kwargs = NORMALIZATION_FUNCTION_KWARGS)
+
+_preprocessor_ir = Wrapper(preprocessor = _image_preprocessor_ir,
+                               frame_selector = _frame_selector)
+
+_preprocessors[_channel_names[1]] = _preprocessor_ir
+
+
+_preprocessors[_channel_names[2]] = _preprocessor_ir
+
+_preprocessors[_channel_names[3]] = _preprocessor_ir
+
+
+preprocessor = VideoFaceCropAlignBlockPatch(preprocessors = _preprocessors,
+                                            channel_names = _channel_names,
+                                            return_multi_channel_flag = True)
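+"""
+With ``return_multi_channel_flag = True`` the per-channel face crops are returned as a single
+multi-channel array per frame, which is the input expected by the MC-CNN extractor defined below.
+"""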
+
+
+#====================================================================================
+# MC-CNN algorithm
+
+
+from bob.learn.pytorch.extractor.image import MCCNNExtractor
+
+from bob.bio.video.extractor import Wrapper
+
+MODEL_FILE='/idiap/temp/ageorge/Pytorch_WMCA/MCCNNv1/0_1_2_3_False_conv1-group1-block1-block2-group2-block3-group3-block4-group4-fc-ffc_4/model_25_0.pth'   
+####################################################################
+
+ADAPTED_LAYERS= 'conv1-group1-block1-block2-group2-block3-group3-block4-group4-fc-ffc'
+####################################################################
+
+SELECTED_CHANNELS= [0,1,2,3]
+####################################################################
+
+
+NUM_CHANNELS_USED=len(SELECTED_CHANNELS)
+
+_img_transform = transforms.Compose([ChannelSelect(selected_channels = SELECTED_CHANNELS),transforms.ToTensor()])
+
+
+_image_extractor = MCCNNExtractor(num_channels_used=NUM_CHANNELS_USED, transforms=_img_transform, model_file=MODEL_FILE)
+
+extractor = Wrapper(_image_extractor)
+
+#=======================================================================================
+# define algorithm:
+# Dummy algorithm
+
+from bob.pad.base.algorithm import Algorithm
+
+class DummyAlgorithm(Algorithm):
+    """An algorithm that takes the precomputed predictions and uses them for
+    scoring."""
+
+    def __init__(self, **kwargs):
+
+      super(DummyAlgorithm, self).__init__(
+          **kwargs)
+
+    def project(self, feature):
+      # print("feature",feature.as_array())
+      return feature.as_array().reshape(-1,1)
+
+
+    def score_for_multiple_projections(self, predictions):
+      # one node at the output
+
+      return list(predictions)
+
+    def score(self, predictions):
+      return list(predictions)
+
+algorithm = DummyAlgorithm(performs_projection=True,  requires_projector_training=False)
\ No newline at end of file
diff --git a/bob/paper/mccnn/tifs2018/extractor/wmca_config_pytorch_extractor_v1_FASNet.py b/bob/paper/mccnn/tifs2018/extractor/wmca_config_pytorch_extractor_v1_FASNet.py
new file mode 100644
index 0000000000000000000000000000000000000000..210c967b838f78d3fbcac6c60bff2afdbb43d19e
--- /dev/null
+++ b/bob/paper/mccnn/tifs2018/extractor/wmca_config_pytorch_extractor_v1_FASNet.py
@@ -0,0 +1,147 @@
+
+
+# =============================================================================
+# define instance of the preprocessor:
+
+from bob.pad.face.preprocessor import VideoFaceCropAlignBlockPatch
+
+from bob.pad.face.preprocessor import FaceCropAlign
+
+from bob.bio.video.preprocessor import Wrapper
+
+from bob.bio.video.utils import FrameSelector
+
+from bob.pad.face.preprocessor.FaceCropAlign import auto_norm_image as _norm_func
+
+from torchvision import transforms
+
+from bob.learn.pytorch.datasets import ChannelSelect
+
+# names of the channels to process:
+_channel_names = ['color','depth','infrared','thermal']
+
+# dictionary containing preprocessors for all channels:
+_preprocessors = {}
+
+"""
+Preprocessor to be used for Color channel.
+"""
+FACE_SIZE = 128  # The size of the resulting face
+RGB_OUTPUT_FLAG = False  # BW output
+USE_FACE_ALIGNMENT = True  # use annotations
+MAX_IMAGE_SIZE = None  # no limiting here
+FACE_DETECTION_METHOD = 'mtcnn'  # detect faces using MTCNN
+MIN_FACE_SIZE = 50  # skip small faces
+ALIGNMENT_TYPE = 'lightcnn'
+
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                    rgb_output_flag = RGB_OUTPUT_FLAG,
+                                    use_face_alignment = USE_FACE_ALIGNMENT,
+                                    alignment_type =ALIGNMENT_TYPE,
+                                    max_image_size = MAX_IMAGE_SIZE,
+                                    face_detection_method = FACE_DETECTION_METHOD,
+                                    min_face_size = MIN_FACE_SIZE)
+
+_frame_selector = FrameSelector(selection_style = "all")
+
+_preprocessor_rgb = Wrapper(preprocessor = _image_preprocessor,
+                            frame_selector = _frame_selector)
+
+_preprocessors[_channel_names[0]] = _preprocessor_rgb
+
+"""
+Preprocessor to be used for Depth, Infrared or Thermal channels:
+"""
+FACE_SIZE = 128  # The size of the resulting face
+RGB_OUTPUT_FLAG = False  # Gray-scale output
+USE_FACE_ALIGNMENT = True  # use annotations
+MAX_IMAGE_SIZE = None  # no limiting here
+FACE_DETECTION_METHOD = None  # use annotations
+MIN_FACE_SIZE = 50  # skip small faces
+NORMALIZATION_FUNCTION = _norm_func
+# kwargs of the normalization function defined above (see ``auto_norm_image``):
+NORMALIZATION_FUNCTION_KWARGS = {'n_sigma': 3.0, 'norm_method': 'MAD'}
+
+_image_preprocessor_ir = FaceCropAlign(face_size = FACE_SIZE,
+                                    rgb_output_flag = RGB_OUTPUT_FLAG,
+                                    use_face_alignment = USE_FACE_ALIGNMENT,
+                                    alignment_type =ALIGNMENT_TYPE,
+                                    max_image_size = MAX_IMAGE_SIZE,
+                                    face_detection_method = FACE_DETECTION_METHOD,
+                                    min_face_size = MIN_FACE_SIZE,
+                                    normalization_function = NORMALIZATION_FUNCTION,
+                                    normalization_function_kwargs = NORMALIZATION_FUNCTION_KWARGS)
+
+_preprocessor_ir = Wrapper(preprocessor = _image_preprocessor_ir,
+                               frame_selector = _frame_selector)
+
+_preprocessors[_channel_names[1]] = _preprocessor_ir
+
+
+_preprocessors[_channel_names[2]] = _preprocessor_ir
+
+_preprocessors[_channel_names[3]] = _preprocessor_ir
+
+
+preprocessor = VideoFaceCropAlignBlockPatch(preprocessors = _preprocessors,
+                                            channel_names = _channel_names,
+                                            return_multi_channel_flag = True)
+
+
+#====================================================================================
+# MC-CNN algorithm
+
+
+from bob.learn.pytorch.extractor.image import FASNetExtractor
+
+from bob.bio.video.extractor import Wrapper
+
+MODEL_FILE='/idiap/temp/ageorge/WMCA_FASNet/0_1_2_False_conv1-block1-ffc_3_grandtest-color-50/model_25_0.pth'   
+####################################################################
+
+ADAPTED_LAYERS= 'conv1-group1-block1-block2-group2-block3-group3-block4-group4-fc-ffc'
+####################################################################
+
+SELECTED_CHANNELS= [0,1,2,3]
+####################################################################
+
+
+NUM_CHANNELS_USED=len(SELECTED_CHANNELS)
+
+_img_transform = transforms.Compose([transforms.ToPILImage(),transforms.Resize(224, interpolation=2),transforms.ToTensor(),transforms.Normalize(mean=[0.485, 0.456, 0.406],
+                                 std=[0.229, 0.224, 0.225])])
+
+
+_image_extractor = FASNetExtractor(transforms=_img_transform, model_file=MODEL_FILE)
+
+extractor = Wrapper(_image_extractor)
+
+#=======================================================================================
+# define algorithm:
+# Dummy algorithm
+
+from bob.pad.base.algorithm import Algorithm
+
+class DummyAlgorithm(Algorithm):
+    """An algorithm that takes the precomputed predictions and uses them for
+    scoring."""
+
+    def __init__(self, **kwargs):
+
+      super(DummyAlgorithm, self).__init__(
+          **kwargs)
+
+    def project(self, feature):
+      # print("feature",feature.as_array())
+      return feature.as_array().reshape(-1,1)
+
+
+    def score_for_multiple_projections(self, predictions):
+      # one node at the output
+
+      return list(predictions)
+
+    def score(self, predictions):
+      return list(predictions)
+
+algorithm = DummyAlgorithm(performs_projection=True,  requires_projector_training=False)
diff --git a/bob/paper/mccnn/tifs2018/trainer_configs/wmca_FASNet.py b/bob/paper/mccnn/tifs2018/trainer_configs/wmca_FASNet.py
new file mode 100644
index 0000000000000000000000000000000000000000..f25a77492724ec1bd10e46f9db622603cbff030c
--- /dev/null
+++ b/bob/paper/mccnn/tifs2018/trainer_configs/wmca_FASNet.py
@@ -0,0 +1,159 @@
+
+from torchvision import transforms
+
+from bob.learn.pytorch.architectures import FASNet
+
+from bob.learn.pytorch.datasets import DataFolder
+
+from bob.pad.face.database import BatlPadDatabase
+
+from bob.learn.pytorch.datasets import ChannelSelect, RandomHorizontalFlipImage
+
+
+#==============================================================================
+# Load the dataset
+
+""" The steps are as follows
+
+1. Initialize a database instance, with the protocol, groups and number of frames
+	(currently for the databases in 'bob.pad.face'), and point 'data_folder_train' to the preprocessed directory.
+	Note: Here we assume that the data has already been preprocessed with the `spoof.py` script and dumped to the
+	location pointed to by 'data_folder_train'.
+
+2. Specify the transform to be used on the images. It can be an instance of `torchvision.transforms.Compose` or a custom function.
+
+3. Initialize the `data_folder` class with the database instance and all other parameters. This dataset instance is used in
+ the trainer class.
+
+4. Initialize the network architecture with required arguments.
+
+5. Define the parameters for the trainer. 
+
+"""
+
+#==============================================================================
+# Initialize the bob database instance 
+
+data_folder_train='/idiap/temp/ageorge/WMCA_channels_baseline/color/preprocessed/'
+
+output_base_path='/idiap/temp/ageorge/WMCA_FASNet/' 
+
+unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+
+PROTOCOL_INDEX=0 
+####################################################################
+
+frames=50
+
+extension='.h5'
+
+train_groups=['train'] # only 'train' group is used for training the network
+
+val_groups=['dev']
+
+do_crossvalidation=True
+####################################################################
+
+if do_crossvalidation:
+	phases=['train','val']
+else:
+	phases=['train']
+
+groups={"train":['train'],"val":['dev']}
+
+
+protocols="grandtest-color-50"+unseen_protocols[PROTOCOL_INDEX] # makeup is excluded anyway here
+
+exclude_attacks_list = ["makeup"]
+
+bob_hldi_instance = BatlPadDatabase(
+    protocol=protocols,
+    original_directory=data_folder_train,
+    original_extension=extension,
+    landmark_detect_method="mtcnn",  # detect annotations using mtcnn
+    exclude_attacks_list=exclude_attacks_list,
+    exclude_pai_all_sets=True,  # exclude makeup from all the sets, which is the default behavior for grandtest protocol
+    append_color_face_roi_annot=False) 
+
+#==============================================================================
+# Initialize the torch dataset, subselect channels from the pretrained files if needed.
+
+SELECTED_CHANNELS = [0,1,2] 
+####################################################################
+
+
+img_transform={}
+
+img_transform['train'] = transforms.Compose(
+    [transforms.ToPILImage(), transforms.RandomHorizontalFlip(),
+     transforms.Resize(224, interpolation=2), transforms.ToTensor(),
+     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
+
+img_transform['val'] = transforms.Compose(
+    [transforms.ToPILImage(), transforms.Resize(224, interpolation=2),
+     transforms.ToTensor(),
+     transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])])
+
+dataset={}
+
+for phase in phases:
+
+	dataset[phase] = DataFolder(data_folder=data_folder_train,
+						 transform=img_transform[phase],
+						 extension='.hdf5',
+						 bob_hldi_instance=bob_hldi_instance,
+						 groups=groups[phase],
+						 protocol=protocols,
+						 purposes=['real', 'attack'],
+						 allow_missing_files=True)
+
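+# A hedged note: each DataFolder sample is assumed to be an (image, label) pair,
+# with a binary label distinguishing the 'real' and 'attack' purposes selected
+# above, e.g. (illustrative only, not executed here):
+#
+#   image, label = dataset['train'][0]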
+
+
+#==============================================================================
+# Specify other training parameters
+
+NUM_CHANNELS = len(SELECTED_CHANNELS)
+
+ADAPTED_LAYERS = 'conv1-block1-ffc'
+####################################################################
+ADAPT_REF_CHANNEL = False
+####################################################################
+
+
+
+batch_size = 32
+num_workers = 0
+epochs=25
+learning_rate=0.0001
+seed = 3
+use_gpu = False
+adapted_layers = ADAPTED_LAYERS
+adapt_reference_channel = ADAPT_REF_CHANNEL
+verbose = 2
+UID = "_".join([str(i) for i in SELECTED_CHANNELS])+"_"+str(ADAPT_REF_CHANNEL)+"_"+ADAPTED_LAYERS+"_"+str(NUM_CHANNELS)+"_"+protocols
+training_logs= output_base_path+UID+'/train_log_dir/'
+output_dir = output_base_path+UID
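+
+# With the defaults above (SELECTED_CHANNELS=[0,1,2], ADAPT_REF_CHANNEL=False,
+# ADAPTED_LAYERS='conv1-block1-ffc', NUM_CHANNELS=3, protocols='grandtest-color-50'),
+# UID resolves to '0_1_2_False_conv1-block1-ffc_3_grandtest-color-50' and hence
+# output_dir = '/idiap/temp/ageorge/WMCA_FASNet/0_1_2_False_conv1-block1-ffc_3_grandtest-color-50'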
+
+
+#==============================================================================
+# Load the architecture
+
+
+assert(len(SELECTED_CHANNELS)==NUM_CHANNELS)
+
+network=FASNet(pretrained=True)
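+
+# Hedged sanity check (commented out; the 224x224 RGB input size is an assumption
+# inferred from the transforms above, not a documented FASNet requirement):
+#
+#   import torch
+#   with torch.no_grad():
+#       scores = network(torch.randn(1, 3, 224, 224))  # expected: one score per image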
+#==============================================================================
+"""
+Note: Running on GPU
+
+jman submit --queue gpu \
+--name FASNet \
+--log-dir /idiap/temp/ageorge/WMCA_FASNet/logs/ \
+--environment="PYTHONUNBUFFERED=1" -- \
+./bin/train_fasnet.py \
+/idiap/user/ageorge/WORK/MCCNN/bob.paper.mccnn.tifs2018/mccnn_configs/configs/trainer/wmca_FASNet.py --use-gpu -vvv
+
+Note: Running on CPU
+
+./bin/train_fasnet.py \
+/idiap/user/ageorge/WORK/MCCNN/bob.paper.mccnn.tifs2018/mccnn_configs/configs/trainer/wmca_FASNet.py -vvv
+
+
+"""
+
diff --git a/bob/paper/mccnn/tifs2018/trainer_configs/wmca_mccnn_v1.py b/bob/paper/mccnn/tifs2018/trainer_configs/wmca_mccnn_v1.py
new file mode 100644
index 0000000000000000000000000000000000000000..193362cd854054fc8f99ce4300fee219d192091d
--- /dev/null
+++ b/bob/paper/mccnn/tifs2018/trainer_configs/wmca_mccnn_v1.py
@@ -0,0 +1,134 @@
+
+from torchvision import transforms
+
+from bob.learn.pytorch.architectures import MCCNN
+
+from bob.learn.pytorch.datasets import DataFolder
+
+from bob.pad.face.database import BatlPadDatabase
+
+from bob.learn.pytorch.datasets import ChannelSelect, RandomHorizontalFlipImage
+
+
+#==============================================================================
+# Load the dataset
+
+""" The steps are as follows
+
+1. Initialize a databae instance, with the protocol, groups and number of frames 
+	(currently for the ones in 'bob.pad.face', and point 'data_folder_train' to the preprocessed directory )	
+	Note: Here we assume that we have already preprocessed the with `spoof.py` script and dumped it to location 
+	pointed to by 'data_folder_train'.
+
+2. Specify the transform to be used on the images. It can be instances of `torchvision.transforms.Compose` or custom functions.
+
+3. Initialize the `data_folder` class with the database instance and all other parameters. This dataset instance is used in
+ the trainer class
+
+4. Initialize the network architecture with required arguments.
+
+5. Define the parameters for the trainer. 
+
+"""
+
+#==============================================================================
+# Initialize the bob database instance 
+
+data_folder_train='/idiap/temp/ageorge/WMCA/preprocessed/'
+
+output_base_path='/idiap/temp/ageorge/Pytorch_WMCA/MCCNNv1_new_2/' 
+
+unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+
+PROTOCOL_INDEX=0 
+####################################################################
+
+frames=50
+
+extension='.h5'
+
+train_groups=['train'] # only 'train' group is used for training the network
+
+protocols="grandtest-color-50"+unseen_protocols[PROTOCOL_INDEX] # makeup is excluded anyway here
+
+exclude_attacks_list=["makeup"]
+
+bob_hldi_instance_train = BatlPadDatabase(
+    protocol=protocols,
+    original_directory=data_folder_train,
+    original_extension=extension,
+    landmark_detect_method="mtcnn",  # detect annotations using mtcnn
+    exclude_attacks_list=exclude_attacks_list,
+    exclude_pai_all_sets=True,  # exclude makeup from all the sets, which is the default behavior for grandtest protocol
+    append_color_face_roi_annot=False) 
+
+#==============================================================================
+# Initialize the torch dataset; subselect channels from the preprocessed files if needed.
+
+SELECTED_CHANNELS = [0,1,2,3] 
+####################################################################
+
+img_transform_train = transforms.Compose([ChannelSelect(selected_channels = SELECTED_CHANNELS),RandomHorizontalFlipImage(p=0.5),transforms.ToTensor()])
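+
+# Hedged note: ChannelSelect is assumed to keep only the channel planes listed in
+# SELECTED_CHANNELS from the multi-channel preprocessed stack, and
+# RandomHorizontalFlipImage(p=0.5) to flip all selected channels together, so the
+# channels stay spatially aligned during augmentation.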
+
+dataset = DataFolder(data_folder=data_folder_train,
+					 transform=img_transform_train,
+					 extension='.hdf5',
+					 bob_hldi_instance=bob_hldi_instance_train,
+					 groups=train_groups,
+					 protocol=protocols,
+					 purposes=['real', 'attack'],
+					 allow_missing_files=True)
+
+
+
+#==============================================================================
+# Specify other training parameters
+
+NUM_CHANNELS = len(SELECTED_CHANNELS)
+
+ADAPTED_LAYERS = 'conv1-block1-group1-ffc'
+####################################################################
+ADAPT_REF_CHANNEL = False
+####################################################################
+
+
+
+batch_size = 32
+num_workers = 0
+epochs=25
+learning_rate=0.0001
+seed = 3
+use_gpu = False
+adapted_layers = ADAPTED_LAYERS
+adapt_reference_channel = ADAPT_REF_CHANNEL
+verbose = 2
+UID = "_".join([str(i) for i in SELECTED_CHANNELS])+"_"+str(ADAPT_REF_CHANNEL)+"_"+ADAPTED_LAYERS+"_"+str(NUM_CHANNELS)+"_"+protocols
+training_logs= output_base_path+UID+'/train_log_dir/'
+output_dir = output_base_path+UID
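+
+# With the defaults above (SELECTED_CHANNELS=[0,1,2,3], ADAPT_REF_CHANNEL=False,
+# ADAPTED_LAYERS='conv1-block1-group1-ffc', NUM_CHANNELS=4, protocols='grandtest-color-50'),
+# UID resolves to '0_1_2_3_False_conv1-block1-group1-ffc_4_grandtest-color-50' and hence
+# output_dir = '/idiap/temp/ageorge/Pytorch_WMCA/MCCNNv1_new_2/0_1_2_3_False_conv1-block1-group1-ffc_4_grandtest-color-50'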
+
+
+#==============================================================================
+# Load the architecture
+
+
+assert(len(SELECTED_CHANNELS)==NUM_CHANNELS)
+
+network=MCCNN(num_channels = NUM_CHANNELS)
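+
+# Hedged sanity check (commented out; only the channel count is fixed by this
+# config, the 128x128 spatial size is an assumption about the preprocessed faces):
+#
+#   import torch
+#   with torch.no_grad():
+#       scores = network(torch.randn(1, NUM_CHANNELS, 128, 128))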
+#==============================================================================
+"""
+Note: Running on GPU
+
+jman submit --queue gpu \
+--name mccnnv2 \
+--log-dir /idiap/temp/ageorge/Pytorch_WMCA/MCCNNv2/logs/ \
+--environment="PYTHONUNBUFFERED=1" -- \
+./bin/train_mccnn.py \
+/idiap/user/ageorge/WORK/COMMON_ENV_PAD_BATL_DB/src/bob.learn.pytorch/bob/learn/pytorch/config/mccnn/wmca_mccnn.py --use-gpu -vvv
+
+Note: Running on CPU
+
+./bin/train_mccnn.py \
+/idiap/user/ageorge/WORK/COMMON_ENV_PAD_BATL_DB/src/bob.learn.pytorch/bob/learn/pytorch/config/mccnn/wmca_mccnn.py -vvv
+
+"""
+