From 58a5ba7519cc2920b141b6a6776f78829e42daad Mon Sep 17 00:00:00 2001
From: Olegs NIKISINS <onikisins@italix03.idiap.ch>
Date: Fri, 9 Jun 2017 12:10:49 +0200
Subject: [PATCH] Added the initial documentation on LBP+SVM for the
 Replay-attack DB

---
 bob/pad/face/config/lbp_svm.py        | 116 ++++++++++++++++++++
 doc/baselines.rst                     | 152 ++++++++++++++++++++++++++
 doc/img/ROC_lbp_svm_replay_attack.pdf |   0
 doc/installation.rst                  |   2 +
 doc/references.rst                    |  11 +-
 doc/resources.rst                     |  43 +++++++-
 setup.py                              |   7 ++
 7 files changed, 324 insertions(+), 7 deletions(-)
 create mode 100644 bob/pad/face/config/lbp_svm.py
 create mode 100644 doc/img/ROC_lbp_svm_replay_attack.pdf

diff --git a/bob/pad/face/config/lbp_svm.py b/bob/pad/face/config/lbp_svm.py
new file mode 100644
index 00000000..5ba874dc
--- /dev/null
+++ b/bob/pad/face/config/lbp_svm.py
@@ -0,0 +1,116 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+
+"""
+@author: Olegs Nikisins
+
+This file contains configurations to run LBP and SVM based face PAD baseline.
+
+The settings are tuned for the Replay-attack database.
+
+The idea of the algorithm is introduced in the following paper:
+
+[CAM12]_
+
+However some settings are different from the ones introduced in the paper.
+"""
+
+
+#=======================================================================================
+sub_directory = 'lbp_svm'
+"""
+Sub-directory where results will be placed.
+
+You may change this setting using the ``--sub-directory`` command-line option
+or the attribute ``sub_directory`` in a configuration file loaded **after**
+this resource.
+"""
+
+
+#=======================================================================================
+# define preprocessor:
+
+from ..preprocessor import VideoFaceCrop
+
+CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
+CROPPED_POSITIONS = {'topleft' : (0,0) , 'bottomright' : CROPPED_IMAGE_SIZE}
+FIXED_POSITIONS = None
+MASK_SIGMA = None             # The sigma for random values areas outside image
+MASK_NEIGHBORS = 5            # The number of neighbors to consider while extrapolating
+MASK_SEED = None              # The seed for generating random values during extrapolation
+CHECK_FACE_SIZE_FLAG = True   # Check the size of the face
+MIN_FACE_SIZE = 50            # Minimal possible size of the face
+USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
+COLOR_CHANNEL = 'gray'        # Convert image to gray-scale format
+
+preprocessor = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
+                             cropped_positions = CROPPED_POSITIONS,
+                             fixed_positions = FIXED_POSITIONS,
+                             mask_sigma = MASK_SIGMA,
+                             mask_neighbors = MASK_NEIGHBORS,
+                             mask_seed = None,
+                             check_face_size_flag = CHECK_FACE_SIZE_FLAG,
+                             min_face_size = MIN_FACE_SIZE,
+                             use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
+                             color_channel = COLOR_CHANNEL)
+"""
+In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
+The size of the face is normalized to ``cropped_image_size`` dimensions. The faces with the size
+below ``min_face_size`` threshold are discarded. The preprocessor is similar to the one introduced in
+[CAM12]_, which is defined by ``use_local_cropper_flag = True``.
+"""
+
+
+#=======================================================================================
+# define extractor:
+
+from ..extractor import VideoLBPHistogram
+
+LBPTYPE='uniform'
+ELBPTYPE='regular'
+RAD=1
+NEIGHBORS=8
+CIRC=False
+DTYPE=None
+
+extractor = VideoLBPHistogram(lbptype=LBPTYPE,
+                              elbptype=ELBPTYPE,
+                              rad=RAD,
+                              neighbors=NEIGHBORS,
+                              circ=CIRC,
+                              dtype=DTYPE)
+"""
+In the feature extraction stage the LBP histograms are extracted from each frame of the preprocessed video.
+
+The parameters are similar to the ones introduced in [CAM12]_.
+"""
+
+
+#=======================================================================================
+# define algorithm:
+
+from ..algorithm import VideoSvmPadAlgorithm
+
+MACHINE_TYPE = 'C_SVC'
+KERNEL_TYPE = 'RBF'
+N_SAMPLES = 10000
+TRAINER_GRID_SEARCH_PARAMS = {'cost': [2**P for P in range(-3, 14, 2)], 'gamma': [2**P for P in range(-15, 0, 2)]}
+MEAN_STD_NORM_FLAG = True      # enable mean-std normalization
+FRAME_LEVEL_SCORES_FLAG = True # one score per frame(!) in this case
+
+algorithm = VideoSvmPadAlgorithm(machine_type = MACHINE_TYPE,
+                                 kernel_type = KERNEL_TYPE,
+                                 n_samples = N_SAMPLES,
+                                 trainer_grid_search_params = TRAINER_GRID_SEARCH_PARAMS,
+                                 mean_std_norm_flag = MEAN_STD_NORM_FLAG,
+                                 frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+"""
+The SVM algorithm with RBF kernel is used to classify the data into *real* and *attack* classes.
+One score is produced for each frame of the input video, ``frame_level_scores_flag = True``.
+
+In contrast to [CAM12]_, the grid search of SVM parameters is used to select the
+successful settings. The grid search is done on the subset of training data. The size
+of this subset is defined by ``n_samples`` parameter.
+
+The data is also mean-std normalized, ``mean_std_norm_flag = True``.
+"""
diff --git a/doc/baselines.rst b/doc/baselines.rst
index 86228664..473af1fa 100644
--- a/doc/baselines.rst
+++ b/doc/baselines.rst
@@ -2,10 +2,162 @@
 
 .. _bob.pad.face.baselines:
 
+
 ===============================
  Executing Baseline Algorithms
 ===============================
 
+This section explains how to execute face presentation attack detection (PAD) algorithms implemented
+in ``bob.pad.face``.
+
+
+Running Baseline Experiments
+----------------------------
+
+To run the baseline PAD experiments, the ``spoof.py`` script located in the ``bin`` directory is used.
+To see the description of the script you can type in the console:
+
+.. code-block:: sh
+
+   $ ./bin/spoof.py --help
+
+This script is explained in more detail in :ref:`bob.pad.base.experiments`.
+
+Usually it is a good idea to have at least verbose level 2 (i.e., calling
+``spoof.py --verbose --verbose``, or the short version ``spoof.py
+-vv``).
+
+.. note:: **Running in Parallel**
+
+   To run the experiments in parallel, you can define an SGE grid or local host
+   (multi-processing) configurations as explained in
+   :ref:`running_in_parallel`.
+
+   In short, to run in the Idiap SGE grid, you can simply add the ``--grid``
+   command line option, with grid configuration parameters. To run experiments in parallel on
+   the local machine, simply add a ``--parallel <N>`` option, where ``<N>``
+   specifies the number of parallel jobs you want to execute.
+
+
+Database setups and baselines are encoded using
+:ref:`bob.bio.base.configuration-files`, all stored inside the package root, in
+the directory ``bob/pad/face/config``. Documentation for each resource
+is available on the section :ref:`bob.pad.face.resources`.
+
+.. warning::
+
+   You **cannot** run experiments just by executing the command line
+   instructions described in this guide. You **need first** to procure yourself
+   the raw data files that correspond to *each* database used here in order to
+   correctly run experiments with those data. Biometric data is considered
+   private data and, under EU regulations, cannot be distributed without a
+   consent or license. You may consult our
+   :ref:`bob.pad.face.resources.databases` resources section for checking
+   currently supported databases and accessing download links for the raw data
+   files.
+
+   Once the raw data files have been downloaded, particular attention should be
+   given to the directory locations of those. Unpack the databases carefully
+   and annotate the root directory where they have been unpacked.
+
+   Then, carefully read the *Databases* section of
+   :ref:`bob.pad.base.installation` on how to correctly setup the
+   ``~/.bob_bio_databases.txt`` file.
+
+   Use the following keywords on the left side of the assignment (see
+   :ref:`bob.pad.face.resources.databases`):
+
+   .. code-block:: text
+
+      [YOUR_REPLAY_ATTACK_DIRECTORY] = /complete/path/to/replayattack-database/
+
+   Notice it is rather important to use the strings as described above,
+   otherwise ``bob.pad.base`` will not be able to correctly load your images.
+
+   Once this step is done, you can proceed with the instructions below.
+
+
+.. _bob.pad.face.baselines.replay_attack:
+
+
+------------
+
+Baselines on REPLAY-ATTACK database
+--------------------------------------
+
+This section summarizes the results of baseline face PAD experiments on the REPLAY-ATTACK (`replayattack`_) database.
+
+
+LBP features of facial region + SVM classifier
+===================================================
+
+Detailed description of this PAD pipe-line is given at :ref:`bob.pad.face.resources.face_pad.lbp_svm_replayattack`.
+
+To run this baseline on the `replayattack`_ database, using the ``grandtest`` protocol, do the following:
+
+.. code-block:: sh
+
+    $ ./bin/spoof.py lbp-svm \
+    --database replay --protocol grandtest --groups train dev eval \
+    --sub-directory <PATH_TO_STORE_THE_RESULTS>
+
+.. tip::
+
+    If you are in `idiap`_ you can use SGE grid to speed-up the calculations.
+    Simply add ``--grid idiap`` argument to the above command. For example:
+
+    .. code-block:: sh
+
+        $ ./bin/spoof.py lbp-svm \
+        --database replay --protocol grandtest --groups train dev eval \
+        --sub-directory <PATH_TO_STORE_THE_RESULTS> \
+        --grid idiap
+
+To understand the settings of this baseline PAD experiment you can check the
+corresponding configuration file: ``bob/pad/face/config/lbp_svm.py``
+
+To evaluate the results computing EER, HTER and plotting ROC you can use the
+following command:
+
+.. code-block:: sh
+
+    $ ./bin/evaluate.py \
+    --dev-files  <PATH_TO_STORE_THE_RESULTS>/grandtest/scores/scores-dev  \
+    --eval-files <PATH_TO_STORE_THE_RESULTS>/grandtest/scores/scores-eval \
+    --legends "LBP features of facial region + SVM classifier + REPLAY-ATTACK database" \
+    -F 7 \
+    --criterion EER \
+    --roc <PATH_TO_STORE_THE_RESULTS>/ROC.pdf
+
+
+The EER/HTER errors for `replayattack`_ database are summarized in the Table below:
+
++-------------------+----------+----------+
+|      Protocol     |  EER,\%  |  HTER,\% |
++===================+==========+==========+
+|   ``grandtest``   |  15.117  |  15.609  |
++-------------------+----------+----------+
+
+The ROC curves for the particular experiment can be downloaded from here:
+
+:download:`ROC curve <img/ROC_lbp_svm_replay_attack.pdf>`
+
+
+------------
+
+Image Quality Measures as features of facial region + SVM classifier
+========================================================================
+
+
+
+
+
+
+
+
+
+
+
 
 
 
diff --git a/doc/img/ROC_lbp_svm_replay_attack.pdf b/doc/img/ROC_lbp_svm_replay_attack.pdf
new file mode 100644
index 00000000..e69de29b
diff --git a/doc/installation.rst b/doc/installation.rst
index f3a8cbf7..504281c7 100644
--- a/doc/installation.rst
+++ b/doc/installation.rst
@@ -1,5 +1,7 @@
 
 
+.. _bob.pad.face.installation:
+
 ==============
  Installation
 ==============
diff --git a/doc/references.rst b/doc/references.rst
index c1233b03..8120f84e 100644
--- a/doc/references.rst
+++ b/doc/references.rst
@@ -1,10 +1,9 @@
 .. vim: set fileencoding=utf-8 :
 
-==========
+===========
 References
-==========
+===========
+
+.. [CAM12]  *I. Chingovska, A. Anjos, and S. Marcel*, **On the effectiveness of local binary patterns in face anti-spoofing**,
+            in: Biometrics Special Interest Group (BIOSIG), 2012 BIOSIG - Proceedings of the International Conference of the, 2012, pp. 1-7.
 
-.. [ChingovskaEffectivnes12]  I. Chingovska, A. Anjos, and S. Marcel, ''On the
-	effectiveness of local binary patterns in face anti- spoofing,'' in
-	Biometrics Special Interest Group (BIOSIG), 2012 BIOSIG- Proceedings of the
-	International Conference of the, 2012, pp. 1-7.
diff --git a/doc/resources.rst b/doc/resources.rst
index 7435c848..c5814500 100644
--- a/doc/resources.rst
+++ b/doc/resources.rst
@@ -4,4 +4,45 @@
 
 ===========
  Resources
-===========
\ No newline at end of file
+===========
+
+This section contains a listing of all ready-to-use resources you can find in
+this package.
+
+
+
+.. _bob.pad.face.resources.databases:
+
+Databases
+------------
+
+
+
+
+
+
+
+
+
+
+.. _bob.pad.face.resources.face_pad:
+
+Available face PAD systems
+------------------------------
+
+These configuration files/resources contain parameters of available face PAD systems/algorithms.
+The configuration files contain at least the following arguments of the ``spoof.py`` script:
+
+    * ``sub_directory``
+    * ``preprocessor``
+    * ``extractor``
+    * ``algorithm``
+
+
+.. _bob.pad.face.resources.face_pad.lbp_svm_replayattack:
+
+LBP features of facial region + SVM for REPLAY-ATTACK
+======================================================
+
+.. automodule:: bob.pad.face.config.lbp_svm
+   :members:
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 443c1dae..7a2d5e34 100644
--- a/setup.py
+++ b/setup.py
@@ -98,6 +98,13 @@ setup(
             'replay = bob.pad.face.config.database.replay:database',
             ],
 
+        # registered configurations:
+        'bob.bio.config': [
+
+            # baselines:
+            'lbp-svm = bob.pad.face.config.lbp_svm',
+            ],
+
         # registered preprocessors:
         'bob.pad.preprocessor': [
             'video-face-crop-preproc-64 = bob.pad.face.config.preprocessor.video_face_crop:video_face_crop_preproc_64_64',
-- 
GitLab