diff --git a/bob/pad/face/config/lbp_svm.py b/bob/pad/face/config/lbp_svm.py
index 5ba874dc20f32f2e36b1322dcc507fbb84a3046c..3c95b9850cf7bb4745c6174fe5fdd2a92d25d371 100644
--- a/bob/pad/face/config/lbp_svm.py
+++ b/bob/pad/face/config/lbp_svm.py
@@ -5,13 +5,8 @@
 @author: Olegs Nikisins
 
 This file contains configurations to run LBP and SVM based face PAD baseline.
-
 The settings are tuned for the Replay-attack database.
-
-The idea of the algorithm is introduced in the following paper:
-
-[CAM12]_
-
+The idea of the algorithm is introduced in the following paper: [CAM12]_.
 However some settings are different from the ones introduced in the paper.
 """
diff --git a/bob/pad/face/config/qm_svm.py b/bob/pad/face/config/qm_svm.py
new file mode 100644
index 0000000000000000000000000000000000000000..dcf1473c76f85b3ea806324e55d4f60152c6e95c
--- /dev/null
+++ b/bob/pad/face/config/qm_svm.py
@@ -0,0 +1,104 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+
+"""
+@author: Olegs Nikisins
+
+This file contains configurations to run the Image Quality Measures (IQM) and SVM based face PAD baseline.
+The settings are tuned for the Replay-attack database.
+The IQM features used in this algorithm/resource are introduced in the following papers: [WHJ15]_ and [CBVM16]_.
+"""
+
+
+#=======================================================================================
+sub_directory = 'qm_svm'
+"""
+Sub-directory where results will be placed.
+
+You may change this setting using the ``--sub-directory`` command-line option
+or the attribute ``sub_directory`` in a configuration file loaded **after**
+this resource.
+"""
+
+
+#=======================================================================================
+# define preprocessor:
+
+from ..preprocessor import VideoFaceCrop
+
+CROPPED_IMAGE_SIZE = (64, 64)    # The size of the resulting face
+CROPPED_POSITIONS = {'topleft' : (0, 0), 'bottomright' : CROPPED_IMAGE_SIZE}
+FIXED_POSITIONS = None
+MASK_SIGMA = None                # The sigma for random values in areas outside the image
+MASK_NEIGHBORS = 5               # The number of neighbors to consider while extrapolating
+MASK_SEED = None                 # The seed for generating random values during extrapolation
+CHECK_FACE_SIZE_FLAG = True      # Check the size of the face
+MIN_FACE_SIZE = 50               # Minimal size of the face to be accepted
+USE_LOCAL_CROPPER_FLAG = True    # Use the local face cropping class (identical to Ivana's paper)
+RGB_OUTPUT_FLAG = True           # Return the RGB cropped face using the local cropper
+
+preprocessor = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
+                             cropped_positions = CROPPED_POSITIONS,
+                             fixed_positions = FIXED_POSITIONS,
+                             mask_sigma = MASK_SIGMA,
+                             mask_neighbors = MASK_NEIGHBORS,
+                             mask_seed = MASK_SEED,
+                             check_face_size_flag = CHECK_FACE_SIZE_FLAG,
+                             min_face_size = MIN_FACE_SIZE,
+                             use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
+                             rgb_output_flag = RGB_OUTPUT_FLAG)
+"""
+In the preprocessing stage, the face is cropped in each frame of the input video given the facial annotations.
+The size of the face is normalized to ``cropped_image_size`` dimensions. Faces with a size below the
+``min_face_size`` threshold are discarded. The preprocessor is similar to the one introduced in
+[CAM12]_, which is selected by setting ``use_local_cropper_flag = True``. The preprocessed frame is an RGB
+facial image, which is selected by setting ``rgb_output_flag = True``.
+""" + + +#======================================================================================= +# define extractor: + +from ..extractor import VideoQualityMeasure + +GALBALLY=True +MSU=True +DTYPE=None + +extractor = VideoQualityMeasure(galbally=GALBALLY, + msu=MSU, + dtype=DTYPE) +""" +In the feature extraction stage the Image Quality Measures are extracted from each frame of the preprocessed RGB video. +The features to be computed are introduced in the following papers: [WHJ15]_ and [CBVM16]_. +""" + + +#======================================================================================= +# define algorithm: + +from ..algorithm import VideoSvmPadAlgorithm + +MACHINE_TYPE = 'C_SVC' +KERNEL_TYPE = 'RBF' +N_SAMPLES = 10000 +TRAINER_GRID_SEARCH_PARAMS = {'cost': [2**P for P in range(-3, 14, 2)], 'gamma': [2**P for P in range(-15, 0, 2)]} +MEAN_STD_NORM_FLAG = True # enable mean-std normalization +FRAME_LEVEL_SCORES_FLAG = True # one score per frame(!) in this case + +algorithm = VideoSvmPadAlgorithm(machine_type = MACHINE_TYPE, + kernel_type = KERNEL_TYPE, + n_samples = N_SAMPLES, + trainer_grid_search_params = TRAINER_GRID_SEARCH_PARAMS, + mean_std_norm_flag = MEAN_STD_NORM_FLAG, + frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG) +""" +The SVM algorithm with RBF kernel is used to classify the data into *real* and *attack* classes. +One score is produced for each frame of the input video, ``frame_level_scores_flag = True``. +The grid search of SVM parameters is used to select the successful settings. +The grid search is done on the subset of training data. +The size of this subset is defined by ``n_samples`` parameter. + +The data is also mean-std normalized, ``mean_std_norm_flag = True``. +""" + diff --git a/doc/baselines.rst b/doc/baselines.rst index 473af1faf81816ae02d7e36fe635129da3149090..7569495f0bd047988573b14191e8ef71ef49a4d2 100644 --- a/doc/baselines.rst +++ b/doc/baselines.rst @@ -93,7 +93,7 @@ LBP features of facial region + SVM classifier Detailed description of this PAD pipe-line is given at :ref:`bob.pad.face.resources.face_pad.lbp_svm_replayattack`. -To run this baseline on the `replayattack`_ database, using the ``grandtest`` protocol, do the following: +To run this baseline on the `replayattack`_ database, using the ``grandtest`` protocol, execute the following: .. code-block:: sh @@ -148,18 +148,49 @@ The ROC curves for the particular experiment can be downloaded from here: Image Quality Measures as features of facial region + SVM classifier ======================================================================== +Detailed description of this PAD pipe-line is given at :ref:`bob.pad.face.resources.face_pad.qm_svm_replayattack`. +To run this baseline on the `replayattack`_ database, using the ``grandtest`` protocol, execute the following: +.. code-block:: sh + $ ./bin/spoof.py qm-svm \ + --database replay --protocol grandtest --groups train dev eval \ + --sub-directory <PATH_TO_STORE_THE_RESULTS> +.. tip:: + Similarly to the tip above you can run this baseline in parallel. +To understand the settings of this baseline PAD experiment you can check the +corresponding configuration file: ``bob/pad/face/config/qm_svm.py`` +To evaluate the results computing EER, HTER and plotting ROC you can use the +following command: +.. 
+.. code-block:: sh
+
+    ./bin/evaluate.py \
+    --dev-files  <PATH_TO_STORE_THE_RESULTS>/grandtest/scores/scores-dev  \
+    --eval-files <PATH_TO_STORE_THE_RESULTS>/grandtest/scores/scores-eval \
+    --legends "IQM features of facial region + SVM classifier + REPLAY-ATTACK database" \
+    -F 7 \
+    --criterion EER \
+    --roc <PATH_TO_STORE_THE_RESULTS>/ROC.pdf
+
+The EER/HTER errors for the `replayattack`_ database are summarized in the table below:
+
++-------------------+----------+----------+
+|   Protocol        |  EER,\%  |  HTER,\% |
++===================+==========+==========+
+|   ``grandtest``   |  4.321   |  4.570   |
++-------------------+----------+----------+
+
+The ROC curves for the particular experiment can be downloaded from here:
+
+:download:`ROC curve <img/ROC_iqm_svm_replay_attack.pdf>`
+
+------------
+
 
 .. include:: links.rst
diff --git a/doc/img/ROC_iqm_svm_replay_attack.pdf b/doc/img/ROC_iqm_svm_replay_attack.pdf
new file mode 100644
index 0000000000000000000000000000000000000000..a4524065a5d3f1e41e547ae51f63859db6c505d9
Binary files /dev/null and b/doc/img/ROC_iqm_svm_replay_attack.pdf differ
diff --git a/doc/img/ROC_lbp_svm_replay_attack.pdf b/doc/img/ROC_lbp_svm_replay_attack.pdf
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..700f2f2ff4f617b49ba18a9cb52863c679caaa82 100644
Binary files a/doc/img/ROC_lbp_svm_replay_attack.pdf and b/doc/img/ROC_lbp_svm_replay_attack.pdf differ
diff --git a/doc/references.rst b/doc/references.rst
index 8120f84e9aeb234fa5fc8fcf0813065cc4b135fb..911101849170b2dd52a7cd6d1a885887bf7b9166 100644
--- a/doc/references.rst
+++ b/doc/references.rst
@@ -7,3 +7,8 @@ References
 .. [CAM12] *I. Chingovska, A. Anjos, and S. Marcel*, **On the effectiveness of local binary patterns in face anti-spoofing**,
            in: Biometrics Special Interest Group (BIOSIG), 2012 BIOSIG - Proceedings of the International Conference of the, 2012, pp. 1-7.
+
+.. [WHJ15] *D. Wen, H. Han and A. K. Jain*, **Face Spoof Detection with Image Distortion Analysis**,
+           in: IEEE Transactions on Information Forensics and Security, 2015.
+
+.. [CBVM16] *A. Costa-Pazo, S. Bhattacharjee, E. Vazquez-Fernandez and S. Marcel*, **The Replay-Mobile Face Presentation-Attack Database**,
+            in: Biometrics Special Interest Group (BIOSIG), 2016 BIOSIG - Proceedings of the International Conference of the, 2016, pp. 1-7.
diff --git a/doc/resources.rst b/doc/resources.rst
index c5814500208bc404fa90a25e4db83973a87a7d34..7b9d574bc3e99c013c3cac8dc84f8d436745dbec 100644
--- a/doc/resources.rst
+++ b/doc/resources.rst
@@ -45,4 +45,13 @@ LBP features of facial region + SVM for REPLAY-ATTACK
 ======================================================
 
 .. automodule:: bob.pad.face.config.lbp_svm
+   :members:
+
+
+.. _bob.pad.face.resources.face_pad.qm_svm_replayattack:
+
+Image Quality Measures as features of facial region + SVM for REPLAY-ATTACK
+================================================================================
+
+.. automodule:: bob.pad.face.config.qm_svm
    :members:
\ No newline at end of file
diff --git a/setup.py b/setup.py
index 7a2d5e34d244bdbbc8abe7b29779735e7f14c389..116ef87a3ecd4afa947dfcf904ee833949483343 100644
--- a/setup.py
+++ b/setup.py
@@ -103,6 +103,7 @@ setup(
 
             # baselines:
             'lbp-svm = bob.pad.face.config.lbp_svm',
+            'qm-svm = bob.pad.face.config.qm_svm',
 
             # registered preprocessors:
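
The ``qm-svm`` entry point registered above makes the new configuration loadable by name through
``./bin/spoof.py qm-svm``. A minimal sketch of loading the configuration module directly and
inspecting the objects it defines, assuming ``bob.pad.face`` and its dependencies are installed
(the attribute names come from ``qm_svm.py`` above)::

    import importlib

    # Import the configuration module added by this change.
    config = importlib.import_module('bob.pad.face.config.qm_svm')

    # The module exposes the building blocks of the PAD pipeline as attributes.
    print(config.sub_directory)       # 'qm_svm'
    print(type(config.preprocessor))  # VideoFaceCrop
    print(type(config.extractor))     # VideoQualityMeasure
    print(type(config.algorithm))     # VideoSvmPadAlgorithm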