diff --git a/bob/pad/face/algorithm/VideoSvmPadAlgorithm.py b/bob/pad/face/algorithm/VideoSvmPadAlgorithm.py index e99f7572ee5560bc808643c86c96534569faf9e6..110c07866f644525d86cf6385d13416a602069ad 100644 --- a/bob/pad/face/algorithm/VideoSvmPadAlgorithm.py +++ b/bob/pad/face/algorithm/VideoSvmPadAlgorithm.py @@ -137,9 +137,9 @@ class VideoSvmPadAlgorithm(Algorithm): **Parameters:** - ``frame_containers`` : :py:class:`list` - Each element in the list is a Frame Container, , see ``bob.bio.video.utils.FrameContainer``. - Each frame Container conteins feature vectors for the particular individual/person. + ``frame_containers`` : [FrameContainer] + A list of Frame Containers, see ``bob.bio.video.utils.FrameContainer``. + Each Frame Container contains feature vectors for a particular individual/person. **Returns:** @@ -173,8 +173,8 @@ class VideoSvmPadAlgorithm(Algorithm): **Returns:** - ``combinations`` : :py:class:`list` - List of dictionaries containing the combinations. + ``combinations`` : [:py:class:`dict`] + A list of dictionaries containing the combinations. """ varNames = sorted(input_dict) @@ -257,10 +257,10 @@ class VideoSvmPadAlgorithm(Algorithm): **Parameters:** - ``training_features`` : :py:class:`list` + ``training_features`` : [[FrameContainer], [FrameContainer]] A list containing two elements: [0] - a list of Frame Containers with - feature vectors fot the real class; [1] - a list of Frame Containers with - feature vectors fot the attack class. + feature vectors for the real class; [1] - a list of Frame Containers with + feature vectors for the attack class. ``n_samples`` : :py:class:`int` Number of uniformly selected feature vectors per class. @@ -457,10 +457,10 @@ class VideoSvmPadAlgorithm(Algorithm): **Parameters:** - ``training_features`` : :py:class:`list` + ``training_features`` : [[FrameContainer], [FrameContainer]] A list containing two elements: [0] - a list of Frame Containers with - feature vectors fot the real class; [1] - a list of Frame Containers with - feature vectors fot the attack class. + feature vectors for the real class; [1] - a list of Frame Containers with + feature vectors for the attack class. ``n_samples`` : :py:class:`int` Number of uniformly selected feature vectors per class defining the @@ -589,10 +589,10 @@ class VideoSvmPadAlgorithm(Algorithm): **Parameters:** - ``training_features`` : :py:class:`list` + ``training_features`` : [[FrameContainer], [FrameContainer]] A list containing two elements: [0] - a list of Frame Containers with - feature vectors fot the real class; [1] - a list of Frame Containers with - feature vectors fot the attack class. + feature vectors for the real class; [1] - a list of Frame Containers with + feature vectors for the attack class. ``projector_file`` : :py:class:`str` The file to save the trained projector to. @@ -660,16 +660,14 @@ class VideoSvmPadAlgorithm(Algorithm): An array containing class probabilities for each frame. First column contains probabilities for each frame being a real class. Second column contains probabilities for each frame being an attack class. - Must be writable with the :py:meth:`write_feature` function and - readable with the :py:meth:`read_feature` function. + Must be writable with the ``write_feature`` function and + readable with the ``read_feature`` function.
""" features_array = self.convert_frame_cont_to_array(feature) probabilities = self.machine.predict_class_and_probabilities(features_array)[1] -# probabilities = self.machine.predict_class_and_scores(features_array)[1] - return probabilities @@ -716,12 +714,10 @@ class VideoSvmPadAlgorithm(Algorithm): **Returns:** - ``list_of_scores`` : list + ``list_of_scores`` : [:py:class:`float`] A list containing the scores. """ -# import ipdb; ipdb.set_trace() - if self.frame_level_scores_flag: list_of_scores = self.score(toscore) @@ -733,53 +729,3 @@ class VideoSvmPadAlgorithm(Algorithm): return list_of_scores - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/bob/pad/face/config/database/replay.py b/bob/pad/face/config/database/replay.py index f1cbd70cb6725aa1ff17a172456eecd24d6c8f70..197355ceb13f6d0027948829cd2fbee0d60e1c37 100644 --- a/bob/pad/face/config/database/replay.py +++ b/bob/pad/face/config/database/replay.py @@ -1,11 +1,25 @@ #!/usr/bin/env python +"""`Replayattack`_ is a database for face PAD experiments. + +The Replay-Attack Database for face spoofing consists of 1300 video clips of photo and video attack attempts to 50 clients, +under different lighting conditions. This Database was produced at the Idiap Research Institute, in Switzerland. +The reference citation is [CAM12]_. + +You can download the raw data of the `Replayattack`_ database by following +the link. + +.. include:: links.rst +""" + from bob.pad.face.database import ReplayPadDatabase # Directory where the data files are stored. # This directory is given in the .bob_bio_databases.txt file located in your home directory original_directory = "[YOUR_REPLAY_ATTACK_DIRECTORY]" +"""Value of ``~/.bob_bio_databases.txt`` for this database""" + original_extension = ".mov" # extension of the data files @@ -15,3 +29,17 @@ database = ReplayPadDatabase( original_extension=original_extension, training_depends_on_protocol=True, ) +"""The :py:class:`bob.pad.base.database.PadDatabase` derivative with Replayattack +database settings + +.. warning:: + + This class only provides a programmatic interface to load data in an orderly + manner, respecting usage protocols. It does **not** contain the raw + data files. You should procure those yourself. + +Notice that ``original_directory`` is set to ``[YOUR_REPLAY_ATTACK_DIRECTORY]``. +You must make sure to create ``${HOME}/.bob_bio_databases.txt`` setting this +value to the place where you actually installed the Replayattack Database, as +explained in the section :ref:`bob.pad.face.baselines`. 
+""" \ No newline at end of file diff --git a/bob/pad/face/config/extractor/frame_diff_features.py b/bob/pad/face/config/extractor/frame_diff_features.py new file mode 100644 index 0000000000000000000000000000000000000000..d0e6fb398d4a216ed90877d286b42599793eda02 --- /dev/null +++ b/bob/pad/face/config/extractor/frame_diff_features.py @@ -0,0 +1,19 @@ +#!/usr/bin/env python + +from bob.pad.face.extractor import FrameDiffFeatures + + +#======================================================================================= +# Define instances here: + +window_size=20 +overlap=0 + +frame_diff_feat_extr_w20_over0 = FrameDiffFeatures(window_size=window_size, + overlap=overlap) + +window_size=100 + +frame_diff_feat_extr_w100_over0 = FrameDiffFeatures(window_size=window_size, + overlap=overlap) + diff --git a/bob/pad/face/config/frame_diff_svm.py b/bob/pad/face/config/frame_diff_svm.py new file mode 100644 index 0000000000000000000000000000000000000000..d55a46237352fd68f936f9d4f43a8aec7281a0a3 --- /dev/null +++ b/bob/pad/face/config/frame_diff_svm.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python2 +# -*- coding: utf-8 -*- + +""" +@author: Olegs Nikisins + +This file contains configurations to run Frame Differences and SVM based face PAD baseline. +The settings are tuned for the Replay-attack database. +The idea of the algorithms is inherited from the following paper: [AM11]_. +""" + + +#======================================================================================= +sub_directory = 'frame_diff_svm' +""" +Sub-directory where results will be placed. + +You may change this setting using the ``--sub-directory`` command-line option +or the attribute ``sub_directory`` in a configuration file loaded **after** +this resource. +""" + + +#======================================================================================= +# define preprocessor: + +from ..preprocessor import FrameDifference + +NUMBER_OF_FRAMES = None # process all frames +CHECK_FACE_SIZE_FLAG = True # Check size of the face +MIN_FACE_SIZE = 50 # Minimal size of the face to consider + +preprocessor = FrameDifference(number_of_frames = NUMBER_OF_FRAMES, + check_face_size_flag = CHECK_FACE_SIZE_FLAG, + min_face_size = MIN_FACE_SIZE) +""" +In the preprocessing stage the frame differences are computed for both facial and non-facial/background +regions. In this case all frames of the input video are considered, which is defined by +``number_of_frames = None``. The frames containing faces of the size below ``min_face_size = 50`` threshold +are discarded. Both RGB and gray-scale videos are acceptable by the preprocessor. +The preprocessing idea is introduced in [AM11]_. +""" + + +#======================================================================================= +# define extractor: + +from ..extractor import FrameDiffFeatures + +WINDOW_SIZE=20 +OVERLAP=0 + +extractor = FrameDiffFeatures(window_size=WINDOW_SIZE, + overlap=OVERLAP) +""" +In the feature extraction stage 5 features are extracted for all non-overlapping windows in +the Frame Difference input signals. Five features are computed for each of windows in the +facial face regions, the same is done for non-facial regions. The non-overlapping option +is controlled by ``overlap = 0``. The length of the window is defined by ``window_size`` +argument. +The features are introduced in the following paper: [AM11]_. 
+""" + + +#======================================================================================= +# define algorithm: + +from ..algorithm import VideoSvmPadAlgorithm + +MACHINE_TYPE = 'C_SVC' +KERNEL_TYPE = 'RBF' +N_SAMPLES = 10000 +TRAINER_GRID_SEARCH_PARAMS = {'cost': [2**P for P in range(-3, 14, 2)], 'gamma': [2**P for P in range(-15, 0, 2)]} +MEAN_STD_NORM_FLAG = True # enable mean-std normalization +FRAME_LEVEL_SCORES_FLAG = True # one score per frame(!) in this case + +algorithm = VideoSvmPadAlgorithm(machine_type = MACHINE_TYPE, + kernel_type = KERNEL_TYPE, + n_samples = N_SAMPLES, + trainer_grid_search_params = TRAINER_GRID_SEARCH_PARAMS, + mean_std_norm_flag = MEAN_STD_NORM_FLAG, + frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG) +""" +The SVM algorithm with RBF kernel is used to classify the data into *real* and *attack* classes. +One score is produced for each frame of the input video, ``frame_level_scores_flag = True``. +The grid search of SVM parameters is used to select the successful settings. +The grid search is done on the subset of training data. +The size of this subset is defined by ``n_samples`` parameter. + +The data is also mean-std normalized, ``mean_std_norm_flag = True``. +""" + + + + + diff --git a/bob/pad/face/database/replay.py b/bob/pad/face/database/replay.py index e8f052f2a725f6c8da4172f52ca92636afe4e3b0..fd8bb048677703d1675c44eedce1d732c8161cdf 100644 --- a/bob/pad/face/database/replay.py +++ b/bob/pad/face/database/replay.py @@ -1,12 +1,6 @@ #!/usr/bin/env python2 # -*- coding: utf-8 -*- -""" -Created on Thu May 4 12:03:36 2017 -High level implementation for the REPLAY-ATTACK database - -@author: Olegs Nikisins <olegs.nikisins@idiap.ch> -""" #============================================================================== diff --git a/bob/pad/face/extractor/FrameDiffFeatures.py b/bob/pad/face/extractor/FrameDiffFeatures.py new file mode 100644 index 0000000000000000000000000000000000000000..8560a5fe6257754efa0dbae68bdc1d2a6b8f7e99 --- /dev/null +++ b/bob/pad/face/extractor/FrameDiffFeatures.py @@ -0,0 +1,326 @@ +#!/usr/bin/env python2 +# -*- coding: utf-8 -*- +""" +Created on Wed Jun 14 10:13:21 2017 + +@author: Olegs Nikisins +""" + +#============================================================================== +# Import what is needed here: + +from bob.bio.base.extractor import Extractor + +import numpy as np + +import sys + +import bob.bio.video + +#============================================================================== +# Main body: + +class FrameDiffFeatures(Extractor): + """ + This class is designed to extract features describing frame differences. + + The class allows to compute the following features in the window of the + length defined by ``window_size`` argument: + + 1. The minimum value observed on the cluster + 2. The maximum value observed on the cluster + 3. The mean value observed + 4. The standard deviation on the cluster (unbiased estimator) + 5. The DC ratio (D) as defined by: + + .. math:: + + D(N) = (\sum_{i=1}^N{|FFT_i|}) / (|FFT_0|) + + **Parameters:** + + ``window_size`` : :py:class:`int` + The size of the window to use for feature computation. + + ``overlap`` : :py:class:`int` + Determines the window overlapping; this number has to be between + 0 (no overlapping) and 'window-size'-1. Default: 0. 
+ """ + + def __init__(self, + window_size, + overlap = 0): + + Extractor.__init__(self, + window_size = window_size, + overlap = overlap) + + self.window_size = window_size + self.overlap = overlap + + + #========================================================================== + def dcratio(self, arr): + """ + Calculates the DC ratio as defined by the following formula: + + .. math:: + + D(N) = (\sum_{i=1}^N{|FFT_i|}) / (|FFT_0|) + + **Parameters:** + + ``arr`` : 1D :py:class:`numpy.ndarray` + A 1D array containg frame differences. + + **Returns:** + + ``dcratio`` : :py:class:`float` + Calculated DC ratio. + """ + + if arr.shape[0] <= 1: + return 0. + + res = np.fft.fft(arr.astype('complex128')) + res = np.absolute(res) # absolute value + + if res[0] == 0: + s = sum(res[1:]) + if s > 0: + return sys.float_info.max + elif s < 0: + return -sys.float_info.max + else: + return 0 + + dcratio = sum(res[1:]) / res[0] + + return dcratio + + + #========================================================================== + def remove_nan_rows(self, data): + """ + This function removes rows of nan's from the input array. If the input + array contains nan's only, then an array of ones of the size + (1 x n_features) is returned. + + **Parameters:** + + ``data`` : 2D :py:class:`numpy.ndarray` + An input array of features. Rows - samples, columns - features. + + **Returns:** + + ``ret_arr`` : 2D :py:class:`numpy.ndarray` + Array of features without nan samples. Rows - samples, columns - features. + """ + + d = np.vstack(data) + + ret_arr = d[~np.isnan(d.sum(axis=1)), :] + + if ret_arr.shape[0] == 0: # if array is empty, return array of ones + + ret_arr = np.ones((1, ret_arr.shape[1])) + + return ret_arr + + + #========================================================================== + def cluster_5quantities(self, arr, window_size, overlap): + """ + Calculates the clustered values as described at the paper: Counter- + Measures to Photo Attacks in Face Recognition: a public database and a + baseline, Anjos & Marcel, IJCB'11. + + This script will output a number of clustered observations containing the 5 + described quantities for windows of a configurable size (N): + + 1. The minimum value observed on the cluster + 2. The maximum value observed on the cluster + 3. The mean value observed + 4. The standard deviation on the cluster (unbiased estimator) + 5. The DC ratio (D) as defined by: + + .. math:: + + D(N) = (\sum_{i=1}^N{|FFT_i|}) / (|FFT_0|) + + .. note:: + + We always ignore the first entry from the input array as, by + definition, it is always zero. + + **Parameters:** + + ``arr`` : 1D :py:class:`numpy.ndarray` + A 1D array containg frame differences. + + ``window_size`` : :py:class:`int` + The size of the window to use for feature computation. + + ``overlap`` : :py:class:`int` + Determines the window overlapping; this number has to be between + 0 (no overlapping) and 'window-size'-1. + + **Returns:** + + ``retval`` : 2D :py:class:`numpy.ndarray` + Array of features without nan samples. Rows - samples, columns - features. + Here sample corresponds to features computed from the particular + window of the length ``window_size``. 
+ """ + + retval = np.ndarray((arr.shape[0], 5), dtype='float64') + retval[:] = np.NaN + + for k in range(0, arr.shape[0] - window_size + 1, window_size - overlap): + + obs = arr[k:k + window_size].copy() + + # replace NaN values by set mean so they don't disturb calculations + # much + ok = obs[~np.isnan(obs)] + + obs[np.isnan(obs)] = ok.mean() + + retval[k + window_size - 1] = \ + (obs.min(), obs.max(), obs.mean(), obs.std(ddof=1), self.dcratio(obs)) + + retval = self.remove_nan_rows(retval) # clean-up nan's in the array + + return retval + + + #========================================================================== + def convert_arr_to_frame_cont(self, data): + """ + This function converts an array of samples into a FrameContainer, where + each frame stores features of a particular sample. + + **Parameters:** + + ``data`` : 2D :py:class:`numpy.ndarray` + An input array of features of the size + (Nr. of samples X Nr. of features). + + **Returns:** + + ``frames`` : FrameContainer + Resulting FrameContainer, where each frame stores features of + a particular sample. + """ + + frames = bob.bio.video.FrameContainer() # initialize the FrameContainer + + for idx, sample in enumerate(data): + + frames.add(idx, sample) + + return frames + + + #========================================================================== + def comp_features(self, data, window_size, overlap): + """ + This function computes features for frame differences in the facial and + non-facial regions. + + **Parameters:** + + ``data`` : 2D :py:class:`numpy.ndarray` + An input array of frame differences in facial and non-facial regions. + The first column contains frame differences of facial regions. + The second column contains frame differences of non-facial/background regions. + + ``window_size`` : :py:class:`int` + The size of the window to use for feature computation. + + ``overlap`` : :py:class:`int` + Determines the window overlapping; this number has to be between + 0 (no overlapping) and 'window-size'-1. Default: 0. + + **Returns:** + + ``frames`` : FrameContainer + Features describing frame differences, stored in the FrameContainer. + """ + + d_face = self.cluster_5quantities( data[:, 0], window_size, overlap ) + + d_bg = self.cluster_5quantities( data[:, 1], window_size, overlap ) + + features = np.hstack((d_face, d_bg)) + + frames = self.convert_arr_to_frame_cont(features) + + return frames + + + #========================================================================== + def __call__(self, data): + """ + This function computes features for frame differences in the facial and + non-facial regions. + + **Parameters:** + + ``data`` : 2D :py:class:`numpy.ndarray` + An input array of frame differences in facial and non-facial regions. + The first column contains frame differences of facial regions. + The second column contains frame differences of non-facial/background regions. + + **Returns:** + + ``frames`` : FrameContainer + Features describing frame differences, stored in the FrameContainer. + """ + + frames = self.comp_features(data, self.window_size, self.overlap) + + return frames + + + #========================================================================== + def write_feature(self, frames, file_name): + """ + Writes the given data (that has been generated using the __call__ function of this class) to file. + This method overwrites the write_data() method of the Extractor class. + + **Parameters:** + + ``frames`` : + Data returned by the __call__ method of the class. 
+ + ``file_name`` : :py:class:`str` + Name of the file. + """ + + bob.bio.video.extractor.Wrapper(Extractor()).write_feature(frames, file_name) + + + #========================================================================== + def read_feature(self, file_name): + """ + Reads the extracted features from file. + This method overrides the default implementation of the ``Extractor`` class. + + **Parameters:** + + ``file_name`` : :py:class:`str` + Name of the file. + + **Returns:** + + ``frames`` : :py:class:`bob.bio.video.FrameContainer` + Frames stored in the frame container. + """ + + frames = bob.bio.video.extractor.Wrapper(Extractor()).read_feature(file_name) + + return frames + + +
diff --git a/bob/pad/face/extractor/LBPHistogram.py b/bob/pad/face/extractor/LBPHistogram.py index ee0a583b57f225dbc89e084d941842364171ddb3..a1b15e16a5cc272599b13f9051a30ac8e3ed5547 100644 --- a/bob/pad/face/extractor/LBPHistogram.py +++ b/bob/pad/face/extractor/LBPHistogram.py @@ -7,7 +7,7 @@ import numpy class LBPHistogram(Extractor): """Calculates a normalized LBP histogram over an image. - These features are implemented based on [ChingovskaEffectivnes12]_. + These features are implemented based on [CAM12]_. Parameters ----------
diff --git a/bob/pad/face/extractor/__init__.py b/bob/pad/face/extractor/__init__.py index 18f4b835c9389d6486d8e548a817784646fbb339..310989b454bdd1ebb73ef43b6c4f4a9fd56a72e6 100644 --- a/bob/pad/face/extractor/__init__.py +++ b/bob/pad/face/extractor/__init__.py @@ -3,6 +3,7 @@ from .VideoLBPHistogram import VideoLBPHistogram from .ImageQualityMeasure import ImageQualityMeasure from .VideoDataLoader import VideoDataLoader from .VideoQualityMeasure import VideoQualityMeasure +from .FrameDiffFeatures import FrameDiffFeatures def __appropriate__(*args): """Says object was actually declared here, and not in the import module. @@ -28,5 +29,6 @@ __appropriate__( ImageQualityMeasure, VideoQualityMeasure, VideoDataLoader, + FrameDiffFeatures, ) __all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/bob/pad/face/preprocessor/VideoFaceCrop.py b/bob/pad/face/preprocessor/VideoFaceCrop.py index 3ea266c98fece64d37d3d751c5c14ad894678ef1..02a4371afec104036954e946bd02fd68ae9b44f1 100644 --- a/bob/pad/face/preprocessor/VideoFaceCrop.py +++ b/bob/pad/face/preprocessor/VideoFaceCrop.py @@ -77,7 +77,7 @@ class VideoFaceCrop(Preprocessor, object): Default: ``False``. ``kwargs`` - Remaining keyword parameters passed to the :py:class:`Base` constructor, such as ``color_channel`` or ``dtype``. + Remaining keyword parameters passed to the Base constructor, such as ``color_channel`` or ``dtype``. """ #==========================================================================
diff --git a/doc/api.rst b/doc/api.rst new file mode 100644 index 0000000000000000000000000000000000000000..4d29355e650ea5fb22acf9ce2fe438ddd99b2df4 --- /dev/null +++ b/doc/api.rst @@ -0,0 +1,37 @@ +.. vim: set fileencoding=utf-8 : + +.. _bob.pad.face.api: + +============ + Python API +============ + +This section lists all the functionality available in this library for running face PAD experiments. + + +Database Interfaces +------------------------------ + + +REPLAY-ATTACK Database +======================== + +.. automodule:: bob.pad.face.database.replay + + +Pre-processors +------------------------------ + +.. automodule:: bob.pad.face.preprocessor + + +Feature Extractors +------------------------------ + +.. automodule:: bob.pad.face.extractor + + +Matching Algorithms +------------------------------ + +..
automodule:: bob.pad.face.algorithm \ No newline at end of file
diff --git a/doc/baselines.rst b/doc/baselines.rst index 7569495f0bd047988573b14191e8ef71ef49a4d2..a891a61ae6a1ca10556eec3ba3057d7a21b86b4c 100644 --- a/doc/baselines.rst +++ b/doc/baselines.rst @@ -193,4 +193,52 @@ The ROC curves for the particular experiment can be downloaded from here: ------------ +Frame differences based features (motion analysis) + SVM classifier +======================================================================== + +A detailed description of this PAD pipe-line is given in :ref:`bob.pad.face.resources.face_pad.frame_diff_svm_replayattack`. + +To run this baseline on the `replayattack`_ database, using the ``grandtest`` protocol, execute the following: + +.. code-block:: sh + + $ ./bin/spoof.py frame-diff-svm \ + --database replay --protocol grandtest --groups train dev eval \ + --sub-directory <PATH_TO_STORE_THE_RESULTS> + +.. tip:: + + Similarly to the tip above, you can run this baseline in parallel. + +To understand the settings of this baseline PAD experiment, you can check the +corresponding configuration file: ``bob/pad/face/config/frame_diff_svm.py`` + +To evaluate the results, computing the EER and HTER and plotting the ROC, you can use the +following command: + +.. code-block:: sh + + ./bin/evaluate.py \ + --dev-files <PATH_TO_STORE_THE_RESULTS>/grandtest/scores/scores-dev \ + --eval-files <PATH_TO_STORE_THE_RESULTS>/grandtest/scores/scores-eval \ + --legends "10 features for each window in Frame Differences + SVM classifier + REPLAY-ATTACK database" \ + -F 7 \ + --criterion EER \ + --roc <PATH_TO_STORE_THE_RESULTS>/ROC.pdf + +The EER/HTER errors for the `replayattack`_ database are summarized in the table below: + ++-------------------+----------+----------+ +| Protocol | EER,\% | HTER,\% | ++===================+==========+==========+ +| ``grandtest`` | 11.752 | 13.195 | ++-------------------+----------+----------+ + +The ROC curves for the particular experiment can be downloaded from here: + +:download:`ROC curve <img/ROC_frame_diff_svm_replay_attack.pdf>` + +------------ + + .. include:: links.rst
diff --git a/doc/img/ROC_frame_diff_svm_replay_attack.pdf b/doc/img/ROC_frame_diff_svm_replay_attack.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a04d2b6c36a094b8ac1d941c812c8e724c0c083c Binary files /dev/null and b/doc/img/ROC_frame_diff_svm_replay_attack.pdf differ
diff --git a/doc/index.rst b/doc/index.rst index a260d13f553315f56fd1407ed957d81afaa60135..fda461eacc018b1a0541c6b1dcb084586916eee3 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -24,7 +24,7 @@ Users Guide baselines references resources - py_api + api .. todolist::
diff --git a/doc/py_api.rst b/doc/py_api.rst deleted file mode 100644 index be29aac297535990753e19e6adb5ae437289a09c..0000000000000000000000000000000000000000 --- a/doc/py_api.rst +++ /dev/null @@ -1,29 +0,0 @@ -.. vim: set fileencoding=utf-8 : - -.. _bob.pad.face.py_api: - -=================================== - Tools implemented in bob.pad.face -=================================== - -This section listst all the functionality available in this library allowing to run face PAD experiments.
- -Databases -=================================== - -Image Preprocessors -=================================== - -Video Preprocessors -=================================== - -Image Extractors -=================================== - -Video Extractors -=================================== - -Image Extractors -=================================== - - diff --git a/doc/references.rst b/doc/references.rst index 911101849170b2dd52a7cd6d1a885887bf7b9166..9b34385ac989b36a6792fcf40dc75187603218b6 100644 --- a/doc/references.rst +++ b/doc/references.rst @@ -12,3 +12,6 @@ References .. [CBVM16] *A. Costa-Pazo, S. Bhattacharjee, E. Vazquez-Fernandez and S. Marcel*, **The Replay-Mobile Face Presentation-Attack Database**, in: Biometrics Special Interest Group (BIOSIG), 2016 BIOSIG - Proceedings of the International Conference of the, 2016, pp. 1-7. + +.. [AM11] *A. Anjos and S. Marcel*, **Counter-measures to photo attacks in face recognition: A public database and a baseline**, + in: 2011 International Joint Conference on Biometrics (IJCB), Washington, DC, 2011, pp. 1-7. diff --git a/doc/resources.rst b/doc/resources.rst index 7b9d574bc3e99c013c3cac8dc84f8d436745dbec..f943abba35447a02e73dca63e70b6be90bfce79a 100644 --- a/doc/resources.rst +++ b/doc/resources.rst @@ -10,19 +10,27 @@ This section contains a listing of all ready-to-use resources you can find in this package. +--------------------------------- + .. _bob.pad.face.resources.databases: Databases ------------ +These configuration files/resources contain entry points for the ``--database`` command line argument of the +``spoof.py`` script. +.. _bob.pad.face.resources.databases.replay: +Replay-attack Database +================================================================================ +.. automodule:: bob.pad.face.config.database.replay + :members: - - +--------------------------------- .. _bob.pad.face.resources.face_pad: @@ -42,7 +50,7 @@ The configuration files contain at least the following arguments of the ``spoof. .. _bob.pad.face.resources.face_pad.lbp_svm_replayattack: LBP features of facial region + SVM for REPLAY-ATTACK -====================================================== +================================================================================ .. automodule:: bob.pad.face.config.lbp_svm :members: @@ -54,4 +62,13 @@ Image Quality Measures as features of facial region + SVM for REPLAY-ATTACK ================================================================================ .. automodule:: bob.pad.face.config.qm_svm - :members: \ No newline at end of file + :members: + + +.. _bob.pad.face.resources.face_pad.frame_diff_svm_replayattack: + +Frame differences based features (motion analysis) + SVM for REPLAY-ATTACK +================================================================================ + +.. 
automodule:: bob.pad.face.config.frame_diff_svm + :members: diff --git a/setup.py b/setup.py index 338b4dcfe4b03fda511137b5ae065a7604822587..13ada3f681ca3b229b4a059ed700f8b52a976cd6 100644 --- a/setup.py +++ b/setup.py @@ -104,6 +104,7 @@ setup( # baselines: 'lbp-svm = bob.pad.face.config.lbp_svm', 'qm-svm = bob.pad.face.config.qm_svm', + 'frame-diff-svm = bob.pad.face.config.frame_diff_svm', ], # registered preprocessors: @@ -120,6 +121,8 @@ setup( 'bob.pad.extractor': [ 'video-lbp-histogram-extractor-n8r1-uniform = bob.pad.face.config.extractor.video_lbp_histogram:video_lbp_histogram_extractor_n8r1_uniform', 'video-quality-measure-galbally-msu = bob.pad.face.config.extractor.video_quality_measure:video_quality_measure_galbally_msu', + 'frame-diff-feat-extr-w20-over0 = bob.pad.face.config.extractor.frame_diff_features:frame_diff_feat_extr_w20_over0', + 'frame-diff-feat-extr-w100-over0 = bob.pad.face.config.extractor.frame_diff_features:frame_diff_feat_extr_w100_over0', ], # registered algorithms: