From 1c50cb31d6fb1120b04b4f8889a32e96bc7eb74f Mon Sep 17 00:00:00 2001
From: Olegs NIKISINS <onikisins@italix03.idiap.ch>
Date: Wed, 6 Sep 2017 16:33:33 +0200
Subject: [PATCH] Added one-class GMM config file, modified some other configs

---
 .../face/algorithm/VideoGmmPadAlgorithm.py    |  2 +-
 .../face/config/frame_diff_one_class_svm.py   | 94 ------------------
 .../{qm_lr_aggregated_db.py => qm_lr.py}      |  8 +-
 bob/pad/face/config/qm_one_class_gmm.py       | 96 +++++++++++++++++++
 bob/pad/face/config/qm_svm_aggregated_db.py   |  4 +
 setup.py                                      | 12 ++-
 6 files changed, 112 insertions(+), 104 deletions(-)
 delete mode 100644 bob/pad/face/config/frame_diff_one_class_svm.py
 rename bob/pad/face/config/{qm_lr_aggregated_db.py => qm_lr.py} (94%)
 create mode 100644 bob/pad/face/config/qm_one_class_gmm.py

diff --git a/bob/pad/face/algorithm/VideoGmmPadAlgorithm.py b/bob/pad/face/algorithm/VideoGmmPadAlgorithm.py
index 366e3fd0..39511ae5 100644
--- a/bob/pad/face/algorithm/VideoGmmPadAlgorithm.py
+++ b/bob/pad/face/algorithm/VideoGmmPadAlgorithm.py
@@ -52,7 +52,7 @@ class VideoGmmPadAlgorithm(Algorithm):
 
     def __init__(self,
                  n_components = 1,
-                 random_state = 7,
+                 random_state = 3,
                  frame_level_scores_flag = False):
 
 
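Note: ``random_state`` only seeds the GMM initialization, so this change alters which
local optimum training converges to, not the algorithm itself. A minimal sketch of the
effect, assuming an ``sklearn.mixture.GaussianMixture``-style backend (the wrapped
estimator is not visible in this hunk)::

    # Sketch only: random_state seeds the GMM initialization, making
    # training reproducible; the sklearn backend here is an assumption.
    import numpy as np
    from sklearn.mixture import GaussianMixture

    features = np.random.rand(1000, 10)  # stand-in feature matrix

    gmm_a = GaussianMixture(n_components=1, random_state=3).fit(features)
    gmm_b = GaussianMixture(n_components=1, random_state=3).fit(features)
    assert np.allclose(gmm_a.means_, gmm_b.means_)  # same seed, same model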
diff --git a/bob/pad/face/config/frame_diff_one_class_svm.py b/bob/pad/face/config/frame_diff_one_class_svm.py
deleted file mode 100644
index 320522a1..00000000
--- a/bob/pad/face/config/frame_diff_one_class_svm.py
+++ /dev/null
@@ -1,94 +0,0 @@
-#!/usr/bin/env python2
-# -*- coding: utf-8 -*-
-
-"""
-This file contains configurations to run the Frame Differences and SVM based face PAD baseline.
-The settings are tuned for the Replay-attack database.
-The idea of the algorithm is inherited from the following paper: [AM11]_.
-"""
-
-
-#=======================================================================================
-sub_directory = 'frame_diff_svm'
-"""
-Sub-directory where results will be placed.
-
-You may change this setting using the ``--sub-directory`` command-line option
-or the attribute ``sub_directory`` in a configuration file loaded **after**
-this resource.
-"""
-
-
-#=======================================================================================
-# define preprocessor:
-
-from ..preprocessor import FrameDifference
-
-NUMBER_OF_FRAMES = None # process all frames
-CHECK_FACE_SIZE_FLAG = True # Check size of the face
-MIN_FACE_SIZE = 50 # Minimal size of the face to consider
-
-preprocessor = FrameDifference(number_of_frames = NUMBER_OF_FRAMES,
-                               check_face_size_flag = CHECK_FACE_SIZE_FLAG,
-                               min_face_size = MIN_FACE_SIZE)
-"""
-In the preprocessing stage the frame differences are computed for both facial and non-facial/background
-regions. In this case all frames of the input video are considered, which is selected by
-``number_of_frames = None``. Frames containing faces smaller than the ``min_face_size = 50`` threshold
-are discarded. Both RGB and gray-scale videos are accepted by the preprocessor.
-The preprocessing idea is introduced in [AM11]_.
-"""
-
-
-#=======================================================================================
-# define extractor:
-
-from ..extractor import FrameDiffFeatures
-
-WINDOW_SIZE=20
-OVERLAP=0
-
-extractor = FrameDiffFeatures(window_size=WINDOW_SIZE,
-                              overlap=OVERLAP)
-"""
-In the feature extraction stage five features are extracted for all non-overlapping windows in
-the frame-difference input signals: five features are computed for each window in the
-facial regions, and the same is done for the non-facial regions. The non-overlapping option
-is controlled by ``overlap = 0``. The length of the window is defined by the ``window_size``
-argument.
-The features are introduced in the following paper: [AM11]_.
-"""
-
-
-#=======================================================================================
-# define algorithm:
-
-from ..algorithm import VideoSvmPadAlgorithm
-
-MACHINE_TYPE = 'ONE_CLASS'
-KERNEL_TYPE = 'RBF'
-N_SAMPLES = 10000
-TRAINER_GRID_SEARCH_PARAMS = {'nu': [0.001, 0.01, 0.05, 0.1], 'gamma': [2**P for P in range(-15, 0, 2)]}
-MEAN_STD_NORM_FLAG = True      # enable mean-std normalization
-FRAME_LEVEL_SCORES_FLAG = True # one score per frame(!) in this case
-
-algorithm = VideoSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                 kernel_type = KERNEL_TYPE,
-                                 n_samples = N_SAMPLES,
-                                 trainer_grid_search_params = TRAINER_GRID_SEARCH_PARAMS,
-                                 mean_std_norm_flag = MEAN_STD_NORM_FLAG,
-                                 frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
-"""
-The one-class SVM algorithm with RBF kernel is used to classify the data into *real* and *attack* classes.
-One score is produced for each frame of the input video, ``frame_level_scores_flag = True``.
-A grid search over the SVM parameters is used to select the best settings.
-The grid search is done on a subset of the training data.
-The size of this subset is defined by the ``n_samples`` parameter.
-
-The data is also mean-std normalized, ``mean_std_norm_flag = True``.
-"""
-
-
-
-
-
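For reference, the deleted resource trained a one-class SVM with an RBF kernel,
grid-searching ``nu`` and ``gamma`` on an ``n_samples``-sized subset of the training
data. A rough, illustrative sketch of that procedure using sklearn (the package uses
its own SVM wrapper, and the selection criterion below is a stand-in)::

    # Illustrative grid search over nu/gamma for a one-class RBF SVM,
    # fitted on a random subset of the real-class training features.
    import numpy as np
    from sklearn.svm import OneClassSVM

    real = np.random.rand(2000, 10)   # stand-in real-class features
    idx = np.random.choice(len(real), 1000, replace=False)
    subset = real[idx]                # down-scaled stand-in for N_SAMPLES = 10000

    best_svm, best_score = None, -np.inf
    for nu in [0.001, 0.01, 0.05, 0.1]:
        for gamma in [2 ** p for p in range(-15, 0, 2)]:
            svm = OneClassSVM(kernel='rbf', nu=nu, gamma=gamma).fit(subset)
            score = svm.decision_function(subset).mean()  # stand-in criterion
            if score > best_score:
                best_svm, best_score = svm, score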
diff --git a/bob/pad/face/config/qm_lr_aggregated_db.py b/bob/pad/face/config/qm_lr.py
similarity index 94%
rename from bob/pad/face/config/qm_lr_aggregated_db.py
rename to bob/pad/face/config/qm_lr.py
index 986afb3d..11c4404c 100644
--- a/bob/pad/face/config/qm_lr_aggregated_db.py
+++ b/bob/pad/face/config/qm_lr.py
@@ -2,16 +2,14 @@
 # -*- coding: utf-8 -*-
 
 """
-This file contains configurations to run the Image Quality Measures (IQM) and SVM based face PAD baseline.
+This file contains configurations to run the Image Quality Measures (IQM) and Logistic Regression (LR) based face PAD algorithm.
 The settings of the preprocessor and extractor are tuned for the Replay-attack database.
-In the SVM algorithm the amount of training data is reduced, speeding up the training for
-large data sets, such as the Aggregated PAD database.
 The IQM features used in this algorithm/resource are introduced in the following papers: [WHJ15]_ and [CBVM16]_.
 """
 
 
 #=======================================================================================
-sub_directory = 'qm_svm_aggregated_db'
+sub_directory = 'qm_lr'
 """
 Sub-directory where results will be placed.
 
@@ -88,6 +86,8 @@ algorithm = VideoLRPadAlgorithm(C = C,
 """
 The Logistic Regression is used to classify the data into *real* and *attack* classes.
 One score is produced for each frame of the input video, ``frame_level_scores_flag = True``.
+The sub-sampling of the training data is not used here; the sub-sampling flags keep
+their default ``False`` values.
 """
 
 
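After the rename, the resource reduces to plain logistic regression over the per-frame
IQM features. A minimal sketch, assuming an sklearn-style backend behind
``VideoLRPadAlgorithm`` (the value of ``C``, the feature dimension, and the data are
illustrative)::

    # Sketch: regularized logistic regression on per-frame IQM features;
    # one score per frame, matching frame_level_scores_flag = True.
    import numpy as np
    from sklearn.linear_model import LogisticRegression

    X_train = np.random.rand(2000, 139)   # per-frame features (dimension illustrative)
    y_train = np.repeat([1, 0], 1000)     # 1 = real, 0 = attack

    lr = LogisticRegression(C=1.0).fit(X_train, y_train)  # C value is illustrative

    video = np.random.rand(25, 139)                 # frames of one test video
    frame_scores = lr.predict_proba(video)[:, 1]    # probability of the real class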
diff --git a/bob/pad/face/config/qm_one_class_gmm.py b/bob/pad/face/config/qm_one_class_gmm.py
new file mode 100644
index 00000000..d6e87704
--- /dev/null
+++ b/bob/pad/face/config/qm_one_class_gmm.py
@@ -0,0 +1,96 @@
+#!/usr/bin/env python2
+# -*- coding: utf-8 -*-
+
+"""
+This file contains configurations to run the Image Quality Measures (IQM) and one-class GMM based face PAD algorithm.
+The settings of the preprocessor and extractor are tuned for the Replay-attack database.
+The IQM features used in this algorithm/resource are introduced in the following papers: [WHJ15]_ and [CBVM16]_.
+"""
+
+
+#=======================================================================================
+sub_directory = 'qm_one_class_gmm'
+"""
+Sub-directory where results will be placed.
+
+You may change this setting using the ``--sub-directory`` command-line option
+or the attribute ``sub_directory`` in a configuration file loaded **after**
+this resource.
+"""
+
+
+#=======================================================================================
+# define preprocessor:
+
+from ..preprocessor import VideoFaceCrop
+
+CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
+CROPPED_POSITIONS = {'topleft' : (0,0), 'bottomright' : CROPPED_IMAGE_SIZE}
+FIXED_POSITIONS = None
+MASK_SIGMA = None             # The sigma for random values in areas outside the image
+MASK_NEIGHBORS = 5            # The number of neighbors to consider while extrapolating
+MASK_SEED = None              # The seed for generating random values during extrapolation
+CHECK_FACE_SIZE_FLAG = True   # Check the size of the face
+MIN_FACE_SIZE = 50
+USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (as in [CAM12]_)
+RGB_OUTPUT_FLAG = True        # Return RGB cropped face using local cropper
+
+preprocessor = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
+                             cropped_positions = CROPPED_POSITIONS,
+                             fixed_positions = FIXED_POSITIONS,
+                             mask_sigma = MASK_SIGMA,
+                             mask_neighbors = MASK_NEIGHBORS,
+                             mask_seed = MASK_SEED,
+                             check_face_size_flag = CHECK_FACE_SIZE_FLAG,
+                             min_face_size = MIN_FACE_SIZE,
+                             use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
+                             rgb_output_flag = RGB_OUTPUT_FLAG)
+"""
+In the preprocessing stage the face is cropped from each frame of the input video given the facial annotations.
+The size of the face is normalized to the ``cropped_image_size`` dimensions. Faces smaller than the
+``min_face_size`` threshold are discarded. Setting ``use_local_cropper_flag = True`` selects a face
+cropper similar to the one introduced in [CAM12]_. The preprocessed frame is an RGB facial
+image, which is selected by ``rgb_output_flag = True``.
+"""
+
+
+#=======================================================================================
+# define extractor:
+
+from ..extractor import VideoQualityMeasure
+
+GALBALLY=True
+MSU=True
+DTYPE=None
+
+extractor = VideoQualityMeasure(galbally=GALBALLY,
+                                msu=MSU,
+                                dtype=DTYPE)
+"""
+In the feature extraction stage the Image Quality Measures are extracted from each frame of the preprocessed RGB video.
+The features to be computed are introduced in the following papers: [WHJ15]_ and [CBVM16]_.
+"""
+
+
+#=======================================================================================
+# define algorithm:
+
+from ..algorithm import VideoGmmPadAlgorithm
+
+N_COMPONENTS = 50
+RANDOM_STATE = 3
+FRAME_LEVEL_SCORES_FLAG = True
+
+algorithm = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
+                                 random_state = RANDOM_STATE,
+                                 frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+"""
+A GMM with 50 components (``n_components = 50``) is trained using samples of the *real* class only. The trained
+GMM is then used to classify the data into *real* and *attack* classes.
+One score is produced for each frame of the input video, ``frame_level_scores_flag = True``.
+"""
+
+
+
+
+
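The essence of the new resource: fit a GMM to the *real* class only and score each test
frame by its log-likelihood under that model. A minimal sketch, assuming an
``sklearn.mixture.GaussianMixture``-style backend inside ``VideoGmmPadAlgorithm``
(data and feature dimension are illustrative)::

    # One-class GMM PAD scoring: train on real-class features only,
    # then score frames by log-likelihood (higher = more "real"-like).
    import numpy as np
    from sklearn.mixture import GaussianMixture

    real_features = np.random.rand(5000, 139)   # real-class IQM features (illustrative)

    gmm = GaussianMixture(n_components=50, random_state=3).fit(real_features)

    test_video = np.random.rand(25, 139)          # one row per frame
    frame_scores = gmm.score_samples(test_video)  # frame-level scores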
diff --git a/bob/pad/face/config/qm_svm_aggregated_db.py b/bob/pad/face/config/qm_svm_aggregated_db.py
index 4bd54236..a9deb1a5 100644
--- a/bob/pad/face/config/qm_svm_aggregated_db.py
+++ b/bob/pad/face/config/qm_svm_aggregated_db.py
@@ -110,3 +110,7 @@ The data is also mean-std normalized, ``mean_std_norm_flag = True``.
 """
 
 
+
+
+
+
diff --git a/setup.py b/setup.py
index f92af0e1..25eaac9a 100644
--- a/setup.py
+++ b/setup.py
@@ -84,16 +84,18 @@ setup(
             'qm-svm = bob.pad.face.config.qm_svm',
             'qm-svm-aggregated-db = bob.pad.face.config.qm_svm_aggregated_db',
 
-            'qm-one-class-svm-aggregated-db = bob.pad.face.config.qm_one_class_svm_aggregated_db',
-            'qm-one-class-svm-cascade-aggregated-db = bob.pad.face.config.qm_one_class_svm_cascade_aggregated_db',
-
             'frame-diff-svm = bob.pad.face.config.frame_diff_svm',
             'frame-diff-svm-aggregated-db = bob.pad.face.config.frame_diff_svm_aggregated_db',
 
-            'frame-diff-one-class-svm = bob.pad.face.config.frame_diff_one_class_svm',
+            # baselines using one-class SVM:
+            'qm-one-class-svm-aggregated-db = bob.pad.face.config.qm_one_class_svm_aggregated_db',
+            'qm-one-class-svm-cascade-aggregated-db = bob.pad.face.config.qm_one_class_svm_cascade_aggregated_db',
 
             # baselines using LR:
-            'qm-lr-aggregated-db = bob.pad.face.config.qm_lr_aggregated_db',
+            'qm-lr = bob.pad.face.config.qm_lr', # this pipeline can be used for both individual and Aggregated databases.
+
+            # baselines using GMM:
+            'qm-one-class-gmm = bob.pad.face.config.qm_one_class_gmm', # this pipeline can be used for both individual and Aggregated databases.
             ],
 
         # registered preprocessors:
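Each string above registers a configuration module as a named resource through
setuptools entry points; a sketch of how such a registry can be queried with
``pkg_resources`` (the group name ``'bob.pad.config'`` is an assumption here, the real
group is declared earlier in setup.py, outside this hunk)::

    # Hypothetical lookup of the new resource by name; the entry-point
    # group 'bob.pad.config' is assumed, not shown in the hunk above.
    from pkg_resources import iter_entry_points

    for ep in iter_entry_points('bob.pad.config'):
        if ep.name == 'qm-one-class-gmm':
            config = ep.load()           # imports bob.pad.face.config.qm_one_class_gmm
            print(config.sub_directory)  # -> 'qm_one_class_gmm'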
-- 
GitLab