From b00b936b33de6fa3ede12595d7a2e3a53aedb8ab Mon Sep 17 00:00:00 2001
From: Amir MOHAMMADI <amir.mohammadi@idiap.ch>
Date: Thu, 23 Nov 2017 12:16:02 +0100
Subject: [PATCH] PEP8 formatting done by yapf -ri .

---
 bob/pad/face/__init__.py                      |   1 -
 .../algorithm/VideoCascadeSvmPadAlgorithm.py  | 314 ++++++++-------
 .../face/algorithm/VideoGmmPadAlgorithm.py    |  76 ++--
 bob/pad/face/algorithm/VideoLRPadAlgorithm.py | 147 +++----
 .../face/algorithm/VideoSvmPadAlgorithm.py    | 268 ++++++++-----
 bob/pad/face/algorithm/__init__.py            |   1 +
 bob/pad/face/config/aggregated_db.py          |   3 +-
 .../video_cascade_svm_pad_algorithm.py        | 233 +++++------
 .../algorithm/video_gmm_pad_algorithm.py      | 179 ++++-----
 .../algorithm/video_svm_pad_algorithm.py      |  64 ++--
 bob/pad/face/config/database/aggregated_db.py |   5 +-
 bob/pad/face/config/database/mifs.py          |   5 +-
 bob/pad/face/config/database/msu_mfsd.py      |   6 +-
 bob/pad/face/config/database/replay_attack.py |   7 +-
 bob/pad/face/config/database/replay_mobile.py |   5 +-
 .../config/extractor/frame_diff_features.py   |  12 +-
 .../config/extractor/video_lbp_histogram.py   |  26 +-
 .../config/extractor/video_quality_measure.py |  12 +-
 bob/pad/face/config/frame_diff_svm.py         |  54 ++-
 .../config/frame_diff_svm_aggregated_db.py    |  69 ++--
 bob/pad/face/config/grid.py                   |  14 +-
 bob/pad/face/config/lbp_svm.py                |  97 ++---
 bob/pad/face/config/lbp_svm_aggregated_db.py  | 111 +++---
 bob/pad/face/config/mifs.py                   |   4 +-
 bob/pad/face/config/msu_mfsd.py               |   3 +-
 bob/pad/face/config/preprocessor/filename.py  |   1 -
 .../config/preprocessor/video_face_crop.py    |  95 +++--
 .../preprocessor/video_sparse_coding.py       |  35 +-
 bob/pad/face/config/qm_lr.py                  |  65 ++--
 bob/pad/face/config/qm_one_class_gmm.py       |  66 ++--
 .../config/qm_one_class_svm_aggregated_db.py  |  90 +++--
 .../qm_one_class_svm_cascade_aggregated_db.py |  68 ++--
 bob/pad/face/config/qm_svm.py                 |  79 ++--
 bob/pad/face/config/qm_svm_aggregated_db.py   |  96 +++--
 bob/pad/face/config/replay_attack.py          |   3 +-
 bob/pad/face/config/replay_mobile.py          |   3 +-
 bob/pad/face/database/__init__.py             |   8 +-
 bob/pad/face/database/aggregated_db.py        | 340 ++++++++++------
 bob/pad/face/database/mifs.py                 |  54 +--
 bob/pad/face/database/msu_mfsd.py             |  84 ++--
 bob/pad/face/database/replay.py               |  82 ++--
 bob/pad/face/database/replay_mobile.py        |  80 ++--
 bob/pad/face/extractor/FrameDiffFeatures.py   |  40 +-
 bob/pad/face/extractor/ImageQualityMeasure.py |  20 +-
 bob/pad/face/extractor/LBPHistogram.py        |  59 ++-
 bob/pad/face/extractor/VideoDataLoader.py     |  18 +-
 bob/pad/face/extractor/VideoLBPHistogram.py   |  36 +-
 bob/pad/face/extractor/VideoQualityMeasure.py |  29 +-
 bob/pad/face/preprocessor/FrameDifference.py  | 119 +++---
 bob/pad/face/preprocessor/ImageFaceCrop.py    |  40 +-
 bob/pad/face/preprocessor/VideoFaceCrop.py    | 142 ++++---
 .../face/preprocessor/VideoSparseCoding.py    | 280 ++++++++------
 bob/pad/face/preprocessor/__init__.py         |   1 -
 bob/pad/face/test/test.py                     | 362 +++++++++---------
 bob/pad/face/test/test_databases.py           | 172 ++++++---
 bob/pad/face/utils/face_detection_utils.py    |  68 ++--
 bootstrap-buildout.py                         |  99 ++---
 doc/conf.py                                   |  54 ++-
 setup.py                                      |  75 ++--
 59 files changed, 2484 insertions(+), 2095 deletions(-)

diff --git a/bob/pad/face/__init__.py b/bob/pad/face/__init__.py
index 37b53c4f..d1ea6f77 100644
--- a/bob/pad/face/__init__.py
+++ b/bob/pad/face/__init__.py
@@ -11,4 +11,3 @@ def get_config():
 
 # gets sphinx autodoc done right - don't remove it
 __all__ = [_ for _ in dir() if not _.startswith('_')]
-
diff --git a/bob/pad/face/algorithm/VideoCascadeSvmPadAlgorithm.py b/bob/pad/face/algorithm/VideoCascadeSvmPadAlgorithm.py
index c5690a61..5da451bc 100644
--- a/bob/pad/face/algorithm/VideoCascadeSvmPadAlgorithm.py
+++ b/bob/pad/face/algorithm/VideoCascadeSvmPadAlgorithm.py
@@ -28,6 +28,7 @@ from bob.bio.video.utils import FrameContainer
 #==============================================================================
 # Main body :
 
+
 class VideoCascadeSvmPadAlgorithm(Algorithm):
     """
     This class is designed to train the **cascade** of SVMs given Frame Containers
@@ -83,23 +84,24 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
     """
 
     def __init__(self,
-                 machine_type = 'C_SVC',
-                 kernel_type = 'RBF',
-                 svm_kwargs = {'cost': 1, 'gamma': 0},
-                 N = 2,
-                 pos_scores_slope = 0.01,
-                 frame_level_scores_flag = False):
-
-
-        Algorithm.__init__(self,
-                           machine_type = machine_type,
-                           kernel_type = kernel_type,
-                           svm_kwargs = svm_kwargs,
-                           N = N,
-                           pos_scores_slope = pos_scores_slope,
-                           frame_level_scores_flag = frame_level_scores_flag,
-                           performs_projection=True,
-                           requires_projector_training=True)
+                 machine_type='C_SVC',
+                 kernel_type='RBF',
+                 svm_kwargs={'cost': 1,
+                             'gamma': 0},
+                 N=2,
+                 pos_scores_slope=0.01,
+                 frame_level_scores_flag=False):
+
+        Algorithm.__init__(
+            self,
+            machine_type=machine_type,
+            kernel_type=kernel_type,
+            svm_kwargs=svm_kwargs,
+            N=N,
+            pos_scores_slope=pos_scores_slope,
+            frame_level_scores_flag=frame_level_scores_flag,
+            performs_projection=True,
+            requires_projector_training=True)
 
         self.machine_type = machine_type
         self.kernel_type = kernel_type
@@ -108,13 +110,12 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
         self.pos_scores_slope = pos_scores_slope
         self.frame_level_scores_flag = frame_level_scores_flag
 
-        self.pca_projector_file_name = "pca_projector" # pca machine will be saved to .hdf5 file with this name
-        self.svm_projector_file_name = "svm_projector" # svm machines will be saved to .hdf5 files with this name augumented by machine number
+        self.pca_projector_file_name = "pca_projector"  # pca machine will be saved to .hdf5 file with this name
+        self.svm_projector_file_name = "svm_projector"  # svm machines will be saved to .hdf5 files with this name augmented by machine number
 
         self.pca_machine = None
         self.svm_machines = None
 
-
     #==========================================================================
     def convert_frame_cont_to_array(self, frame_container):
         """
@@ -151,7 +152,6 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
 
         return features_array
 
-
     #==========================================================================
     def convert_list_of_frame_cont_to_array(self, frame_containers):
         """
@@ -176,15 +176,15 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
 
         for frame_container in frame_containers:
 
-            video_features_array = self.convert_frame_cont_to_array(frame_container)
+            video_features_array = self.convert_frame_cont_to_array(
+                frame_container)
 
-            feature_vectors.append( video_features_array )
+            feature_vectors.append(video_features_array)
 
         features_array = np.vstack(feature_vectors)
 
         return features_array
 
-
     #==========================================================================
     def comp_prediction_precision(self, machine, real, attack):
         """
@@ -214,13 +214,16 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
 
         samples_num = len(labels_real) + len(labels_attack)
 
-        precision = ( np.sum(labels_real == 1) + np.sum(labels_attack == -1) ).astype( np.float ) / samples_num
+        precision = (np.sum(labels_real == 1) + np.sum(labels_attack == -1)
+                     ).astype(np.float) / samples_num
 
         return precision
 
-
     #==========================================================================
-    def mean_std_normalize(self, features, features_mean= None, features_std = None):
+    def mean_std_normalize(self,
+                           features,
+                           features_mean=None,
+                           features_std=None):
         """
         The features in the input 2D array are mean-std normalized.
         The rows are samples, the columns are features. If ``features_mean``
@@ -262,7 +265,7 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
 
         row_norm_list = []
 
-        for row in features: # row is a sample
+        for row in features:  # row is a sample
 
             row_norm = (row - features_mean) / features_std
 
@@ -272,7 +275,6 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
 
         return features_norm, features_mean, features_std
 
-
     #==========================================================================
     def norm_train_data(self, real, attack, one_class_flag):
         """
@@ -308,22 +310,27 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
             Standard deviation of the features.
         """
 
-        if not( one_class_flag ): # two-class SVM case
+        if not (one_class_flag):  # two-class SVM case
 
             features = np.vstack([real, attack])
-            features_norm, features_mean, features_std = self.mean_std_normalize(features)
-            real_norm =   features_norm[0:real.shape[0], :] # The array is now normalized
-            attack_norm = features_norm[real.shape[0]:, :] # The array is now normalized
+            features_norm, features_mean, features_std = self.mean_std_normalize(
+                features)
+            real_norm = features_norm[0:real.shape[
+                0], :]  # The array is now normalized
+            attack_norm = features_norm[real.shape[
+                0]:, :]  # The array is now normalized
 
-        else: # one-class SVM case
+        else:  # one-class SVM case
 
-            real_norm, features_mean, features_std = self.mean_std_normalize(real) # use only real class to compute normalizers
+            real_norm, features_mean, features_std = self.mean_std_normalize(
+                real)  # use only real class to compute normalizers
             attack_norm = []
+
+
 #            attack_norm = self.mean_std_normalize(attack, features_mean, features_std)
 
         return real_norm, attack_norm, features_mean, features_std
 
-
     #==========================================================================
     def train_pca(self, data):
         """
@@ -349,17 +356,17 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
         # 1. Normalize the training data:
         data_norm, features_mean, features_std = self.mean_std_normalize(data)
 
-        trainer = bob.learn.linear.PCATrainer() # Creates a PCA trainer
+        trainer = bob.learn.linear.PCATrainer()  # Creates a PCA trainer
 
-        [machine, eig_vals] = trainer.train(data_norm)  # Trains the machine with the given data
+        [machine, eig_vals] = trainer.train(
+            data_norm)  # Trains the machine with the given data
 
         # Set the normalizers for the PCA machine, needed to normalize the test samples.
-        machine.input_subtract = features_mean # subtract the mean of train data
-        machine.input_divide   = features_std  # divide by std of train data
+        machine.input_subtract = features_mean  # subtract the mean of train data
+        machine.input_divide = features_std  # divide by std of train data
 
         return machine, eig_vals
 
-
     #==========================================================================
     def train_svm(self, real, attack, machine_type, kernel_type, svm_kwargs):
         """
@@ -394,37 +401,40 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
             machine.
         """
 
-        one_class_flag = (machine_type == 'ONE_CLASS') # True if one-class SVM is used
+        one_class_flag = (
+            machine_type == 'ONE_CLASS')  # True if one-class SVM is used
 
         # Mean-std normalize the data before training
-        real, attack, features_mean, features_std = self.norm_train_data(real, attack, one_class_flag)
+        real, attack, features_mean, features_std = self.norm_train_data(
+            real, attack, one_class_flag)
         # real and attack - are now mean-std normalized
 
-        trainer = bob.learn.libsvm.Trainer(machine_type = machine_type,
-                                           kernel_type = kernel_type,
-                                           probability = True)
+        trainer = bob.learn.libsvm.Trainer(
+            machine_type=machine_type,
+            kernel_type=kernel_type,
+            probability=True)
 
         for key in svm_kwargs.keys():
 
-            setattr(trainer, key, svm_kwargs[key]) # set the hyper-parameters of the SVM
+            setattr(trainer, key,
+                    svm_kwargs[key])  # set the hyper-parameters of the SVM
 
-        if not( one_class_flag ): # two-class SVM case
+        if not (one_class_flag):  # two-class SVM case
 
-            data = [real, attack] # data for final training
+            data = [real, attack]  # data for final training
 
-        else: # one-class SVM case
+        else:  # one-class SVM case
 
-            data = [real] # only real class used for training
+            data = [real]  # only real class used for training
 
-        machine = trainer.train(data) # train the machine
+        machine = trainer.train(data)  # train the machine
 
         # add the normalizers to the trained SVM machine
-        machine.input_subtract = features_mean # subtract the mean of train data
-        machine.input_divide   = features_std  # divide by std of train data
+        machine.input_subtract = features_mean  # subtract the mean of train data
+        machine.input_divide = features_std  # divide by std of train data
 
         return machine
 
-
     #==========================================================================
     def get_data_start_end_idx(self, data, N):
         """
@@ -456,19 +466,20 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
 
         n_features = data.shape[1]
 
-        n_machines = np.int(n_features/N)
+        n_machines = np.int(n_features / N)
 
-        if (n_features - n_machines*N) > 1: # if more than one feature remains
+        if (n_features -
+                n_machines * N) > 1:  # if more than one feature remains
 
             machines_num = range(0, n_machines, 1)
 
-            idx_start = [item*N for item in machines_num]
+            idx_start = [item * N for item in machines_num]
 
-            idx_end = [(item+1)*N for item in machines_num]
+            idx_end = [(item + 1) * N for item in machines_num]
 
-            idx_start.append( n_machines*N )
+            idx_start.append(n_machines * N)
 
-            idx_end.append( n_features )
+            idx_end.append(n_features)
 
             n_machines = n_machines + 1
 
@@ -476,15 +487,15 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
 
             machines_num = range(0, n_machines, 1)
 
-            idx_start = [item*N for item in machines_num]
+            idx_start = [item * N for item in machines_num]
 
-            idx_end = [(item+1)*N for item in machines_num]
+            idx_end = [(item + 1) * N for item in machines_num]
 
         return idx_start, idx_end, n_machines
 
-
     #==========================================================================
-    def train_svm_cascade(self, real, attack, machine_type, kernel_type, svm_kwargs, N):
+    def train_svm_cascade(self, real, attack, machine_type, kernel_type,
+                          svm_kwargs, N):
         """
         Train a cascade of SVMs, one SVM machine per N features. N is usually
         small, e.g. N = 2 or 3. So, if N = 2, the first SVM is trained for features 1 and 2,
@@ -525,7 +536,8 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
             A dictionary containing a cascade of trained SVM machines.
         """
 
-        one_class_flag = (machine_type == 'ONE_CLASS') # True if one-class SVM is used
+        one_class_flag = (
+            machine_type == 'ONE_CLASS')  # True if one-class SVM is used
 
         idx_start, idx_end, n_machines = self.get_data_start_end_idx(real, N)
 
@@ -533,27 +545,31 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
 
         for machine_num in range(0, n_machines, 1):
 
-            if not(one_class_flag): # two-class SVM
+            if not (one_class_flag):  # two-class SVM
 
-                real_subset     = real[:, idx_start[machine_num] : idx_end[machine_num] ] # both real and attack classes are used
-                attack_subset = attack[:, idx_start[machine_num] : idx_end[machine_num] ]
+                real_subset = real[:, idx_start[machine_num]:idx_end[
+                    machine_num]]  # both real and attack classes are used
+                attack_subset = attack[:, idx_start[machine_num]:idx_end[
+                    machine_num]]
 
-            else: # one-class SVM case
+            else:  # one-class SVM case
 
-                real_subset     = real[:, idx_start[machine_num] : idx_end[machine_num] ] # only the real class is used
+                real_subset = real[:, idx_start[machine_num]:idx_end[
+                    machine_num]]  # only the real class is used
                 attack_subset = []
 
-            machine = self.train_svm(real_subset, attack_subset, machine_type, kernel_type, svm_kwargs)
+            machine = self.train_svm(real_subset, attack_subset, machine_type,
+                                     kernel_type, svm_kwargs)
 
-            machines[ str(machine_num) ] = machine
+            machines[str(machine_num)] = machine
 
             del machine
 
         return machines
 
-
     #==========================================================================
-    def train_pca_svm_cascade(self, real, attack, machine_type, kernel_type, svm_kwargs, N):
+    def train_pca_svm_cascade(self, real, attack, machine_type, kernel_type,
+                              svm_kwargs, N):
         """
         This function is designed to train the **cascade** of SVMs given
         features of real and attack classes. The procedure is the following:
@@ -606,26 +622,35 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
             A cascade of SVM machines.
         """
 
-        one_class_flag = (machine_type == 'ONE_CLASS') # True if one-class SVM is used
+        one_class_flag = (
+            machine_type == 'ONE_CLASS')  # True if one-class SVM is used
 
         # 1. Train PCA using normalized features of the real class:
-        pca_machine, _ = self.train_pca(real) # the mean-std normalizers are already set in this machine
+        pca_machine, _ = self.train_pca(
+            real)  # the mean-std normalizers are already set in this machine
 
         # 2. Project the features given PCA machine:
-        if not(one_class_flag):
-            projected_real = pca_machine(real) # the normalizers are already set for the PCA machine, therefore non-normalized data is passed in
-            projected_attack = pca_machine(attack) # the normalizers are already set for the PCA machine, therefore non-normalized data is passed in
+        if not (one_class_flag):
+            projected_real = pca_machine(
+                real
+            )  # the normalizers are already set for the PCA machine, therefore non-normalized data is passed in
+            projected_attack = pca_machine(
+                attack
+            )  # the normalizers are already set for the PCA machine, therefore non-normalized data is passed in
 
         else:
-            projected_real = pca_machine(real) # the normalizers are already set for the PCA machine, therefore non-normalized data is passed in
+            projected_real = pca_machine(
+                real
+            )  # the normalizers are already set for the PCA machine, therefore non-normalized data is passed in
             projected_attack = []
 
         # 3. Train a cascade of SVM machines using **projected** data
-        svm_machines = self.train_svm_cascade(projected_real, projected_attack, machine_type, kernel_type, svm_kwargs, N)
+        svm_machines = self.train_svm_cascade(projected_real, projected_attack,
+                                              machine_type, kernel_type,
+                                              svm_kwargs, N)
 
         return pca_machine, svm_machines
 
-
     #==========================================================================
     def save_machine(self, projector_file, projector_file_name, machine):
         """
@@ -650,17 +675,19 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
 
         extension = ".hdf5"
 
-        resulting_file_name = os.path.join( os.path.split(projector_file)[0], projector_file_name + extension )
+        resulting_file_name = os.path.join(
+            os.path.split(projector_file)[0], projector_file_name + extension)
 
-        f = bob.io.base.HDF5File(resulting_file_name, 'w') # open hdf5 file to save to
+        f = bob.io.base.HDF5File(resulting_file_name,
+                                 'w')  # open hdf5 file to save to
 
-        machine.save(f) # save the machine and normalization parameters
+        machine.save(f)  # save the machine and normalization parameters
 
         del f
 
-
     #==========================================================================
-    def save_cascade_of_machines(self, projector_file, projector_file_name, machines):
+    def save_cascade_of_machines(self, projector_file, projector_file_name,
+                                 machines):
         """
         Saves a cascade of machines to the hdf5 files. The name of the file is
         specified in the ``projector_file_name`` string and will be augmented with
@@ -689,8 +716,8 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
 
             machine = machines[key]
 
-            self.save_machine(projector_file, augumented_projector_file_name, machine)
-
+            self.save_machine(projector_file, augumented_projector_file_name,
+                              machine)
 
     #==========================================================================
     def train_projector(self, training_features, projector_file):
@@ -714,24 +741,28 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
         """
 
         # training_features[0] - training features for the REAL class.
-        real = self.convert_list_of_frame_cont_to_array(training_features[0]) # output is array
+        real = self.convert_list_of_frame_cont_to_array(
+            training_features[0])  # output is array
         # training_features[1] - training features for the ATTACK class.
-        attack = self.convert_list_of_frame_cont_to_array(training_features[1]) # output is array
+        attack = self.convert_list_of_frame_cont_to_array(
+            training_features[1])  # output is array
 
         # Train the PCA machine and cascade of SVMs
-        pca_machine, svm_machines = self.train_pca_svm_cascade(real = real,
-                                                               attack = attack,
-                                                               machine_type = self.machine_type,
-                                                               kernel_type = self.kernel_type,
-                                                               svm_kwargs = self.svm_kwargs,
-                                                               N = self.N)
+        pca_machine, svm_machines = self.train_pca_svm_cascade(
+            real=real,
+            attack=attack,
+            machine_type=self.machine_type,
+            kernel_type=self.kernel_type,
+            svm_kwargs=self.svm_kwargs,
+            N=self.N)
 
         # Save the PCA machine
-        self.save_machine(projector_file, self.pca_projector_file_name, pca_machine)
+        self.save_machine(projector_file, self.pca_projector_file_name,
+                          pca_machine)
 
         # Save the cascade of SVMs:
-        self.save_cascade_of_machines(projector_file, self.svm_projector_file_name, svm_machines)
-
+        self.save_cascade_of_machines(
+            projector_file, self.svm_projector_file_name, svm_machines)
 
     #==========================================================================
     def load_machine(self, projector_file, projector_file_name):
@@ -759,9 +790,12 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
 
         extension = ".hdf5"
 
-        resulting_file_name = os.path.join( os.path.split(projector_file)[0], projector_file_name + extension ) # name of the file
+        resulting_file_name = os.path.join(
+            os.path.split(projector_file)[0],
+            projector_file_name + extension)  # name of the file
 
-        f = bob.io.base.HDF5File(resulting_file_name, 'r') # file to read the machine from
+        f = bob.io.base.HDF5File(resulting_file_name,
+                                 'r')  # file to read the machine from
 
         if "pca_" in projector_file_name:
 
@@ -775,7 +809,6 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
 
         return machine
 
-
     #==========================================================================
     def get_cascade_file_names(self, projector_file, projector_file_name):
         """
@@ -800,19 +833,19 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
             A list of **relative** file-names storing the cascade of machines.
         """
 
-        path = os.path.split(projector_file)[0] # directory containing files storing the cascade of machines.
+        path = os.path.split(projector_file)[
+            0]  # directory containing files storing the cascade of machines.
 
         files = []
 
-        for f in os.listdir( path ):
+        for f in os.listdir(path):
 
-            if fnmatch.fnmatch( f, projector_file_name + "*" ):
+            if fnmatch.fnmatch(f, projector_file_name + "*"):
 
                 files.append(f)
 
         return files
 
-
     #==========================================================================
     def load_cascade_of_machines(self, projector_file, projector_file_name):
         """
@@ -839,19 +872,20 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
             the machine, value is the machine itself.
         """
 
-        files = self.get_cascade_file_names(projector_file, projector_file_name) # files storing the cascade
+        files = self.get_cascade_file_names(
+            projector_file, projector_file_name)  # files storing the cascade
 
         machines = {}
 
         for idx, _ in enumerate(files):
 
-            machine = self.load_machine( projector_file, projector_file_name + str(idx) )
+            machine = self.load_machine(projector_file,
+                                        projector_file_name + str(idx))
 
-            machines[ str(idx) ] = machine
+            machines[str(idx)] = machine
 
         return machines
 
-
     #==========================================================================
     def load_projector(self, projector_file):
         """
@@ -876,15 +910,16 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
         """
 
         # Load the PCA machine
-        pca_machine = self.load_machine(projector_file, self.pca_projector_file_name)
+        pca_machine = self.load_machine(projector_file,
+                                        self.pca_projector_file_name)
 
         # Load the cascade of SVMs:
-        svm_machines = self.load_cascade_of_machines(projector_file, self.svm_projector_file_name)
+        svm_machines = self.load_cascade_of_machines(
+            projector_file, self.svm_projector_file_name)
 
         self.pca_machine = pca_machine
         self.svm_machines = svm_machines
 
-
     #==========================================================================
     def combine_scores_of_svm_cascade(self, scores_array, pos_scores_slope):
         """
@@ -914,19 +949,18 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
 
         for col in scores_array.T:
 
-            idx_vec = np.where(col>=0)
+            idx_vec = np.where(col >= 0)
 
-            col[idx_vec] *= pos_scores_slope # multiply positive scores by the constant
+            col[idx_vec] *= pos_scores_slope  # multiply positive scores by the constant
 
             cols.append(col)
 
         scores_array_modified = np.stack(cols, axis=1)
 
-        scores = np.mean(scores_array_modified, axis = 1)
+        scores = np.mean(scores_array_modified, axis=1)
 
         return scores
 
-
     #==========================================================================
     def project(self, feature):
         """
@@ -963,7 +997,9 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
         """
 
         # 1. Convert input array to numpy array if necessary.
-        if isinstance(feature, FrameContainer): # if FrameContainer convert to 2D numpy array
+        if isinstance(
+                feature,
+                FrameContainer):  # if FrameContainer convert to 2D numpy array
 
             features_array = self.convert_frame_cont_to_array(feature)
 
@@ -977,37 +1013,44 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
         # 3. Apply the cascade of SVMs to the projected features.
         all_scores = []
 
-        idx_start, idx_end, n_machines = self.get_data_start_end_idx(pca_projected_features, self.N)
+        idx_start, idx_end, n_machines = self.get_data_start_end_idx(
+            pca_projected_features, self.N)
 
-        for machine_num in range(0, n_machines, 1): # iterate over SVM machines
+        for machine_num in range(0, n_machines,
+                                 1):  # iterate over SVM machines
 
-            svm_machine = self.svm_machines[ str(machine_num) ] # select a machine
+            svm_machine = self.svm_machines[str(
+                machine_num)]  # select a machine
 
             # subset of PCA projected features to be passed to SVM machine
-            pca_projected_features_subset = pca_projected_features[:, idx_start[machine_num] : idx_end[machine_num] ]
+            pca_projected_features_subset = pca_projected_features[:, idx_start[
+                machine_num]:idx_end[machine_num]]
 
             # for a two-class SVM, keep the scores of the real class only via [1][:, 0]: index [1] selects the scores (index [0] would select the classes), and column 0 is the real class.
-            single_machine_scores = svm_machine.predict_class_and_scores( pca_projected_features_subset )[1][:,0]
+            single_machine_scores = svm_machine.predict_class_and_scores(
+                pca_projected_features_subset)[1][:, 0]
 
             all_scores.append(single_machine_scores)
 
-        all_scores_array   = np.stack(all_scores, axis = 1).astype(np.float)
+        all_scores_array = np.stack(all_scores, axis=1).astype(np.float)
 
         # 4. Combine the scores:
 
-        one_class_flag = (svm_machine.machine_type == 'ONE_CLASS') # True if one-class SVM is used
+        one_class_flag = (svm_machine.machine_type == 'ONE_CLASS'
+                          )  # True if one-class SVM is used
 
-        if not(one_class_flag):
+        if not (one_class_flag):
 
-            scores = np.mean(all_scores_array, axis = 1) # compute mean for two-class SVM
+            scores = np.mean(
+                all_scores_array, axis=1)  # compute mean for two-class SVM
 
-        else: # one class SVM case
+        else:  # one class SVM case
 
-            scores = self.combine_scores_of_svm_cascade(all_scores_array, self.pos_scores_slope)
+            scores = self.combine_scores_of_svm_cascade(
+                all_scores_array, self.pos_scores_slope)
 
         return scores
 
-
     #==========================================================================
     def score(self, toscore):
         """
@@ -1040,11 +1083,6 @@ class VideoCascadeSvmPadAlgorithm(Algorithm):
 
         else:
 
-            score = [np.mean( toscore )] # compute a single score per video
+            score = [np.mean(toscore)]  # compute a single score per video
 
         return score
-
-
-
-
-
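A minimal sketch of the cascade-training logic reformatted above, assuming
`real` and `attack` are 2D numpy arrays (rows are frames, columns are
features) and that bob.learn.libsvm is importable. `chunk_bounds` and
`train_cascade` are hypothetical helpers introduced only for illustration;
unlike get_data_start_end_idx, this version keeps a single leftover feature
instead of dropping it:

    import numpy as np
    import bob.learn.libsvm

    def chunk_bounds(n_features, N):
        # one (start, end) column range per SVM in the cascade
        starts = range(0, n_features, N)
        return [(s, min(s + N, n_features)) for s in starts]

    def train_cascade(real, attack, N=2):
        machines = {}
        for num, (s, e) in enumerate(chunk_bounds(real.shape[1], N)):
            trainer = bob.learn.libsvm.Trainer(
                machine_type='C_SVC', kernel_type='RBF', probability=True)
            # two-class case: train on the matching feature columns of both classes
            machines[str(num)] = trainer.train([real[:, s:e], attack[:, s:e]])
        return machines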
diff --git a/bob/pad/face/algorithm/VideoGmmPadAlgorithm.py b/bob/pad/face/algorithm/VideoGmmPadAlgorithm.py
index bfadb586..a394cfc8 100644
--- a/bob/pad/face/algorithm/VideoGmmPadAlgorithm.py
+++ b/bob/pad/face/algorithm/VideoGmmPadAlgorithm.py
@@ -19,10 +19,10 @@ import bob.io.base
 
 from sklearn import mixture
 
-
 # ==============================================================================
 # Main body :
 
+
 class VideoGmmPadAlgorithm(Algorithm):
     """
     This class is designed to train a GMM-based PAD system. The GMM is trained
@@ -55,12 +55,13 @@ class VideoGmmPadAlgorithm(Algorithm):
                  random_state=3,
                  frame_level_scores_flag=False):
 
-        Algorithm.__init__(self,
-                           n_components=n_components,
-                           random_state=random_state,
-                           frame_level_scores_flag=frame_level_scores_flag,
-                           performs_projection=True,
-                           requires_projector_training=True)
+        Algorithm.__init__(
+            self,
+            n_components=n_components,
+            random_state=random_state,
+            frame_level_scores_flag=frame_level_scores_flag,
+            performs_projection=True,
+            requires_projector_training=True)
 
         self.n_components = n_components
 
@@ -75,8 +76,11 @@ class VideoGmmPadAlgorithm(Algorithm):
         self.features_std = None  # this argument will be updated with features std
 
         # names of the arguments of the pretrained GMM machine to be saved/loaded to/from HDF5 file:
-        self.gmm_param_keys = ["covariance_type", "covariances_", "lower_bound_", "means_", "n_components", "weights_",
-                               "converged_", "precisions_", "precisions_cholesky_"]
+        self.gmm_param_keys = [
+            "covariance_type", "covariances_", "lower_bound_", "means_",
+            "n_components", "weights_", "converged_", "precisions_",
+            "precisions_cholesky_"
+        ]
 
     # ==========================================================================
     def convert_frame_cont_to_array(self, frame_container):
@@ -132,7 +136,9 @@ class VideoGmmPadAlgorithm(Algorithm):
             An array containing features for all samples and frames.
         """
 
-        if isinstance(features[0], FrameContainer):  # if FrameContainer convert to 2D numpy array
+        if isinstance(
+                features[0],
+                FrameContainer):  # if FrameContainer convert to 2D numpy array
             return self.convert_list_of_frame_cont_to_array(features)
         else:
             return np.vstack(features)
@@ -160,7 +166,8 @@ class VideoGmmPadAlgorithm(Algorithm):
         feature_vectors = []
 
         for frame_container in frame_containers:
-            video_features_array = self.convert_frame_cont_to_array(frame_container)
+            video_features_array = self.convert_frame_cont_to_array(
+                frame_container)
 
             feature_vectors.append(video_features_array)
 
@@ -169,7 +176,10 @@ class VideoGmmPadAlgorithm(Algorithm):
         return features_array
 
     # ==========================================================================
-    def mean_std_normalize(self, features, features_mean=None, features_std=None):
+    def mean_std_normalize(self,
+                           features,
+                           features_mean=None,
+                           features_std=None):
         """
         The features in the input 2D array are mean-std normalized.
         The rows are samples, the columns are features. If ``features_mean``
@@ -250,19 +260,22 @@ class VideoGmmPadAlgorithm(Algorithm):
             Standard deviation of the features.
         """
 
-        features_norm, features_mean, features_std = self.mean_std_normalize(real)
+        features_norm, features_mean, features_std = self.mean_std_normalize(
+            real)
         # real is now mean-std normalized
 
-        machine = mixture.GaussianMixture(n_components=n_components,
-                                          random_state=random_state,
-                                          covariance_type='full')
+        machine = mixture.GaussianMixture(
+            n_components=n_components,
+            random_state=random_state,
+            covariance_type='full')
 
         machine.fit(features_norm)
 
         return machine, features_mean, features_std
 
     # ==========================================================================
-    def save_gmm_machine_and_mean_std(self, projector_file, machine, features_mean, features_std):
+    def save_gmm_machine_and_mean_std(self, projector_file, machine,
+                                      features_mean, features_std):
         """
         Saves the GMM machine, features mean and std to the hdf5 file.
         The absolute name of the file is specified in ``projector_file`` string.
@@ -284,7 +297,8 @@ class VideoGmmPadAlgorithm(Algorithm):
             Standard deviation of the features.
         """
 
-        f = bob.io.base.HDF5File(projector_file, 'w')  # open hdf5 file to save to
+        f = bob.io.base.HDF5File(projector_file,
+                                 'w')  # open hdf5 file to save to
 
         for key in self.gmm_param_keys:
             data = getattr(machine, key)
@@ -317,18 +331,21 @@ class VideoGmmPadAlgorithm(Algorithm):
         """
 
         # training_features[0] - training features for the REAL class.
-        real = self.convert_and_prepare_features(training_features[0])  # output is array
+        real = self.convert_and_prepare_features(
+            training_features[0])  # output is array
 
         # training_features[1] - training features for the ATTACK class.
         #        attack = self.convert_and_prepare_features(training_features[1]) # output is array
 
         # Train the GMM machine and get normalizers:
-        machine, features_mean, features_std = self.train_gmm(real=real,
-                                                              n_components=self.n_components,
-                                                              random_state=self.random_state)
+        machine, features_mean, features_std = self.train_gmm(
+            real=real,
+            n_components=self.n_components,
+            random_state=self.random_state)
 
         # Save the GMM machine and normalizers:
-        self.save_gmm_machine_and_mean_std(projector_file, machine, features_mean, features_std)
+        self.save_gmm_machine_and_mean_std(projector_file, machine,
+                                           features_mean, features_std)
 
     # ==========================================================================
     def load_gmm_machine_and_mean_std(self, projector_file):
@@ -354,7 +371,8 @@ class VideoGmmPadAlgorithm(Algorithm):
             Standard deviation of the features.
         """
 
-        f = bob.io.base.HDF5File(projector_file, 'r')  # file to read the machine from
+        f = bob.io.base.HDF5File(projector_file,
+                                 'r')  # file to read the machine from
 
         # initialize the machine:
         machine = mixture.GaussianMixture()
@@ -397,7 +415,8 @@ class VideoGmmPadAlgorithm(Algorithm):
             ``load_cascade_of_machines`` methods of this class for more details.
         """
 
-        machine, features_mean, features_std = self.load_gmm_machine_and_mean_std(projector_file)
+        machine, features_mean, features_std = self.load_gmm_machine_and_mean_std(
+            projector_file)
 
         self.machine = machine
 
@@ -437,7 +456,9 @@ class VideoGmmPadAlgorithm(Algorithm):
         """
 
         # 1. Convert input array to numpy array if necessary.
-        if isinstance(feature, FrameContainer):  # if FrameContainer convert to 2D numpy array
+        if isinstance(
+                feature,
+                FrameContainer):  # if FrameContainer convert to 2D numpy array
 
             features_array = self.convert_frame_cont_to_array(feature)
 
@@ -445,7 +466,8 @@ class VideoGmmPadAlgorithm(Algorithm):
 
             features_array = feature
 
-        features_array_norm, _, _ = self.mean_std_normalize(features_array, self.features_mean, self.features_std)
+        features_array_norm, _, _ = self.mean_std_normalize(
+            features_array, self.features_mean, self.features_std)
 
         scores = self.machine.score_samples(features_array_norm)
 
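A minimal end-to-end sketch of the one-class GMM pipeline above, assuming
`real_train` and `test_frames` are 2D numpy arrays of per-frame features
(hypothetical names); the zero-std guard is an assumption, and n_components
is set arbitrarily for the example:

    import numpy as np
    from sklearn import mixture

    mean = real_train.mean(axis=0)
    std = real_train.std(axis=0)
    std[std == 0.0] = 1.0  # assumption: guard against constant features

    machine = mixture.GaussianMixture(
        n_components=3, random_state=3, covariance_type='full')
    machine.fit((real_train - mean) / std)  # trained on the real class only

    # per-frame log-likelihoods; higher means closer to the bona-fide model
    frame_scores = machine.score_samples((test_frames - mean) / std)
    video_score = np.mean(frame_scores)  # when frame_level_scores_flag is False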
diff --git a/bob/pad/face/algorithm/VideoLRPadAlgorithm.py b/bob/pad/face/algorithm/VideoLRPadAlgorithm.py
index ff05a55b..2788e806 100644
--- a/bob/pad/face/algorithm/VideoLRPadAlgorithm.py
+++ b/bob/pad/face/algorithm/VideoLRPadAlgorithm.py
@@ -19,10 +19,10 @@ from sklearn import linear_model
 
 import bob.io.base
 
-
 #==============================================================================
 # Main body :
 
+
 class VideoLRPadAlgorithm(Algorithm):
     """
     This class is designed to train a Logistic Regression classifier given Frame Containers
@@ -63,23 +63,23 @@ class VideoLRPadAlgorithm(Algorithm):
     """
 
     def __init__(self,
-                 C = 1,
-                 frame_level_scores_flag = False,
-                 subsample_train_data_flag = False,
-                 subsampling_step = 10,
-                 subsample_videos_flag = False,
-                 video_subsampling_step = 3):
-
-
-        Algorithm.__init__(self,
-                           C = C,
-                           frame_level_scores_flag = frame_level_scores_flag,
-                           subsample_train_data_flag = subsample_train_data_flag,
-                           subsampling_step = subsampling_step,
-                           subsample_videos_flag = subsample_videos_flag,
-                           video_subsampling_step = video_subsampling_step,
-                           performs_projection=True,
-                           requires_projector_training=True)
+                 C=1,
+                 frame_level_scores_flag=False,
+                 subsample_train_data_flag=False,
+                 subsampling_step=10,
+                 subsample_videos_flag=False,
+                 video_subsampling_step=3):
+
+        Algorithm.__init__(
+            self,
+            C=C,
+            frame_level_scores_flag=frame_level_scores_flag,
+            subsample_train_data_flag=subsample_train_data_flag,
+            subsampling_step=subsampling_step,
+            subsample_videos_flag=subsample_videos_flag,
+            video_subsampling_step=video_subsampling_step,
+            performs_projection=True,
+            requires_projector_training=True)
 
         self.C = C
 
@@ -93,15 +93,14 @@ class VideoLRPadAlgorithm(Algorithm):
 
         self.video_subsampling_step = video_subsampling_step
 
-        self.lr_machine = None # this argument will be updated with pretrained LR machine
+        self.lr_machine = None  # this argument will be updated with pretrained LR machine
 
-        self.features_mean = None # this argument will be updated with features mean
-        self.features_std = None # this argument will be updated with features std
+        self.features_mean = None  # this argument will be updated with features mean
+        self.features_std = None  # this argument will be updated with features std
 
         # names of the arguments of the pretrained LR machine to be saved/loaded to/from HDF5 file:
         self.lr_param_keys = ["C", "classes_", "coef_", "intercept_"]
 
-
     #==========================================================================
     def convert_frame_cont_to_array(self, frame_container):
         """
@@ -138,7 +137,6 @@ class VideoLRPadAlgorithm(Algorithm):
 
         return features_array
 
-
     #==========================================================================
     def convert_list_of_frame_cont_to_array(self, frame_containers):
         """
@@ -163,17 +161,20 @@ class VideoLRPadAlgorithm(Algorithm):
 
         for frame_container in frame_containers:
 
-            video_features_array = self.convert_frame_cont_to_array(frame_container)
+            video_features_array = self.convert_frame_cont_to_array(
+                frame_container)
 
-            feature_vectors.append( video_features_array )
+            feature_vectors.append(video_features_array)
 
         features_array = np.vstack(feature_vectors)
 
         return features_array
 
-
     #==========================================================================
-    def mean_std_normalize(self, features, features_mean= None, features_std = None):
+    def mean_std_normalize(self,
+                           features,
+                           features_mean=None,
+                           features_std=None):
         """
         The features in the input 2D array are mean-std normalized.
         The rows are samples, the columns are features. If ``features_mean``
@@ -215,7 +216,7 @@ class VideoLRPadAlgorithm(Algorithm):
 
         row_norm_list = []
 
-        for row in features: # row is a sample
+        for row in features:  # row is a sample
 
             row_norm = (row - features_mean) / features_std
 
@@ -225,7 +226,6 @@ class VideoLRPadAlgorithm(Algorithm):
 
         return features_norm, features_mean, features_std
 
-
     #==========================================================================
     def norm_train_data(self, real, attack):
         """
@@ -258,11 +258,11 @@ class VideoLRPadAlgorithm(Algorithm):
 
         real_norm, features_mean, features_std = self.mean_std_normalize(real)
 
-        attack_norm, _, _ = self.mean_std_normalize(attack, features_mean, features_std)
+        attack_norm, _, _ = self.mean_std_normalize(attack, features_mean,
+                                                    features_std)
 
         return real_norm, attack_norm, features_mean, features_std
 
-
     #==========================================================================
     def train_lr(self, real, attack, C):
         """
@@ -294,22 +294,23 @@ class VideoLRPadAlgorithm(Algorithm):
             Standard deviation of the features.
         """
 
-        real, attack, features_mean, features_std = self.norm_train_data(real, attack)
+        real, attack, features_mean, features_std = self.norm_train_data(
+            real, attack)
         # real and attack - are now mean-std normalized
 
         X = np.vstack([real, attack])
 
-        Y = np.hstack( [ np.zeros(len(real) ), np.ones(len(attack) ) ] )
+        Y = np.hstack([np.zeros(len(real)), np.ones(len(attack))])
 
-        machine = linear_model.LogisticRegression( C = C )
+        machine = linear_model.LogisticRegression(C=C)
 
         machine.fit(X, Y)
 
         return machine, features_mean, features_std
 
-
     #==========================================================================
-    def save_lr_machine_and_mean_std(self, projector_file, machine, features_mean, features_std):
+    def save_lr_machine_and_mean_std(self, projector_file, machine,
+                                     features_mean, features_std):
         """
         Saves the LR machine, features mean and std to the hdf5 file.
         The absolute name of the file is specified in ``projector_file`` string.
@@ -331,21 +332,21 @@ class VideoLRPadAlgorithm(Algorithm):
             Standard deviation of the features.
         """
 
-        f = bob.io.base.HDF5File(projector_file, 'w') # open hdf5 file to save to
+        f = bob.io.base.HDF5File(projector_file,
+                                 'w')  # open hdf5 file to save to
 
-        for key in self.lr_param_keys: # ["C", "classes_", "coef_", "intercept_"]
+        for key in self.lr_param_keys:  # ["C", "classes_", "coef_", "intercept_"]
 
-            data = getattr( machine, key )
+            data = getattr(machine, key)
 
-            f.set( key, data )
+            f.set(key, data)
 
-        f.set( "features_mean", features_mean )
+        f.set("features_mean", features_mean)
 
-        f.set( "features_std", features_std )
+        f.set("features_std", features_std)
 
         del f
 
-
     #==========================================================================
     def subsample_train_videos(self, training_features, step):
         """
@@ -371,7 +372,6 @@ class VideoLRPadAlgorithm(Algorithm):
 
         return training_features_subset
 
-
     #==========================================================================
     def train_projector(self, training_features, projector_file):
         """
@@ -394,38 +394,45 @@ class VideoLRPadAlgorithm(Algorithm):
         # training_features[0] - training features for the REAL class.
         # training_features[1] - training features for the ATTACK class.
 
-        if self.subsample_videos_flag: # subsample videos of the real class
+        if self.subsample_videos_flag:  # subsample videos of the real class
 
-            real = self.convert_list_of_frame_cont_to_array( self.subsample_train_videos(training_features[0], self.video_subsampling_step) ) # output is array
+            real = self.convert_list_of_frame_cont_to_array(
+                self.subsample_train_videos(
+                    training_features[0],
+                    self.video_subsampling_step))  # output is array
 
         else:
 
-            real = self.convert_list_of_frame_cont_to_array(training_features[0]) # output is array
+            real = self.convert_list_of_frame_cont_to_array(
+                training_features[0])  # output is array
 
         if self.subsample_train_data_flag:
 
-            real = real[range(0,len(real), self.subsampling_step), :]
+            real = real[range(0, len(real), self.subsampling_step), :]
 
-        if self.subsample_videos_flag: # subsample videos of the real class
+        if self.subsample_videos_flag:  # subsample videos of the real class
 
-            attack = self.convert_list_of_frame_cont_to_array( self.subsample_train_videos(training_features[1], self.video_subsampling_step) ) # output is array
+            attack = self.convert_list_of_frame_cont_to_array(
+                self.subsample_train_videos(
+                    training_features[1],
+                    self.video_subsampling_step))  # output is array
 
         else:
 
-            attack = self.convert_list_of_frame_cont_to_array(training_features[1]) # output is array
+            attack = self.convert_list_of_frame_cont_to_array(
+                training_features[1])  # output is array
 
         if self.subsample_train_data_flag:
 
-            attack = attack[range(0,len(attack), self.subsampling_step), :]
+            attack = attack[range(0, len(attack), self.subsampling_step), :]
 
         # Train the LR machine and get normalizers:
-        machine, features_mean, features_std = self.train_lr(real = real,
-                                                             attack = attack,
-                                                             C = self.C)
+        machine, features_mean, features_std = self.train_lr(
+            real=real, attack=attack, C=self.C)
 
         # Save the LR machine and normalizers:
-        self.save_lr_machine_and_mean_std(projector_file, machine, features_mean, features_std)
-
+        self.save_lr_machine_and_mean_std(projector_file, machine,
+                                          features_mean, features_std)
 
     #==========================================================================
     def load_lr_machine_and_mean_std(self, projector_file):
@@ -451,13 +458,14 @@ class VideoLRPadAlgorithm(Algorithm):
             Standard deviation of the features.
         """
 
-        f = bob.io.base.HDF5File(projector_file, 'r') # file to read the machine from
+        f = bob.io.base.HDF5File(projector_file,
+                                 'r')  # file to read the machine from
 
         # initialize the machine:
         machine = linear_model.LogisticRegression()
 
         # set the params of the machine:
-        for key in self.lr_param_keys: # ["C", "classes_", "coef_", "intercept_"]
+        for key in self.lr_param_keys:  # ["C", "classes_", "coef_", "intercept_"]
 
             data = f.read(key)
 
@@ -471,7 +479,6 @@ class VideoLRPadAlgorithm(Algorithm):
 
         return machine, features_mean, features_std
 
-
     #==========================================================================
     def load_projector(self, projector_file):
         """
@@ -496,7 +503,8 @@ class VideoLRPadAlgorithm(Algorithm):
             ``load_cascade_of_machines`` methods of this class for more details.
         """
 
-        lr_machine, features_mean, features_std = self.load_lr_machine_and_mean_std(projector_file)
+        lr_machine, features_mean, features_std = self.load_lr_machine_and_mean_std(
+            projector_file)
 
         self.lr_machine = lr_machine
 
@@ -504,7 +512,6 @@ class VideoLRPadAlgorithm(Algorithm):
 
         self.features_std = features_std
 
-
     #==========================================================================
     def project(self, feature):
         """
@@ -537,7 +544,9 @@ class VideoLRPadAlgorithm(Algorithm):
         """
 
         # 1. Convert input array to numpy array if necessary.
-        if isinstance(feature, FrameContainer): # if FrameContainer convert to 2D numpy array
+        if isinstance(
+                feature,
+                FrameContainer):  # if FrameContainer convert to 2D numpy array
 
             features_array = self.convert_frame_cont_to_array(feature)
 
@@ -545,13 +554,13 @@ class VideoLRPadAlgorithm(Algorithm):
 
             features_array = feature
 
-        features_array_norm, _, _ = self.mean_std_normalize(features_array, self.features_mean, self.features_std)
+        features_array_norm, _, _ = self.mean_std_normalize(
+            features_array, self.features_mean, self.features_std)
 
-        scores = self.lr_machine.predict_proba( features_array_norm )[:,0]
+        scores = self.lr_machine.predict_proba(features_array_norm)[:, 0]
 
         return scores
 
-
     #==========================================================================
     def score(self, toscore):
         """
@@ -580,10 +589,6 @@ class VideoLRPadAlgorithm(Algorithm):
 
         else:
 
-            score = [np.mean( toscore )] # compute a single score per video
+            score = [np.mean(toscore)]  # compute a single score per video
 
         return score
-
-
-
-
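A minimal sketch of the logistic-regression pipeline above, assuming `real`,
`attack` and `test_frames` are 2D numpy arrays of per-frame features
(hypothetical names). Note the label convention from train_lr (real = 0,
attack = 1), which is why project() reads predict_proba(...)[:, 0] as the
bona-fide score:

    import numpy as np
    from sklearn import linear_model

    # normalizers come from the real class only, as in norm_train_data
    mean = real.mean(axis=0)
    std = real.std(axis=0)

    X = np.vstack([(real - mean) / std, (attack - mean) / std])
    Y = np.hstack([np.zeros(len(real)), np.ones(len(attack))])

    machine = linear_model.LogisticRegression(C=1)
    machine.fit(X, Y)

    scores = machine.predict_proba((test_frames - mean) / std)[:, 0]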
diff --git a/bob/pad/face/algorithm/VideoSvmPadAlgorithm.py b/bob/pad/face/algorithm/VideoSvmPadAlgorithm.py
index 6d4b70e1..ac5a09c7 100644
--- a/bob/pad/face/algorithm/VideoSvmPadAlgorithm.py
+++ b/bob/pad/face/algorithm/VideoSvmPadAlgorithm.py
@@ -12,7 +12,6 @@ Created on Wed May 17 09:43:09 2017
 from bob.pad.base.algorithm import Algorithm
 from bob.bio.video.utils import FrameContainer
 
-
 import itertools as it
 
 import numpy as np
@@ -23,7 +22,6 @@ import bob.io.base
 
 import os
 
-
 # ==============================================================================
 # Main body :
 
@@ -79,30 +77,34 @@ class VideoSvmPadAlgorithm(Algorithm):
         Default: 50000.
     """
 
-    def __init__(self,
-                 machine_type='C_SVC',
-                 kernel_type='RBF',
-                 n_samples=10000,
-                 trainer_grid_search_params={'cost': [2 ** p for p in range(-5, 16, 2)],
-                                             'gamma': [2 ** p for p in range(-15, 4, 2)]},
-                 mean_std_norm_flag=False,
-                 frame_level_scores_flag=False,
-                 save_debug_data_flag=True,
-                 reduced_train_data_flag=False,
-                 n_train_samples=50000):
-
-        Algorithm.__init__(self,
-                           machine_type=machine_type,
-                           kernel_type=kernel_type,
-                           n_samples=n_samples,
-                           trainer_grid_search_params=trainer_grid_search_params,
-                           mean_std_norm_flag=mean_std_norm_flag,
-                           frame_level_scores_flag=frame_level_scores_flag,
-                           save_debug_data_flag=save_debug_data_flag,
-                           reduced_train_data_flag=reduced_train_data_flag,
-                           n_train_samples=n_train_samples,
-                           performs_projection=True,
-                           requires_projector_training=True)
+    def __init__(
+            self,
+            machine_type='C_SVC',
+            kernel_type='RBF',
+            n_samples=10000,
+            trainer_grid_search_params={
+                'cost': [2**p for p in range(-5, 16, 2)],
+                'gamma': [2**p for p in range(-15, 4, 2)]
+            },
+            mean_std_norm_flag=False,
+            frame_level_scores_flag=False,
+            save_debug_data_flag=True,
+            reduced_train_data_flag=False,
+            n_train_samples=50000):
+
+        Algorithm.__init__(
+            self,
+            machine_type=machine_type,
+            kernel_type=kernel_type,
+            n_samples=n_samples,
+            trainer_grid_search_params=trainer_grid_search_params,
+            mean_std_norm_flag=mean_std_norm_flag,
+            frame_level_scores_flag=frame_level_scores_flag,
+            save_debug_data_flag=save_debug_data_flag,
+            reduced_train_data_flag=reduced_train_data_flag,
+            n_train_samples=n_train_samples,
+            performs_projection=True,
+            requires_projector_training=True)
 
         self.machine_type = machine_type
         self.kernel_type = kernel_type
@@ -169,7 +171,9 @@ class VideoSvmPadAlgorithm(Algorithm):
             An array containing features for all samples and frames.
         """
 
-        if isinstance(features[0], FrameContainer):  # if FrameContainer convert to 2D numpy array
+        if isinstance(
+                features[0],
+                FrameContainer):  # if FrameContainer convert to 2D numpy array
             return self.convert_list_of_frame_cont_to_array(features)
         else:
             return np.vstack(features)
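
Editor's note: ``convert_and_prepare_features`` simply stacks per-video 2D arrays into one training matrix; a small numpy illustration (shapes are made up):

    import numpy as np

    video_1 = np.ones((10, 3))   # 10 frames, 3 features per frame
    video_2 = np.zeros((7, 3))   # 7 frames, same feature dimension

    features = np.vstack([video_1, video_2])
    assert features.shape == (17, 3)  # frames from all videos, stacked row-wise
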
@@ -197,7 +201,8 @@ class VideoSvmPadAlgorithm(Algorithm):
         feature_vectors = []
 
         for frame_container in frame_containers:
-            video_features_array = self.convert_frame_cont_to_array(frame_container)
+            video_features_array = self.convert_frame_cont_to_array(
+                frame_container)
 
             feature_vectors.append(video_features_array)
 
@@ -224,8 +229,11 @@ class VideoSvmPadAlgorithm(Algorithm):
 
         varNames = sorted(input_dict)
 
-        combinations = [dict(zip(varNames, prod)) for prod in
-                        it.product(*(input_dict[varName] for varName in varNames))]
+        combinations = [
+            dict(zip(varNames, prod))
+            for prod in it.product(*(input_dict[varName]
+                                     for varName in varNames))
+        ]
 
         return combinations
 
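Editor's note: the list comprehension above is a standard recipe for expanding a parameter grid; a self-contained example of what it produces:

    import itertools as it

    input_dict = {'cost': [1, 10], 'gamma': [0.1, 0.2]}
    varNames = sorted(input_dict)  # ['cost', 'gamma']

    combinations = [
        dict(zip(varNames, prod))
        for prod in it.product(*(input_dict[varName] for varName in varNames))
    ]
    # [{'cost': 1, 'gamma': 0.1}, {'cost': 1, 'gamma': 0.2},
    #  {'cost': 10, 'gamma': 0.1}, {'cost': 10, 'gamma': 0.2}]
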
@@ -257,7 +265,8 @@ class VideoSvmPadAlgorithm(Algorithm):
 
             uniform_step = np.int(features.shape[0] / n_samples)
 
-            features_subset = features[0: np.int(uniform_step * n_samples): uniform_step, :]
+            features_subset = features[0:np.int(uniform_step * n_samples):
+                                       uniform_step, :]
 
         return features_subset
 
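Editor's note: the slice reformatted above subsamples rows at a uniform stride; a worked example (the builtin ``int`` stands in for ``np.int`` here, with identical behavior):

    import numpy as np

    features = np.arange(20).reshape(10, 2)  # 10 samples, 2 features
    n_samples = 5

    uniform_step = int(features.shape[0] / n_samples)  # == 2
    features_subset = features[0:int(uniform_step * n_samples):uniform_step, :]
    assert features_subset.shape == (5, 2)  # every 2nd row is kept
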
@@ -323,13 +332,14 @@ class VideoSvmPadAlgorithm(Algorithm):
 
         half_samples_num = np.int(features.shape[0] / 2)
 
-        features_train = features[0: half_samples_num, :]
-        features_cv = features[half_samples_num: 2 * half_samples_num + 1, :]
+        features_train = features[0:half_samples_num, :]
+        features_cv = features[half_samples_num:2 * half_samples_num + 1, :]
 
         return features_train, features_cv
 
     # ==========================================================================
-    def prepare_data_for_hyper_param_grid_search(self, training_features, n_samples):
+    def prepare_data_for_hyper_param_grid_search(self, training_features,
+                                                 n_samples):
         """
         This function converts a list of all training features returned by the ``read_features``
         method of the extractor to the subsampled train and cross-validation arrays for both
@@ -369,9 +379,11 @@ class VideoSvmPadAlgorithm(Algorithm):
         """
 
         # training_features[0] - training features for the REAL class.
-        real = self.convert_and_prepare_features(training_features[0])  # output is array
+        real = self.convert_and_prepare_features(
+            training_features[0])  # output is array
         # training_features[1] - training features for the ATTACK class.
-        attack = self.convert_and_prepare_features(training_features[1])  # output is array
+        attack = self.convert_and_prepare_features(
+            training_features[1])  # output is array
 
         # uniformly select subsets of features:
         real_subset = self.select_uniform_data_subset(real, n_samples)
@@ -412,12 +424,16 @@ class VideoSvmPadAlgorithm(Algorithm):
 
         samples_num = len(labels_real) + len(labels_attack)
 
-        precision = (np.sum(labels_real == 1) + np.sum(labels_attack == -1)).astype(np.float) / samples_num
+        precision = (np.sum(labels_real == 1) + np.sum(labels_attack == -1)
+                     ).astype(np.float) / samples_num
 
         return precision
 
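Editor's note: the precision computed above is just the fraction of correctly classified samples, with real samples expected to receive label ``1`` and attacks ``-1``; a worked toy example:

    import numpy as np

    labels_real = np.array([1, 1, -1])     # predictions on real samples
    labels_attack = np.array([-1, -1, 1])  # predictions on attack samples

    samples_num = len(labels_real) + len(labels_attack)
    precision = (np.sum(labels_real == 1) +
                 np.sum(labels_attack == -1)) / float(samples_num)
    # 4 correct predictions out of 6 -> precision == 0.666...
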
     # ==========================================================================
-    def mean_std_normalize(self, features, features_mean=None, features_std=None):
+    def mean_std_normalize(self,
+                           features,
+                           features_mean=None,
+                           features_std=None):
         """
         The features in the input 2D array are mean-std normalized.
         The rows are samples, the columns are features. If ``features_mean``
@@ -469,7 +485,12 @@ class VideoSvmPadAlgorithm(Algorithm):
         return features_norm, features_mean, features_std
 
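Editor's note: ``mean_std_normalize`` standardizes each feature column; a minimal sketch of the computation it performs when no normalizers are supplied:

    import numpy as np

    features = np.array([[1., 2.], [3., 4.], [5., 6.]])

    features_mean = np.mean(features, axis=0)  # per-column mean
    features_std = np.std(features, axis=0)    # per-column std
    features_norm = (features - features_mean) / features_std
    # each column of features_norm now has zero mean and unit variance
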
     # ==========================================================================
-    def norm_train_cv_data(self, real_train, real_cv, attack_train, attack_cv, one_class_flag=False):
+    def norm_train_cv_data(self,
+                           real_train,
+                           real_cv,
+                           attack_train,
+                           attack_cv,
+                           one_class_flag=False):
         """
         Mean-std normalization of train and cross-validation data arrays.
 
@@ -510,39 +531,52 @@ class VideoSvmPadAlgorithm(Algorithm):
 
             features_train = np.vstack([real_train, attack_train])
 
-            features_train_norm, features_mean, features_std = self.mean_std_normalize(features_train)
+            features_train_norm, features_mean, features_std = self.mean_std_normalize(
+                features_train)
 
             real_train_norm = features_train_norm[0:real_train.shape[0], :]
 
             attack_train_norm = features_train_norm[real_train.shape[0]:, :]
 
-            real_cv_norm, _, _ = self.mean_std_normalize(real_cv, features_mean, features_std)
+            real_cv_norm, _, _ = self.mean_std_normalize(
+                real_cv, features_mean, features_std)
 
-            attack_cv_norm, _, _ = self.mean_std_normalize(attack_cv, features_mean, features_std)
+            attack_cv_norm, _, _ = self.mean_std_normalize(
+                attack_cv, features_mean, features_std)
 
         else:  # one-class SVM case
 
             # only real class used for training in one class SVM:
-            real_train_norm, features_mean, features_std = self.mean_std_normalize(real_train)
+            real_train_norm, features_mean, features_std = self.mean_std_normalize(
+                real_train)
 
-            attack_train_norm, _, _ = self.mean_std_normalize(attack_train, features_mean, features_std)
+            attack_train_norm, _, _ = self.mean_std_normalize(
+                attack_train, features_mean, features_std)
 
-            real_cv_norm, _, _ = self.mean_std_normalize(real_cv, features_mean, features_std)
+            real_cv_norm, _, _ = self.mean_std_normalize(
+                real_cv, features_mean, features_std)
 
-            attack_cv_norm, _, _ = self.mean_std_normalize(attack_cv, features_mean, features_std)
+            attack_cv_norm, _, _ = self.mean_std_normalize(
+                attack_cv, features_mean, features_std)
 
         return real_train_norm, real_cv_norm, attack_train_norm, attack_cv_norm
 
     # ==========================================================================
-    def train_svm(self, training_features, n_samples=10000,
-                  machine_type='C_SVC', kernel_type='RBF',
-                  trainer_grid_search_params={'cost': [2 ** p for p in range(-5, 16, 2)],
-                                              'gamma': [2 ** p for p in range(-15, 4, 2)]},
-                  mean_std_norm_flag=False,
-                  projector_file="",
-                  save_debug_data_flag=True,
-                  reduced_train_data_flag=False,
-                  n_train_samples=50000):
+    def train_svm(
+            self,
+            training_features,
+            n_samples=10000,
+            machine_type='C_SVC',
+            kernel_type='RBF',
+            trainer_grid_search_params={
+                'cost': [2**p for p in range(-5, 16, 2)],
+                'gamma': [2**p for p in range(-15, 4, 2)]
+            },
+            mean_std_norm_flag=False,
+            projector_file="",
+            save_debug_data_flag=True,
+            reduced_train_data_flag=False,
+            n_train_samples=50000):
         """
         First, this function tunes the hyper-parameters of the SVM classifier using
         grid search on the sub-sets of training data. Train and cross-validation
@@ -602,49 +636,58 @@ class VideoSvmPadAlgorithm(Algorithm):
             A trained SVM machine.
         """
 
-        one_class_flag = (machine_type == 'ONE_CLASS')  # True if one-class SVM is used
+        one_class_flag = (
+            machine_type == 'ONE_CLASS')  # True if one-class SVM is used
 
         # get the data for the hyper-parameter grid-search:
-        real_train, real_cv, attack_train, attack_cv = self.prepare_data_for_hyper_param_grid_search(training_features,
-                                                                                                     n_samples)
+        real_train, real_cv, attack_train, attack_cv = self.prepare_data_for_hyper_param_grid_search(
+            training_features, n_samples)
 
         if mean_std_norm_flag:
             # normalize the data:
-            real_train, real_cv, attack_train, attack_cv = self.norm_train_cv_data(real_train, real_cv, attack_train,
-                                                                                   attack_cv,
-                                                                                   one_class_flag)
+            real_train, real_cv, attack_train, attack_cv = self.norm_train_cv_data(
+                real_train, real_cv, attack_train, attack_cv, one_class_flag)
 
-        precisions_cv = []  # for saving the precision on the cross-validation set
+        precisions_cv = [
+        ]  # for saving the precision on the cross-validation set
 
         precisions_train = []
 
         trainer_grid_search_params_list = self.combinations(
-            trainer_grid_search_params)  # list containing all combinations of params
+            trainer_grid_search_params
+        )  # list containing all combinations of params
 
         for trainer_grid_search_param in trainer_grid_search_params_list:
 
             # initialize the SVM trainer:
-            trainer = bob.learn.libsvm.Trainer(machine_type=machine_type,
-                                               kernel_type=kernel_type,
-                                               probability=True)
+            trainer = bob.learn.libsvm.Trainer(
+                machine_type=machine_type,
+                kernel_type=kernel_type,
+                probability=True)
 
             for key in trainer_grid_search_param.keys():
-                setattr(trainer, key, trainer_grid_search_param[key])  # set the params of trainer
+                setattr(trainer, key, trainer_grid_search_param[
+                    key])  # set the params of trainer
 
             if not (one_class_flag):  # two-class SVM case
 
-                data = [np.copy(real_train),
-                        np.copy(attack_train)]  # data used for training the machine in the grid-search
+                data = [
+                    np.copy(real_train),
+                    np.copy(attack_train)
+                ]  # data used for training the machine in the grid-search
 
             else:  # one class SVM case
 
-                data = [np.copy(real_train)]  # only real class is used for training
+                data = [np.copy(real_train)
+                        ]  # only real class is used for training
 
             machine = trainer.train(data)  # train the machine
 
-            precision_cv = self.comp_prediction_precision(machine, np.copy(real_cv), np.copy(attack_cv))
+            precision_cv = self.comp_prediction_precision(
+                machine, np.copy(real_cv), np.copy(attack_cv))
 
-            precision_train = self.comp_prediction_precision(machine, np.copy(real_train), np.copy(attack_train))
+            precision_train = self.comp_prediction_precision(
+                machine, np.copy(real_train), np.copy(attack_train))
 
             precisions_cv.append(precision_cv)
 
@@ -654,20 +697,23 @@ class VideoSvmPadAlgorithm(Algorithm):
             del machine
             del trainer
 
-        selected_params = trainer_grid_search_params_list[
-            np.argmax(precisions_cv)]  # best SVM parameters according to CV set
+        selected_params = trainer_grid_search_params_list[np.argmax(
+            precisions_cv)]  # best SVM parameters according to CV set
 
-        trainer = bob.learn.libsvm.Trainer(machine_type=machine_type,
-                                           kernel_type=kernel_type,
-                                           probability=True)
+        trainer = bob.learn.libsvm.Trainer(
+            machine_type=machine_type,
+            kernel_type=kernel_type,
+            probability=True)
 
         for key in selected_params.keys():
-            setattr(trainer, key, selected_params[key])  # set the params of trainer
+            setattr(trainer, key,
+                    selected_params[key])  # set the params of trainer
 
        # Save the data, which is useful for debugging.
         if save_debug_data_flag:
 
-            debug_file = os.path.join(os.path.split(projector_file)[0], "debug_data.hdf5")
+            debug_file = os.path.join(
+                os.path.split(projector_file)[0], "debug_data.hdf5")
             debug_dict = {}
             debug_dict['precisions_train'] = precisions_train
             debug_dict['precisions_cv'] = precisions_cv
@@ -675,36 +721,44 @@ class VideoSvmPadAlgorithm(Algorithm):
             for key in selected_params.keys():
                 debug_dict[key] = selected_params[key]
 
-            f = bob.io.base.HDF5File(debug_file, 'w')  # open hdf5 file to save the debug data
+            f = bob.io.base.HDF5File(
+                debug_file, 'w')  # open hdf5 file to save the debug data
             for key in debug_dict.keys():
                 f.set(key, debug_dict[key])
             del f
 
         # training_features[0] - training features for the REAL class.
-        real = self.convert_and_prepare_features(training_features[0])  # output is array
+        real = self.convert_and_prepare_features(
+            training_features[0])  # output is array
         # training_features[1] - training features for the ATTACK class.
-        attack = self.convert_and_prepare_features(training_features[1])  # output is array
+        attack = self.convert_and_prepare_features(
+            training_features[1])  # output is array
 
         if mean_std_norm_flag:
             # Normalize the data:
             if not (one_class_flag):  # two-class SVM case
 
                 features = np.vstack([real, attack])
-                features_norm, features_mean, features_std = self.mean_std_normalize(features)
-                real = features_norm[0:real.shape[0], :]  # The array is now normalized
-                attack = features_norm[real.shape[0]:, :]  # The array is now normalized
+                features_norm, features_mean, features_std = self.mean_std_normalize(
+                    features)
+                real = features_norm[0:real.shape[
+                    0], :]  # The array is now normalized
+                attack = features_norm[real.shape[
+                    0]:, :]  # The array is now normalized
 
             else:  # one-class SVM case
 
                 real, features_mean, features_std = self.mean_std_normalize(
                     real)  # use only real class to compute normalizers
-                attack = self.mean_std_normalize(attack, features_mean, features_std)
+                attack, _, _ = self.mean_std_normalize(
+                    attack, features_mean, features_std)
                 # ``real`` and ``attack`` arrays are now normalized
 
         if reduced_train_data_flag:
             # uniformly select subsets of features:
             real = self.select_quasi_uniform_data_subset(real, n_train_samples)
-            attack = self.select_quasi_uniform_data_subset(attack, n_train_samples)
+            attack = self.select_quasi_uniform_data_subset(
+                attack, n_train_samples)
 
         if not (one_class_flag):  # two-class SVM case
 
@@ -743,18 +797,20 @@ class VideoSvmPadAlgorithm(Algorithm):
             This file should be readable with the :py:meth:`load_projector` function.
         """
 
-        machine = self.train_svm(training_features=training_features,
-                                 n_samples=self.n_samples,
-                                 machine_type=self.machine_type,
-                                 kernel_type=self.kernel_type,
-                                 trainer_grid_search_params=self.trainer_grid_search_params,
-                                 mean_std_norm_flag=self.mean_std_norm_flag,
-                                 projector_file=projector_file,
-                                 save_debug_data_flag=self.save_debug_data_flag,
-                                 reduced_train_data_flag=self.reduced_train_data_flag,
-                                 n_train_samples=self.n_train_samples)
+        machine = self.train_svm(
+            training_features=training_features,
+            n_samples=self.n_samples,
+            machine_type=self.machine_type,
+            kernel_type=self.kernel_type,
+            trainer_grid_search_params=self.trainer_grid_search_params,
+            mean_std_norm_flag=self.mean_std_norm_flag,
+            projector_file=projector_file,
+            save_debug_data_flag=self.save_debug_data_flag,
+            reduced_train_data_flag=self.reduced_train_data_flag,
+            n_train_samples=self.n_train_samples)
 
-        f = bob.io.base.HDF5File(projector_file, 'w')  # open hdf5 file to save to
+        f = bob.io.base.HDF5File(projector_file,
+                                 'w')  # open hdf5 file to save to
 
         machine.save(f)  # save the machine and normalization parameters
 
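Editor's note: ``train_projector`` persists the machine and its normalizers through ``bob.io.base.HDF5File``, the same pattern used for the debug data earlier in this file; a self-contained sketch of that pattern with toy values (the file name and keys here are illustrative, not the actual ones):

    import numpy as np
    import bob.io.base

    debug_dict = {
        'precisions_train': np.array([0.91, 0.88]),  # toy values
        'precisions_cv': np.array([0.86, 0.84]),
    }

    f = bob.io.base.HDF5File('debug_data.hdf5', 'w')  # open hdf5 file for writing
    for key in debug_dict.keys():
        f.set(key, debug_dict[key])
    del f  # deleting the handle closes and flushes the file
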
@@ -812,7 +868,9 @@ class VideoSvmPadAlgorithm(Algorithm):
             readable with the ``read_feature`` function.
         """
 
-        if isinstance(feature, FrameContainer):  # if FrameContainer convert to 2D numpy array
+        if isinstance(
+                feature,
+                FrameContainer):  # if FrameContainer convert to 2D numpy array
 
             features_array = self.convert_frame_cont_to_array(feature)
 
@@ -822,11 +880,13 @@ class VideoSvmPadAlgorithm(Algorithm):
 
         if not (self.machine_type == 'ONE_CLASS'):  # two-class SVM case
 
-            probabilities = self.machine.predict_class_and_probabilities(features_array)[1]
+            probabilities = self.machine.predict_class_and_probabilities(
+                features_array)[1]
 
         else:
 
-            probabilities = self.machine.predict_class_and_scores(features_array)[1]
+            probabilities = self.machine.predict_class_and_scores(
+                features_array)[1]
 
         return probabilities
 
@@ -858,7 +918,8 @@ class VideoSvmPadAlgorithm(Algorithm):
 
         if self.frame_level_scores_flag:
 
-            score = toscore[:, 0]  # here score is a 1D array containing scores for each frame
+            score = toscore[:,
+                            0]  # here score is a 1D array containing scores for each frame
 
         else:
 
@@ -887,7 +948,8 @@ class VideoSvmPadAlgorithm(Algorithm):
             A list containing the scores.
         """
 
-        scores = self.score(toscore)  # returns float score or 1D array of scores
+        scores = self.score(
+            toscore)  # returns float score or 1D array of scores
 
         if isinstance(scores, np.float):  # if a single score
 
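Editor's note: putting the two scoring modes together: with ``frame_level_scores_flag`` set, the first column of the projection is returned as one score per frame; otherwise the frames are averaged into a single video-level score. The tail of the ``score`` method is truncated in this hunk, so the averaging below mirrors the LR algorithm's video-level branch earlier in this patch:

    import numpy as np

    toscore = np.array([[0.9, 0.1], [0.8, 0.2], [0.95, 0.05]])  # per-frame probabilities

    frame_level_scores_flag = False
    if frame_level_scores_flag:
        score = toscore[:, 0]             # one score per frame
    else:
        score = [np.mean(toscore[:, 0])]  # a single score per video
    # score == [0.8833...]
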
diff --git a/bob/pad/face/algorithm/__init__.py b/bob/pad/face/algorithm/__init__.py
index eeb27d59..75d02e10 100644
--- a/bob/pad/face/algorithm/__init__.py
+++ b/bob/pad/face/algorithm/__init__.py
@@ -3,6 +3,7 @@ from .VideoCascadeSvmPadAlgorithm import VideoCascadeSvmPadAlgorithm
 from .VideoLRPadAlgorithm import VideoLRPadAlgorithm
 from .VideoGmmPadAlgorithm import VideoGmmPadAlgorithm
 
+
 def __appropriate__(*args):
     """Says object was actually declared here, and not in the import module.
     Fixing sphinx warnings of not being able to find classes, when path is
diff --git a/bob/pad/face/config/aggregated_db.py b/bob/pad/face/config/aggregated_db.py
index 89b4b641..fa9d93a6 100644
--- a/bob/pad/face/config/aggregated_db.py
+++ b/bob/pad/face/config/aggregated_db.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-
 """Aggregated Db is a database for face PAD experiments.
 This database aggregates the data from 3 publicly available data-sets:
 `REPLAYATTACK`_, `REPLAY-MOBILE`_ and `MSU MFSD`_.
@@ -20,7 +19,7 @@ from bob.pad.face.database import AggregatedDbPadDatabase
 ORIGINAL_DIRECTORY = "[YOUR_AGGREGATED_DB_DIRECTORIES]"
 """Value of ``~/.bob_bio_databases.txt`` for this database"""
 
-ORIGINAL_EXTENSION = ".mov" # extension of the data files
+ORIGINAL_EXTENSION = ".mov"  # extension of the data files
 
 database = AggregatedDbPadDatabase(
     protocol='grandtest',
diff --git a/bob/pad/face/config/algorithm/video_cascade_svm_pad_algorithm.py b/bob/pad/face/config/algorithm/video_cascade_svm_pad_algorithm.py
index f0ee742e..5856ef3d 100644
--- a/bob/pad/face/config/algorithm/video_cascade_svm_pad_algorithm.py
+++ b/bob/pad/face/config/algorithm/video_cascade_svm_pad_algorithm.py
@@ -2,7 +2,6 @@
 
 from bob.pad.face.algorithm import VideoCascadeSvmPadAlgorithm
 
-
 #=======================================================================================
 # Define instances here:
 
@@ -13,12 +12,13 @@ N = 2
 POS_SCORES_SLOPE = 0.01
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_n2_gamma_02 = VideoCascadeSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                        kernel_type = KERNEL_TYPE,
-                                        svm_kwargs = SVM_KWARGS,
-                                        N = N,
-                                        pos_scores_slope = POS_SCORES_SLOPE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_n2_gamma_02 = VideoCascadeSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    svm_kwargs=SVM_KWARGS,
+    N=N,
+    pos_scores_slope=POS_SCORES_SLOPE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 MACHINE_TYPE = 'ONE_CLASS'
 KERNEL_TYPE = 'RBF'
@@ -27,12 +27,13 @@ N = 2
 POS_SCORES_SLOPE = 0.01
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_n2_gamma_01 = VideoCascadeSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                        kernel_type = KERNEL_TYPE,
-                                        svm_kwargs = SVM_KWARGS,
-                                        N = N,
-                                        pos_scores_slope = POS_SCORES_SLOPE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_n2_gamma_01 = VideoCascadeSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    svm_kwargs=SVM_KWARGS,
+    N=N,
+    pos_scores_slope=POS_SCORES_SLOPE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 MACHINE_TYPE = 'ONE_CLASS'
 KERNEL_TYPE = 'RBF'
@@ -41,12 +42,13 @@ N = 2
 POS_SCORES_SLOPE = 0.01
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_n2_gamma_005 = VideoCascadeSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                        kernel_type = KERNEL_TYPE,
-                                        svm_kwargs = SVM_KWARGS,
-                                        N = N,
-                                        pos_scores_slope = POS_SCORES_SLOPE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_n2_gamma_005 = VideoCascadeSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    svm_kwargs=SVM_KWARGS,
+    N=N,
+    pos_scores_slope=POS_SCORES_SLOPE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 MACHINE_TYPE = 'ONE_CLASS'
 KERNEL_TYPE = 'RBF'
@@ -55,13 +57,13 @@ N = 2
 POS_SCORES_SLOPE = 0.01
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_n2_gamma_001 = VideoCascadeSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                        kernel_type = KERNEL_TYPE,
-                                        svm_kwargs = SVM_KWARGS,
-                                        N = N,
-                                        pos_scores_slope = POS_SCORES_SLOPE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
-
+algorithm_n2_gamma_001 = VideoCascadeSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    svm_kwargs=SVM_KWARGS,
+    N=N,
+    pos_scores_slope=POS_SCORES_SLOPE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 #=======================================================================================
 
@@ -72,12 +74,13 @@ N = 10
 POS_SCORES_SLOPE = 0.01
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_n10_gamma_01 = VideoCascadeSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                        kernel_type = KERNEL_TYPE,
-                                        svm_kwargs = SVM_KWARGS,
-                                        N = N,
-                                        pos_scores_slope = POS_SCORES_SLOPE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_n10_gamma_01 = VideoCascadeSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    svm_kwargs=SVM_KWARGS,
+    N=N,
+    pos_scores_slope=POS_SCORES_SLOPE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 MACHINE_TYPE = 'ONE_CLASS'
 KERNEL_TYPE = 'RBF'
@@ -86,12 +89,13 @@ N = 10
 POS_SCORES_SLOPE = 0.01
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_n10_gamma_005 = VideoCascadeSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                        kernel_type = KERNEL_TYPE,
-                                        svm_kwargs = SVM_KWARGS,
-                                        N = N,
-                                        pos_scores_slope = POS_SCORES_SLOPE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_n10_gamma_005 = VideoCascadeSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    svm_kwargs=SVM_KWARGS,
+    N=N,
+    pos_scores_slope=POS_SCORES_SLOPE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 MACHINE_TYPE = 'ONE_CLASS'
 KERNEL_TYPE = 'RBF'
@@ -100,12 +104,13 @@ N = 10
 POS_SCORES_SLOPE = 0.01
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_n10_gamma_001 = VideoCascadeSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                        kernel_type = KERNEL_TYPE,
-                                        svm_kwargs = SVM_KWARGS,
-                                        N = N,
-                                        pos_scores_slope = POS_SCORES_SLOPE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_n10_gamma_001 = VideoCascadeSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    svm_kwargs=SVM_KWARGS,
+    N=N,
+    pos_scores_slope=POS_SCORES_SLOPE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 MACHINE_TYPE = 'ONE_CLASS'
 KERNEL_TYPE = 'RBF'
@@ -114,13 +119,13 @@ N = 10
 POS_SCORES_SLOPE = 0.01
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_n10_gamma_0005 = VideoCascadeSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                        kernel_type = KERNEL_TYPE,
-                                        svm_kwargs = SVM_KWARGS,
-                                        N = N,
-                                        pos_scores_slope = POS_SCORES_SLOPE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
-
+algorithm_n10_gamma_0005 = VideoCascadeSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    svm_kwargs=SVM_KWARGS,
+    N=N,
+    pos_scores_slope=POS_SCORES_SLOPE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 #=======================================================================================
 
@@ -131,12 +136,13 @@ N = 20
 POS_SCORES_SLOPE = 0.01
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_n20_gamma_05 = VideoCascadeSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                        kernel_type = KERNEL_TYPE,
-                                        svm_kwargs = SVM_KWARGS,
-                                        N = N,
-                                        pos_scores_slope = POS_SCORES_SLOPE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_n20_gamma_05 = VideoCascadeSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    svm_kwargs=SVM_KWARGS,
+    N=N,
+    pos_scores_slope=POS_SCORES_SLOPE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 MACHINE_TYPE = 'ONE_CLASS'
 KERNEL_TYPE = 'RBF'
@@ -145,12 +151,13 @@ N = 20
 POS_SCORES_SLOPE = 0.01
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_n20_gamma_02 = VideoCascadeSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                        kernel_type = KERNEL_TYPE,
-                                        svm_kwargs = SVM_KWARGS,
-                                        N = N,
-                                        pos_scores_slope = POS_SCORES_SLOPE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_n20_gamma_02 = VideoCascadeSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    svm_kwargs=SVM_KWARGS,
+    N=N,
+    pos_scores_slope=POS_SCORES_SLOPE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 MACHINE_TYPE = 'ONE_CLASS'
 KERNEL_TYPE = 'RBF'
@@ -159,12 +166,13 @@ N = 20
 POS_SCORES_SLOPE = 0.01
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_n20_gamma_01 = VideoCascadeSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                        kernel_type = KERNEL_TYPE,
-                                        svm_kwargs = SVM_KWARGS,
-                                        N = N,
-                                        pos_scores_slope = POS_SCORES_SLOPE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_n20_gamma_01 = VideoCascadeSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    svm_kwargs=SVM_KWARGS,
+    N=N,
+    pos_scores_slope=POS_SCORES_SLOPE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 MACHINE_TYPE = 'ONE_CLASS'
 KERNEL_TYPE = 'RBF'
@@ -173,12 +181,13 @@ N = 20
 POS_SCORES_SLOPE = 0.01
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_n20_gamma_005 = VideoCascadeSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                        kernel_type = KERNEL_TYPE,
-                                        svm_kwargs = SVM_KWARGS,
-                                        N = N,
-                                        pos_scores_slope = POS_SCORES_SLOPE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_n20_gamma_005 = VideoCascadeSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    svm_kwargs=SVM_KWARGS,
+    N=N,
+    pos_scores_slope=POS_SCORES_SLOPE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 MACHINE_TYPE = 'ONE_CLASS'
 KERNEL_TYPE = 'RBF'
@@ -187,12 +196,13 @@ N = 20
 POS_SCORES_SLOPE = 0.01
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_n20_gamma_001 = VideoCascadeSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                        kernel_type = KERNEL_TYPE,
-                                        svm_kwargs = SVM_KWARGS,
-                                        N = N,
-                                        pos_scores_slope = POS_SCORES_SLOPE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_n20_gamma_001 = VideoCascadeSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    svm_kwargs=SVM_KWARGS,
+    N=N,
+    pos_scores_slope=POS_SCORES_SLOPE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 MACHINE_TYPE = 'ONE_CLASS'
 KERNEL_TYPE = 'RBF'
@@ -201,13 +211,13 @@ N = 20
 POS_SCORES_SLOPE = 0.01
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_n20_gamma_0005 = VideoCascadeSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                        kernel_type = KERNEL_TYPE,
-                                        svm_kwargs = SVM_KWARGS,
-                                        N = N,
-                                        pos_scores_slope = POS_SCORES_SLOPE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
-
+algorithm_n20_gamma_0005 = VideoCascadeSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    svm_kwargs=SVM_KWARGS,
+    N=N,
+    pos_scores_slope=POS_SCORES_SLOPE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 MACHINE_TYPE = 'ONE_CLASS'
 KERNEL_TYPE = 'RBF'
@@ -216,13 +226,13 @@ N = 20
 POS_SCORES_SLOPE = 0.01
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_n20_gamma_0001 = VideoCascadeSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                        kernel_type = KERNEL_TYPE,
-                                        svm_kwargs = SVM_KWARGS,
-                                        N = N,
-                                        pos_scores_slope = POS_SCORES_SLOPE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
-
+algorithm_n20_gamma_0001 = VideoCascadeSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    svm_kwargs=SVM_KWARGS,
+    N=N,
+    pos_scores_slope=POS_SCORES_SLOPE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 #=======================================================================================
 
@@ -233,13 +243,13 @@ N = 2
 POS_SCORES_SLOPE = 0.01
 FRAME_LEVEL_SCORES_FLAG = False
 
-algorithm_n2_gamma_01_video_level = VideoCascadeSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                        kernel_type = KERNEL_TYPE,
-                                        svm_kwargs = SVM_KWARGS,
-                                        N = N,
-                                        pos_scores_slope = POS_SCORES_SLOPE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
-
+algorithm_n2_gamma_01_video_level = VideoCascadeSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    svm_kwargs=SVM_KWARGS,
+    N=N,
+    pos_scores_slope=POS_SCORES_SLOPE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 #=======================================================================================
 
@@ -252,15 +262,10 @@ N = 2
 POS_SCORES_SLOPE = 0.01
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_n2_two_class_svm_c1_gamma_001 = VideoCascadeSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                                                      kernel_type = KERNEL_TYPE,
-                                                                      svm_kwargs = TRAINER_GRID_SEARCH_PARAMS,
-                                                                      N = N,
-                                                                      pos_scores_slope = POS_SCORES_SLOPE,
-                                                                      frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
-
-
-
-
-
-
+algorithm_n2_two_class_svm_c1_gamma_001 = VideoCascadeSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    svm_kwargs=TRAINER_GRID_SEARCH_PARAMS,
+    N=N,
+    pos_scores_slope=POS_SCORES_SLOPE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
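
Editor's note: each of the instances defined above is importable as a ready-made configuration; a usage sketch (only the import is taken from this file, the surrounding experiment wiring is assumed):

    # import one of the preconfigured cascade-SVM instances defined above:
    from bob.pad.face.config.algorithm.video_cascade_svm_pad_algorithm import (
        algorithm_n2_gamma_02)

    # the object is a fully parameterized VideoCascadeSvmPadAlgorithm,
    # ready to be passed to a PAD experiment as its algorithm:
    algorithm = algorithm_n2_gamma_02
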
diff --git a/bob/pad/face/config/algorithm/video_gmm_pad_algorithm.py b/bob/pad/face/config/algorithm/video_gmm_pad_algorithm.py
index e46e285d..67787390 100644
--- a/bob/pad/face/config/algorithm/video_gmm_pad_algorithm.py
+++ b/bob/pad/face/config/algorithm/video_gmm_pad_algorithm.py
@@ -2,64 +2,62 @@
 
 from bob.pad.face.algorithm import VideoGmmPadAlgorithm
 
-
 #=======================================================================================
 # Define instances here:
 
 N_COMPONENTS = 2
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_2 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_2 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 3
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_3 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_3 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 4
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_4 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_4 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 5
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_5 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_5 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 6
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_6 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_6 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 7
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_7 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_7 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 8
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_8 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_8 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 9
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_9 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_9 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 10
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_10 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
-
+algorithm_gmm_10 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 #=======================================================================================
 # above 10 Gaussians:
@@ -67,33 +65,32 @@ algorithm_gmm_10 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
 N_COMPONENTS = 12
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_12 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_12 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 14
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_14 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_14 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 16
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_16 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_16 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 18
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_18 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_18 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 20
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_20 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
-
+algorithm_gmm_20 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 #=======================================================================================
 # above 20 Gaussians:
@@ -101,39 +98,38 @@ algorithm_gmm_20 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
 N_COMPONENTS = 25
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_25 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_25 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 30
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_30 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_30 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 35
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_35 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_35 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 40
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_40 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_40 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 45
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_45 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_45 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 50
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_50 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
-
+algorithm_gmm_50 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 #=======================================================================================
 # above 50 Gaussians:
@@ -141,34 +137,32 @@ algorithm_gmm_50 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
 N_COMPONENTS = 60
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_60 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_60 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 70
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_70 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_70 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 80
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_80 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_80 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 90
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_90 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_90 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 100
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_100 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
-
-
+algorithm_gmm_100 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 #=======================================================================================
 # 50 Gaussians, different random seeds:
@@ -177,81 +171,88 @@ N_COMPONENTS = 50
 RANDOM_STATE = 0
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_50_0 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        random_state = RANDOM_STATE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_50_0 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS,
+    random_state=RANDOM_STATE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 50
 RANDOM_STATE = 1
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_50_1 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        random_state = RANDOM_STATE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_50_1 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS,
+    random_state=RANDOM_STATE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 50
 RANDOM_STATE = 2
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_50_2 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        random_state = RANDOM_STATE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_50_2 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS,
+    random_state=RANDOM_STATE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 50
 RANDOM_STATE = 3
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_50_3 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        random_state = RANDOM_STATE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_50_3 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS,
+    random_state=RANDOM_STATE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 50
 RANDOM_STATE = 4
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_50_4 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        random_state = RANDOM_STATE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_50_4 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS,
+    random_state=RANDOM_STATE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 50
 RANDOM_STATE = 5
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_50_5 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        random_state = RANDOM_STATE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_50_5 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS,
+    random_state=RANDOM_STATE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 50
 RANDOM_STATE = 6
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_50_6 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        random_state = RANDOM_STATE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_50_6 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS,
+    random_state=RANDOM_STATE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 50
 RANDOM_STATE = 7
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_50_7 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        random_state = RANDOM_STATE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_50_7 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS,
+    random_state=RANDOM_STATE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 50
 RANDOM_STATE = 8
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_50_8 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        random_state = RANDOM_STATE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm_gmm_50_8 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS,
+    random_state=RANDOM_STATE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
 N_COMPONENTS = 50
 RANDOM_STATE = 9
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm_gmm_50_9 = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                        random_state = RANDOM_STATE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
-
-
-
+algorithm_gmm_50_9 = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS,
+    random_state=RANDOM_STATE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
diff --git a/bob/pad/face/config/algorithm/video_svm_pad_algorithm.py b/bob/pad/face/config/algorithm/video_svm_pad_algorithm.py
index 639279d8..10fd1a70 100644
--- a/bob/pad/face/config/algorithm/video_svm_pad_algorithm.py
+++ b/bob/pad/face/config/algorithm/video_svm_pad_algorithm.py
@@ -2,7 +2,6 @@
 
 from bob.pad.face.algorithm import VideoSvmPadAlgorithm
 
-
 #=======================================================================================
 # Define instances here:
 
@@ -10,31 +9,40 @@ machine_type = 'C_SVC'
 kernel_type = 'RBF'
 n_samples = 10000
 # trainer_grid_search_params = {'cost': [2**p for p in range(-5, 16, 2)], 'gamma': [2**p for p in range(-15, 4, 2)]}
-trainer_grid_search_params = {'cost': [2**p for p in range(-3, 14, 2)], 'gamma': [2**p for p in range(-15, 0, 2)]}
+trainer_grid_search_params = {
+    'cost': [2**p for p in range(-3, 14, 2)],
+    'gamma': [2**p for p in range(-15, 0, 2)]
+}
 mean_std_norm_flag = True
-frame_level_scores_flag = False # one score per video(!) in this case
-
-video_svm_pad_algorithm_10k_grid_mean_std = VideoSvmPadAlgorithm(machine_type = machine_type,
-                                                                 kernel_type = kernel_type,
-                                                                 n_samples = n_samples,
-                                                                 trainer_grid_search_params = trainer_grid_search_params,
-                                                                 mean_std_norm_flag = mean_std_norm_flag,
-                                                                 frame_level_scores_flag = frame_level_scores_flag)
-
-frame_level_scores_flag = True # one score per frame(!) in this case
-
-video_svm_pad_algorithm_10k_grid_mean_std_frame_level = VideoSvmPadAlgorithm(machine_type = machine_type,
-                                                                             kernel_type = kernel_type,
-                                                                             n_samples = n_samples,
-                                                                             trainer_grid_search_params = trainer_grid_search_params,
-                                                                             mean_std_norm_flag = mean_std_norm_flag,
-                                                                             frame_level_scores_flag = frame_level_scores_flag)
-
-trainer_grid_search_params = {'cost': [1], 'gamma': [0]} # set the default LibSVM parameters
-
-video_svm_pad_algorithm_default_svm_param_mean_std_frame_level = VideoSvmPadAlgorithm(machine_type = machine_type,
-                                                                                      kernel_type = kernel_type,
-                                                                                      n_samples = n_samples,
-                                                                                      trainer_grid_search_params = trainer_grid_search_params,
-                                                                                      mean_std_norm_flag = mean_std_norm_flag,
-                                                                                      frame_level_scores_flag = frame_level_scores_flag)
+frame_level_scores_flag = False  # one score per video(!) in this case
+
+video_svm_pad_algorithm_10k_grid_mean_std = VideoSvmPadAlgorithm(
+    machine_type=machine_type,
+    kernel_type=kernel_type,
+    n_samples=n_samples,
+    trainer_grid_search_params=trainer_grid_search_params,
+    mean_std_norm_flag=mean_std_norm_flag,
+    frame_level_scores_flag=frame_level_scores_flag)
+
+frame_level_scores_flag = True  # one score per frame(!) in this case
+
+video_svm_pad_algorithm_10k_grid_mean_std_frame_level = VideoSvmPadAlgorithm(
+    machine_type=machine_type,
+    kernel_type=kernel_type,
+    n_samples=n_samples,
+    trainer_grid_search_params=trainer_grid_search_params,
+    mean_std_norm_flag=mean_std_norm_flag,
+    frame_level_scores_flag=frame_level_scores_flag)
+
+trainer_grid_search_params = {
+    'cost': [1],
+    'gamma': [0]
+}  # set the default LibSVM parameters
+
+video_svm_pad_algorithm_default_svm_param_mean_std_frame_level = VideoSvmPadAlgorithm(
+    machine_type=machine_type,
+    kernel_type=kernel_type,
+    n_samples=n_samples,
+    trainer_grid_search_params=trainer_grid_search_params,
+    mean_std_norm_flag=mean_std_norm_flag,
+    frame_level_scores_flag=frame_level_scores_flag)
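
Editor's note: a quick check of what the reformatted grid above implies for training cost; each (cost, gamma) pair triggers one SVM training during the grid search:

    cost = [2**p for p in range(-3, 14, 2)]    # 9 values: 2**-3 .. 2**13
    gamma = [2**p for p in range(-15, 0, 2)]   # 8 values: 2**-15 .. 2**-1
    n_trainings = len(cost) * len(gamma)       # 72 parameter combinations

    # the commented-out default grid (cost: 11 values, gamma: 10 values)
    # would instead require 110 trainings
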
diff --git a/bob/pad/face/config/database/aggregated_db.py b/bob/pad/face/config/database/aggregated_db.py
index e12abfcd..65e5d20a 100644
--- a/bob/pad/face/config/database/aggregated_db.py
+++ b/bob/pad/face/config/database/aggregated_db.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-
 """Aggregated Db is a database for face PAD experiments.
 This database aggregates the data from 3 publicly available data-sets:
 `REPLAYATTACK`_, `REPLAY-MOBILE`_ and `MSU MFSD`_.
@@ -20,7 +19,7 @@ from bob.pad.face.database import AggregatedDbPadDatabase
 original_directory = "[YOUR_AGGREGATED_DB_DIRECTORIES]"
 """Value of ``~/.bob_bio_databases.txt`` for this database"""
 
-original_extension = ".mov" # extension of the data files
+original_extension = ".mov"  # extension of the data files
 
 database = AggregatedDbPadDatabase(
     protocol='grandtest',
@@ -47,4 +46,4 @@ must be separated with a space. See the following note with an example of
 .. note::
 
     [YOUR_AGGREGATED_DB_DIRECTORIES] = <PATH_TO_REPLAY_ATTACK> <PATH_TO_REPLAY_MOBILE> <PATH_TO_MSU_MFSD>
-"""
\ No newline at end of file
+"""
diff --git a/bob/pad/face/config/database/mifs.py b/bob/pad/face/config/database/mifs.py
index d5db7dbc..c29b3638 100644
--- a/bob/pad/face/config/database/mifs.py
+++ b/bob/pad/face/config/database/mifs.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-
 """`MIFS`_ is a face makeup spoofing database adapted for face PAD experiments.
 
 Database assembled from a dataset consisting of 107 makeup-transformations taken
@@ -20,14 +19,12 @@ the link.
 
 from bob.pad.face.database.mifs import MIFSPadDatabase
 
-
 # Directory where the data files are stored.
 # This directory is given in the .bob_bio_databases.txt file located in your home directory
 original_directory = "[YOUR_MIFS_DATABASE_DIRECTORY]"
 """Value of ``~/.bob_bio_databases.txt`` for this database"""
 
-original_extension = ".jpg" # extension of the data files
-
+original_extension = ".jpg"  # extension of the data files
 
 database = MIFSPadDatabase(
     protocol='grandtest',
diff --git a/bob/pad/face/config/database/msu_mfsd.py b/bob/pad/face/config/database/msu_mfsd.py
index ec2cd20a..c8726653 100644
--- a/bob/pad/face/config/database/msu_mfsd.py
+++ b/bob/pad/face/config/database/msu_mfsd.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-
 """`MSU MFSD`_ is a database for face PAD experiments.
 
 Database created at MSU, for face-PAD experiments. The public version of the database contains
@@ -18,13 +17,12 @@ the link.
 
 from bob.pad.face.database import MsuMfsdPadDatabase
 
-
 # Directory where the data files are stored.
 # This directory is given in the .bob_bio_databases.txt file located in your home directory
 original_directory = "[YOUR_MSU_MFSD_DIRECTORY]"
 """Value of ``~/.bob_bio_databases.txt`` for this database"""
 
-original_extension = "none" # extension is not used to load the data in the HLDI of this database
+original_extension = "none"  # extension is not used to load the data in the HLDI of this database
 
 database = MsuMfsdPadDatabase(
     protocol='grandtest',
@@ -45,4 +43,4 @@ Notice that ``original_directory`` is set to ``[YOUR_MSU_MFSD_DIRECTORY]``.
 You must make sure to create ``${HOME}/.bob_bio_databases.txt`` setting this
 value to the place where you actually installed the MSU MFSD Database, as
 explained in the section :ref:`bob.pad.face.baselines`.
-"""
\ No newline at end of file
+"""
diff --git a/bob/pad/face/config/database/replay_attack.py b/bob/pad/face/config/database/replay_attack.py
index 197355ce..3a6c4019 100644
--- a/bob/pad/face/config/database/replay_attack.py
+++ b/bob/pad/face/config/database/replay_attack.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-
 """`Replayattack`_ is a database for face PAD experiments.
 
 The Replay-Attack Database for face spoofing consists of 1300 video clips of photo and video attack attempts to 50 clients,
@@ -14,14 +13,12 @@ the link.
 
 from bob.pad.face.database import ReplayPadDatabase
 
-
 # Directory where the data files are stored.
 # This directory is given in the .bob_bio_databases.txt file located in your home directory
 original_directory = "[YOUR_REPLAY_ATTACK_DIRECTORY]"
 """Value of ``~/.bob_bio_databases.txt`` for this database"""
 
-original_extension = ".mov" # extension of the data files
-
+original_extension = ".mov"  # extension of the data files
 
 database = ReplayPadDatabase(
     protocol='grandtest',
@@ -42,4 +39,4 @@ Notice that ``original_directory`` is set to ``[YOUR_REPLAY_ATTACK_DIRECTORY]``.
 You must make sure to create ``${HOME}/.bob_bio_databases.txt`` setting this
 value to the place where you actually installed the Replayattack Database, as
 explained in the section :ref:`bob.pad.face.baselines`.
-"""
\ No newline at end of file
+"""
diff --git a/bob/pad/face/config/database/replay_mobile.py b/bob/pad/face/config/database/replay_mobile.py
index 9f1ae949..58f0c0e4 100644
--- a/bob/pad/face/config/database/replay_mobile.py
+++ b/bob/pad/face/config/database/replay_mobile.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-
 """`Replay-Mobile`_ is a database for face PAD experiments.
 
 The Replay-Mobile Database for face spoofing consists of 1030 video clips of photo and video attack attempts to 40 clients,
@@ -17,14 +16,12 @@ the link.
 
 from bob.pad.face.database import ReplayMobilePadDatabase
 
-
 # Directory where the data files are stored.
 # This directory is given in the .bob_bio_databases.txt file located in your home directory
 original_directory = "[YOUR_REPLAY_MOBILE_DIRECTORY]"
 """Value of ``~/.bob_bio_databases.txt`` for this database"""
 
-original_extension = ".mov" # extension of the data files
-
+original_extension = ".mov"  # extension of the data files
 
 database = ReplayMobilePadDatabase(
     protocol='grandtest',
diff --git a/bob/pad/face/config/extractor/frame_diff_features.py b/bob/pad/face/config/extractor/frame_diff_features.py
index 00fcccc1..cd148678 100644
--- a/bob/pad/face/config/extractor/frame_diff_features.py
+++ b/bob/pad/face/config/extractor/frame_diff_features.py
@@ -2,15 +2,11 @@
 
 from bob.pad.face.extractor import FrameDiffFeatures
 
-
 #=======================================================================================
 # Define instances here:
 
-window_size=20
-overlap=0
-
-frame_diff_feat_extr_w20_over0 = FrameDiffFeatures(window_size=window_size,
-                                                  overlap=overlap)
-
-
+window_size = 20
+overlap = 0
 
+frame_diff_feat_extr_w20_over0 = FrameDiffFeatures(
+    window_size=window_size, overlap=overlap)
diff --git a/bob/pad/face/config/extractor/video_lbp_histogram.py b/bob/pad/face/config/extractor/video_lbp_histogram.py
index ee0ec7f7..db62b37a 100644
--- a/bob/pad/face/config/extractor/video_lbp_histogram.py
+++ b/bob/pad/face/config/extractor/video_lbp_histogram.py
@@ -2,20 +2,20 @@
 
 from bob.pad.face.extractor import VideoLBPHistogram
 
-
 #=======================================================================================
 # Define instances here:
 
-lbptype='uniform'
-elbptype='regular'
-rad=1
-neighbors=8
-circ=False
-dtype=None
+lbptype = 'uniform'
+elbptype = 'regular'
+rad = 1
+neighbors = 8
+circ = False
+dtype = None
 
-video_lbp_histogram_extractor_n8r1_uniform = VideoLBPHistogram(lbptype=lbptype,
-                                                               elbptype=elbptype,
-                                                               rad=rad,
-                                                               neighbors=neighbors,
-                                                               circ=circ,
-                                                               dtype=dtype)
+video_lbp_histogram_extractor_n8r1_uniform = VideoLBPHistogram(
+    lbptype=lbptype,
+    elbptype=elbptype,
+    rad=rad,
+    neighbors=neighbors,
+    circ=circ,
+    dtype=dtype)
diff --git a/bob/pad/face/config/extractor/video_quality_measure.py b/bob/pad/face/config/extractor/video_quality_measure.py
index f6910cc8..f5cc5598 100644
--- a/bob/pad/face/config/extractor/video_quality_measure.py
+++ b/bob/pad/face/config/extractor/video_quality_measure.py
@@ -2,14 +2,12 @@
 
 from bob.pad.face.extractor import VideoQualityMeasure
 
-
 #=======================================================================================
 # Define instances here:
 
-galbally=True
-msu=True
-dtype=None
+galbally = True
+msu = True
+dtype = None
 
-video_quality_measure_galbally_msu = VideoQualityMeasure(galbally=galbally,
-                                                         msu=msu,
-                                                         dtype=dtype)
+video_quality_measure_galbally_msu = VideoQualityMeasure(
+    galbally=galbally, msu=msu, dtype=dtype)
diff --git a/bob/pad/face/config/frame_diff_svm.py b/bob/pad/face/config/frame_diff_svm.py
index 35c6d4f8..4fa07081 100644
--- a/bob/pad/face/config/frame_diff_svm.py
+++ b/bob/pad/face/config/frame_diff_svm.py
@@ -1,13 +1,11 @@
 #!/usr/bin/env python2
 # -*- coding: utf-8 -*-
-
 """
 This file contains configurations to run Frame Differences and SVM based face PAD baseline.
 The settings are tuned for the Replay-attack database.
 The idea of the algorithms is inherited from the following paper: [AM11]_.
 """
 
-
 #=======================================================================================
 sub_directory = 'frame_diff_svm'
 """
@@ -18,19 +16,19 @@ or the attribute ``sub_directory`` in a configuration file loaded **after**
 this resource.
 """
 
-
 #=======================================================================================
 # define preprocessor:
 
 from ..preprocessor import FrameDifference
 
-NUMBER_OF_FRAMES = None # process all frames
-CHECK_FACE_SIZE_FLAG = True # Check size of the face
-MIN_FACE_SIZE = 50 # Minimal size of the face to consider
+NUMBER_OF_FRAMES = None  # process all frames
+CHECK_FACE_SIZE_FLAG = True  # Check size of the face
+MIN_FACE_SIZE = 50  # Minimal size of the face to consider
 
-preprocessor = FrameDifference(number_of_frames = NUMBER_OF_FRAMES,
-                               check_face_size_flag = CHECK_FACE_SIZE_FLAG,
-                               min_face_size = MIN_FACE_SIZE)
+preprocessor = FrameDifference(
+    number_of_frames=NUMBER_OF_FRAMES,
+    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
+    min_face_size=MIN_FACE_SIZE)
 """
 In the preprocessing stage the frame differences are computed for both facial and non-facial/background
 regions. In this case all frames of the input video are considered, which is defined by
@@ -39,17 +37,15 @@ are discarded. Both RGB and gray-scale videos are acceptable by the preprocessor
 The preprocessing idea is introduced in [AM11]_.
 """
 
-
 #=======================================================================================
 # define extractor:
 
 from ..extractor import FrameDiffFeatures
 
-WINDOW_SIZE=20
-OVERLAP=0
+WINDOW_SIZE = 20
+OVERLAP = 0
 
-extractor = FrameDiffFeatures(window_size=WINDOW_SIZE,
-                              overlap=OVERLAP)
+extractor = FrameDiffFeatures(window_size=WINDOW_SIZE, overlap=OVERLAP)
 """
 In the feature extraction stage 5 features are extracted for all non-overlapping windows in
 the Frame Difference input signals. Five features are computed for each of the windows in the
@@ -59,7 +55,6 @@ argument.
 The features are introduced in the following paper: [AM11]_.
 """
 
-
 #=======================================================================================
 # define algorithm:
 
@@ -68,16 +63,20 @@ from ..algorithm import VideoSvmPadAlgorithm
 MACHINE_TYPE = 'C_SVC'
 KERNEL_TYPE = 'RBF'
 N_SAMPLES = 10000
-TRAINER_GRID_SEARCH_PARAMS = {'cost': [2**P for P in range(-3, 14, 2)], 'gamma': [2**P for P in range(-15, 0, 2)]}
-MEAN_STD_NORM_FLAG = True      # enable mean-std normalization
-FRAME_LEVEL_SCORES_FLAG = True # one score per frame(!) in this case
-
-algorithm = VideoSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                 kernel_type = KERNEL_TYPE,
-                                 n_samples = N_SAMPLES,
-                                 trainer_grid_search_params = TRAINER_GRID_SEARCH_PARAMS,
-                                 mean_std_norm_flag = MEAN_STD_NORM_FLAG,
-                                 frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+TRAINER_GRID_SEARCH_PARAMS = {
+    'cost': [2**P for P in range(-3, 14, 2)],
+    'gamma': [2**P for P in range(-15, 0, 2)]
+}
+MEAN_STD_NORM_FLAG = True  # enable mean-std normalization
+FRAME_LEVEL_SCORES_FLAG = True  # one score per frame(!) in this case
+
+algorithm = VideoSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    n_samples=N_SAMPLES,
+    trainer_grid_search_params=TRAINER_GRID_SEARCH_PARAMS,
+    mean_std_norm_flag=MEAN_STD_NORM_FLAG,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 """
 The SVM algorithm with RBF kernel is used to classify the data into *real* and *attack* classes.
 One score is produced for each frame of the input video, ``frame_level_scores_flag = True``.
@@ -87,8 +86,3 @@ The size of this subset is defined by ``n_samples`` parameter.
 
 The data is also mean-std normalized, ``mean_std_norm_flag = True``.
 """
-
-
-
-
-
diff --git a/bob/pad/face/config/frame_diff_svm_aggregated_db.py b/bob/pad/face/config/frame_diff_svm_aggregated_db.py
index 0379d881..1bd47a16 100644
--- a/bob/pad/face/config/frame_diff_svm_aggregated_db.py
+++ b/bob/pad/face/config/frame_diff_svm_aggregated_db.py
@@ -1,6 +1,5 @@
 #!/usr/bin/env python2
 # -*- coding: utf-8 -*-
-
 """
 This file contains configurations to run Frame Differences and SVM based face PAD baseline.
 The settings of the preprocessor and extractor are tuned for the Replay-attack database.
@@ -9,7 +8,6 @@ large data sets, such as Aggregated PAD database.
 The frame difference features used in this algorithm/resource are introduced in the following paper: [AM11]_.
 """
 
-
 #=======================================================================================
 sub_directory = 'frame_diff_svm'
 """
@@ -20,19 +18,19 @@ or the attribute ``sub_directory`` in a configuration file loaded **after**
 this resource.
 """
 
-
 #=======================================================================================
 # define preprocessor:
 
 from ..preprocessor import FrameDifference
 
-NUMBER_OF_FRAMES = None # process all frames
-CHECK_FACE_SIZE_FLAG = True # Check size of the face
-MIN_FACE_SIZE = 50 # Minimal size of the face to consider
+NUMBER_OF_FRAMES = None  # process all frames
+CHECK_FACE_SIZE_FLAG = True  # Check size of the face
+MIN_FACE_SIZE = 50  # Minimal size of the face to consider
 
-preprocessor = FrameDifference(number_of_frames = NUMBER_OF_FRAMES,
-                               check_face_size_flag = CHECK_FACE_SIZE_FLAG,
-                               min_face_size = MIN_FACE_SIZE)
+preprocessor = FrameDifference(
+    number_of_frames=NUMBER_OF_FRAMES,
+    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
+    min_face_size=MIN_FACE_SIZE)
 """
 In the preprocessing stage the frame differences are computed for both facial and non-facial/background
 regions. In this case all frames of the input video are considered, which is defined by
@@ -41,17 +39,15 @@ are discarded. Both RGB and gray-scale videos are acceptable by the preprocessor
 The preprocessing idea is introduced in [AM11]_.
 """
 
-
 #=======================================================================================
 # define extractor:
 
 from ..extractor import FrameDiffFeatures
 
-WINDOW_SIZE=20
-OVERLAP=0
+WINDOW_SIZE = 20
+OVERLAP = 0
 
-extractor = FrameDiffFeatures(window_size=WINDOW_SIZE,
-                              overlap=OVERLAP)
+extractor = FrameDiffFeatures(window_size=WINDOW_SIZE, overlap=OVERLAP)
 """
 In the feature extraction stage 5 features are extracted for all non-overlapping windows in
 the Frame Difference input signals. Five features are computed for each of the windows in the
@@ -61,7 +57,6 @@ argument.
 The features are introduced in the following paper: [AM11]_.
 """
 
-
 #=======================================================================================
 # define algorithm:
 
@@ -70,22 +65,26 @@ from ..algorithm import VideoSvmPadAlgorithm
 MACHINE_TYPE = 'C_SVC'
 KERNEL_TYPE = 'RBF'
 N_SAMPLES = 10000
-TRAINER_GRID_SEARCH_PARAMS = {'cost': [2**P for P in range(-3, 14, 2)], 'gamma': [2**P for P in range(-15, 0, 2)]}
-MEAN_STD_NORM_FLAG = True      # enable mean-std normalization
-FRAME_LEVEL_SCORES_FLAG = True # one score per frame(!) in this case
-SAVE_DEBUG_DATA_FLAG = True    # save the data, which might be useful for debugging
-REDUCED_TRAIN_DATA_FLAG = True # reduce the amount of training data in the final training stage
-N_TRAIN_SAMPLES = 50000       # number of training samples per class in the final SVM training stage
-
-algorithm = VideoSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                 kernel_type = KERNEL_TYPE,
-                                 n_samples = N_SAMPLES,
-                                 trainer_grid_search_params = TRAINER_GRID_SEARCH_PARAMS,
-                                 mean_std_norm_flag = MEAN_STD_NORM_FLAG,
-                                 frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG,
-                                 save_debug_data_flag = SAVE_DEBUG_DATA_FLAG,
-                                 reduced_train_data_flag = REDUCED_TRAIN_DATA_FLAG,
-                                 n_train_samples = N_TRAIN_SAMPLES)
+TRAINER_GRID_SEARCH_PARAMS = {
+    'cost': [2**P for P in range(-3, 14, 2)],
+    'gamma': [2**P for P in range(-15, 0, 2)]
+}
+MEAN_STD_NORM_FLAG = True  # enable mean-std normalization
+FRAME_LEVEL_SCORES_FLAG = True  # one score per frame(!) in this case
+SAVE_DEBUG_DATA_FLAG = True  # save the data, which might be useful for debugging
+REDUCED_TRAIN_DATA_FLAG = True  # reduce the amount of training data in the final training stage
+N_TRAIN_SAMPLES = 50000  # number of training samples per class in the final SVM training stage
+
+algorithm = VideoSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    n_samples=N_SAMPLES,
+    trainer_grid_search_params=TRAINER_GRID_SEARCH_PARAMS,
+    mean_std_norm_flag=MEAN_STD_NORM_FLAG,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG,
+    save_debug_data_flag=SAVE_DEBUG_DATA_FLAG,
+    reduced_train_data_flag=REDUCED_TRAIN_DATA_FLAG,
+    n_train_samples=N_TRAIN_SAMPLES)
 """
 The SVM algorithm with RBF kernel is used to classify the data into *real* and *attack* classes.
 One score is produced for each frame of the input video, ``frame_level_scores_flag = True``.
@@ -96,11 +95,3 @@ The final training of the SVM is done on the subset of training data ``reduced_t
 The size of the subset for the final training stage is defined by the ``n_train_samples`` argument.
 The data is also mean-std normalized, ``mean_std_norm_flag = True``.
 """
-
-
-
-
-
-
-
-
diff --git a/bob/pad/face/config/grid.py b/bob/pad/face/config/grid.py
index 8d3061f5..5d6738f8 100644
--- a/bob/pad/face/config/grid.py
+++ b/bob/pad/face/config/grid.py
@@ -6,39 +6,29 @@ from bob.bio.base.grid import Grid
 # Configuration to run on computation cluster:
 idiap = Grid(
     training_queue='8G-io-big',
-
     number_of_preprocessing_jobs=32,
     preprocessing_queue='4G-io-big',
-
     number_of_extraction_jobs=32,
     extraction_queue='4G-io-big',
-
     number_of_projection_jobs=32,
     projection_queue='4G-io-big',
-
     number_of_enrollment_jobs=32,
     enrollment_queue='4G-io-big',
-
     number_of_scoring_jobs=1,
     scoring_queue='4G-io-big',
-    )
+)
 
 # Configuration to run on user machines:
 idiap_user_machines = Grid(
     training_queue='32G',
-
     number_of_preprocessing_jobs=32,
     preprocessing_queue='4G',
-
     number_of_extraction_jobs=32,
     extraction_queue='8G',
-
     number_of_projection_jobs=32,
     projection_queue='8G',
-
     number_of_enrollment_jobs=32,
     enrollment_queue='8G',
-
     number_of_scoring_jobs=1,
     scoring_queue='8G',
-    )
+)
diff --git a/bob/pad/face/config/lbp_svm.py b/bob/pad/face/config/lbp_svm.py
index ff16f3b4..94866288 100644
--- a/bob/pad/face/config/lbp_svm.py
+++ b/bob/pad/face/config/lbp_svm.py
@@ -1,6 +1,5 @@
 #!/usr/bin/env python2
 # -*- coding: utf-8 -*-
-
 """
 This file contains configurations to run LBP and SVM based face PAD baseline.
 The settings are tuned for the Replay-attack database.
@@ -8,7 +7,6 @@ The idea of the algorithm is introduced in the following paper: [CAM12]_.
 However some settings are different from the ones introduced in the paper.
 """
 
-
 #=======================================================================================
 sub_directory = 'lbp_svm'
 """
@@ -19,33 +17,33 @@ or the attribute ``sub_directory`` in a configuration file loaded **after**
 this resource.
 """
 
-
 #=======================================================================================
 # define preprocessor:
 
 from ..preprocessor import VideoFaceCrop
 
-CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
-CROPPED_POSITIONS = {'topleft' : (0,0) , 'bottomright' : CROPPED_IMAGE_SIZE}
+CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
+CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
 FIXED_POSITIONS = None
-MASK_SIGMA = None             # The sigma for random values areas outside image
-MASK_NEIGHBORS = 5            # The number of neighbors to consider while extrapolating
-MASK_SEED = None              # The seed for generating random values during extrapolation
-CHECK_FACE_SIZE_FLAG = True   # Check the size of the face
-MIN_FACE_SIZE = 50            # Minimal possible size of the face
-USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
-COLOR_CHANNEL = 'gray'        # Convert image to gray-scale format
-
-preprocessor = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
-                             cropped_positions = CROPPED_POSITIONS,
-                             fixed_positions = FIXED_POSITIONS,
-                             mask_sigma = MASK_SIGMA,
-                             mask_neighbors = MASK_NEIGHBORS,
-                             mask_seed = None,
-                             check_face_size_flag = CHECK_FACE_SIZE_FLAG,
-                             min_face_size = MIN_FACE_SIZE,
-                             use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
-                             color_channel = COLOR_CHANNEL)
+MASK_SIGMA = None  # The sigma for random values areas outside image
+MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
+MASK_SEED = None  # The seed for generating random values during extrapolation
+CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
+MIN_FACE_SIZE = 50  # Minimal possible size of the face
+USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
+COLOR_CHANNEL = 'gray'  # Convert image to gray-scale format
+
+preprocessor = VideoFaceCrop(
+    cropped_image_size=CROPPED_IMAGE_SIZE,
+    cropped_positions=CROPPED_POSITIONS,
+    fixed_positions=FIXED_POSITIONS,
+    mask_sigma=MASK_SIGMA,
+    mask_neighbors=MASK_NEIGHBORS,
+    mask_seed=None,
+    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
+    min_face_size=MIN_FACE_SIZE,
+    use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
+    color_channel=COLOR_CHANNEL)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``cropped_image_size`` dimensions. The faces with the size
@@ -53,32 +51,31 @@ below ``min_face_size`` threshold are discarded. The preprocessor is similar to
 [CAM12]_, which is defined by ``use_local_cropper_flag = True``.
 """
 
-
 #=======================================================================================
 # define extractor:
 
 from ..extractor import VideoLBPHistogram
 
-LBPTYPE='uniform'
-ELBPTYPE='regular'
-RAD=1
-NEIGHBORS=8
-CIRC=False
-DTYPE=None
-
-extractor = VideoLBPHistogram(lbptype=LBPTYPE,
-                              elbptype=ELBPTYPE,
-                              rad=RAD,
-                              neighbors=NEIGHBORS,
-                              circ=CIRC,
-                              dtype=DTYPE)
+LBPTYPE = 'uniform'
+ELBPTYPE = 'regular'
+RAD = 1
+NEIGHBORS = 8
+CIRC = False
+DTYPE = None
+
+extractor = VideoLBPHistogram(
+    lbptype=LBPTYPE,
+    elbptype=ELBPTYPE,
+    rad=RAD,
+    neighbors=NEIGHBORS,
+    circ=CIRC,
+    dtype=DTYPE)
 """
 In the feature extraction stage the LBP histograms are extracted from each frame of the preprocessed video.
 
 The parameters are similar to the ones introduced in [CAM12]_.
 """
 
-
 #=======================================================================================
 # define algorithm:
 
@@ -87,16 +84,20 @@ from ..algorithm import VideoSvmPadAlgorithm
 MACHINE_TYPE = 'C_SVC'
 KERNEL_TYPE = 'RBF'
 N_SAMPLES = 10000
-TRAINER_GRID_SEARCH_PARAMS = {'cost': [2**P for P in range(-3, 14, 2)], 'gamma': [2**P for P in range(-15, 0, 2)]}
-MEAN_STD_NORM_FLAG = True      # enable mean-std normalization
-FRAME_LEVEL_SCORES_FLAG = True # one score per frame(!) in this case
-
-algorithm = VideoSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                 kernel_type = KERNEL_TYPE,
-                                 n_samples = N_SAMPLES,
-                                 trainer_grid_search_params = TRAINER_GRID_SEARCH_PARAMS,
-                                 mean_std_norm_flag = MEAN_STD_NORM_FLAG,
-                                 frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+TRAINER_GRID_SEARCH_PARAMS = {
+    'cost': [2**P for P in range(-3, 14, 2)],
+    'gamma': [2**P for P in range(-15, 0, 2)]
+}
+MEAN_STD_NORM_FLAG = True  # enable mean-std normalization
+FRAME_LEVEL_SCORES_FLAG = True  # one score per frame(!) in this case
+
+algorithm = VideoSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    n_samples=N_SAMPLES,
+    trainer_grid_search_params=TRAINER_GRID_SEARCH_PARAMS,
+    mean_std_norm_flag=MEAN_STD_NORM_FLAG,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 """
 The SVM algorithm with RBF kernel is used to classify the data into *real* and *attack* classes.
 One score is produced for each frame of the input video, ``frame_level_scores_flag = True``.
diff --git a/bob/pad/face/config/lbp_svm_aggregated_db.py b/bob/pad/face/config/lbp_svm_aggregated_db.py
index 5d249a7c..e2daaa2f 100644
--- a/bob/pad/face/config/lbp_svm_aggregated_db.py
+++ b/bob/pad/face/config/lbp_svm_aggregated_db.py
@@ -1,6 +1,5 @@
 #!/usr/bin/env python2
 # -*- coding: utf-8 -*-
-
 """
 This file contains configurations to run LBP and SVM based face PAD baseline.
 The settings of the preprocessor and extractor are tuned for the Replay-attack database.
@@ -10,7 +9,6 @@ The idea of the algorithm is introduced in the following paper: [CAM12]_.
 However some settings are different from the ones introduced in the paper.
 """
 
-
 #=======================================================================================
 sub_directory = 'lbp_svm_aggregated_db'
 """
@@ -21,33 +19,33 @@ or the attribute ``sub_directory`` in a configuration file loaded **after**
 this resource.
 """
 
-
 #=======================================================================================
 # define preprocessor:
 
 from ..preprocessor import VideoFaceCrop
 
-CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
-CROPPED_POSITIONS = {'topleft' : (0,0) , 'bottomright' : CROPPED_IMAGE_SIZE}
+CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
+CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
 FIXED_POSITIONS = None
-MASK_SIGMA = None             # The sigma for random values areas outside image
-MASK_NEIGHBORS = 5            # The number of neighbors to consider while extrapolating
-MASK_SEED = None              # The seed for generating random values during extrapolation
-CHECK_FACE_SIZE_FLAG = True   # Check the size of the face
-MIN_FACE_SIZE = 50            # Minimal possible size of the face
-USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
-COLOR_CHANNEL = 'gray'        # Convert image to gray-scale format
-
-preprocessor = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
-                             cropped_positions = CROPPED_POSITIONS,
-                             fixed_positions = FIXED_POSITIONS,
-                             mask_sigma = MASK_SIGMA,
-                             mask_neighbors = MASK_NEIGHBORS,
-                             mask_seed = None,
-                             check_face_size_flag = CHECK_FACE_SIZE_FLAG,
-                             min_face_size = MIN_FACE_SIZE,
-                             use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
-                             color_channel = COLOR_CHANNEL)
+MASK_SIGMA = None  # The sigma for random values areas outside image
+MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
+MASK_SEED = None  # The seed for generating random values during extrapolation
+CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
+MIN_FACE_SIZE = 50  # Minimal possible size of the face
+USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
+COLOR_CHANNEL = 'gray'  # Convert image to gray-scale format
+
+preprocessor = VideoFaceCrop(
+    cropped_image_size=CROPPED_IMAGE_SIZE,
+    cropped_positions=CROPPED_POSITIONS,
+    fixed_positions=FIXED_POSITIONS,
+    mask_sigma=MASK_SIGMA,
+    mask_neighbors=MASK_NEIGHBORS,
+    mask_seed=None,
+    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
+    min_face_size=MIN_FACE_SIZE,
+    use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
+    color_channel=COLOR_CHANNEL)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``cropped_image_size`` dimensions. The faces with the size
@@ -55,31 +53,30 @@ below ``min_face_size`` threshold are discarded. The preprocessor is similar to
 [CAM12]_, which is defined by ``use_local_cropper_flag = True``.
 """
 
-
 #=======================================================================================
 # define extractor:
 
 from ..extractor import VideoLBPHistogram
 
-LBPTYPE='uniform'
-ELBPTYPE='regular'
-RAD=1
-NEIGHBORS=8
-CIRC=False
-DTYPE=None
-
-extractor = VideoLBPHistogram(lbptype=LBPTYPE,
-                              elbptype=ELBPTYPE,
-                              rad=RAD,
-                              neighbors=NEIGHBORS,
-                              circ=CIRC,
-                              dtype=DTYPE)
+LBPTYPE = 'uniform'
+ELBPTYPE = 'regular'
+RAD = 1
+NEIGHBORS = 8
+CIRC = False
+DTYPE = None
+
+extractor = VideoLBPHistogram(
+    lbptype=LBPTYPE,
+    elbptype=ELBPTYPE,
+    rad=RAD,
+    neighbors=NEIGHBORS,
+    circ=CIRC,
+    dtype=DTYPE)
 """
 In the feature extraction stage the LBP histograms are extracted from each frame of the preprocessed video.
 The parameters are similar to the ones introduced in [CAM12]_.
 """
 
-
 #=======================================================================================
 # define algorithm:
 
@@ -88,22 +85,26 @@ from ..algorithm import VideoSvmPadAlgorithm
 MACHINE_TYPE = 'C_SVC'
 KERNEL_TYPE = 'RBF'
 N_SAMPLES = 10000
-TRAINER_GRID_SEARCH_PARAMS = {'cost': [2**P for P in range(-3, 14, 2)], 'gamma': [2**P for P in range(-15, 0, 2)]}
-MEAN_STD_NORM_FLAG = True      # enable mean-std normalization
-FRAME_LEVEL_SCORES_FLAG = True # one score per frame(!) in this case
-SAVE_DEBUG_DATA_FLAG = True    # save the data, which might be useful for debugging
-REDUCED_TRAIN_DATA_FLAG = True # reduce the amount of training data in the final training stage
-N_TRAIN_SAMPLES = 50000       # number of training samples per class in the final SVM training stage
-
-algorithm = VideoSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                 kernel_type = KERNEL_TYPE,
-                                 n_samples = N_SAMPLES,
-                                 trainer_grid_search_params = TRAINER_GRID_SEARCH_PARAMS,
-                                 mean_std_norm_flag = MEAN_STD_NORM_FLAG,
-                                 frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG,
-                                 save_debug_data_flag = SAVE_DEBUG_DATA_FLAG,
-                                 reduced_train_data_flag = REDUCED_TRAIN_DATA_FLAG,
-                                 n_train_samples = N_TRAIN_SAMPLES)
+TRAINER_GRID_SEARCH_PARAMS = {
+    'cost': [2**P for P in range(-3, 14, 2)],
+    'gamma': [2**P for P in range(-15, 0, 2)]
+}
+MEAN_STD_NORM_FLAG = True  # enable mean-std normalization
+FRAME_LEVEL_SCORES_FLAG = True  # one score per frame(!) in this case
+SAVE_DEBUG_DATA_FLAG = True  # save the data, which might be useful for debugging
+REDUCED_TRAIN_DATA_FLAG = True  # reduce the amount of training data in the final training stage
+N_TRAIN_SAMPLES = 50000  # number of training samples per class in the final SVM training stage
+
+algorithm = VideoSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    n_samples=N_SAMPLES,
+    trainer_grid_search_params=TRAINER_GRID_SEARCH_PARAMS,
+    mean_std_norm_flag=MEAN_STD_NORM_FLAG,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG,
+    save_debug_data_flag=SAVE_DEBUG_DATA_FLAG,
+    reduced_train_data_flag=REDUCED_TRAIN_DATA_FLAG,
+    n_train_samples=N_TRAIN_SAMPLES)
 """
 The SVM algorithm with RBF kernel is used to classify the data into *real* and *attack* classes.
 One score is produced for each frame of the input video, ``frame_level_scores_flag = True``.
@@ -113,4 +114,4 @@ The size of this subset is defined by ``n_samples`` parameter.
 The final training of the SVM is done on the subset of training data ``reduced_train_data_flag = True``.
 The size of the subset for the final training stage is defined by the ``n_train_samples`` argument.
 The data is also mean-std normalized, ``mean_std_norm_flag = True``.
-"""
\ No newline at end of file
+"""
diff --git a/bob/pad/face/config/mifs.py b/bob/pad/face/config/mifs.py
index 215a7da0..45c29fa7 100644
--- a/bob/pad/face/config/mifs.py
+++ b/bob/pad/face/config/mifs.py
@@ -1,6 +1,4 @@
 #!/usr/bin/env python
-
-
 """`MIFS`_ is a face makeup spoofing database adapted for face PAD experiments.
 
 Database assembled from a dataset consisting of 107 makeup-transformations taken
@@ -26,7 +24,7 @@ from bob.pad.face.database import MIFSPadDatabase
 ORIGINAL_DIRECTORY = "[YOUR_MIFS_DATABASE_DIRECTORY]"
 """Value of ``~/.bob_bio_databases.txt`` for this database"""
 
-ORIGINAL_EXTENSION = "" # extension of the data files
+ORIGINAL_EXTENSION = ""  # extension of the data files
 
 database = MIFSPadDatabase(
     protocol='grandtest',
diff --git a/bob/pad/face/config/msu_mfsd.py b/bob/pad/face/config/msu_mfsd.py
index 133cc2ff..7b91401a 100644
--- a/bob/pad/face/config/msu_mfsd.py
+++ b/bob/pad/face/config/msu_mfsd.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-
 """`MSU MFSD`_ is a database for face PAD experiments.
 
 Database created at MSU, for face-PAD experiments. The public version of the database contains
@@ -23,7 +22,7 @@ from bob.pad.face.database import MsuMfsdPadDatabase
 ORIGINAL_DIRECTORY = "[YOUR_MSU_MFSD_DIRECTORY]"
 """Value of ``~/.bob_bio_databases.txt`` for this database"""
 
-ORIGINAL_EXTENSION = "none" # extension is not used to load the data in the HLDI of this database
+ORIGINAL_EXTENSION = "none"  # extension is not used to load the data in the HLDI of this database
 
 database = MsuMfsdPadDatabase(
     protocol='grandtest',
diff --git a/bob/pad/face/config/preprocessor/filename.py b/bob/pad/face/config/preprocessor/filename.py
index feeefff0..ab7fa910 100644
--- a/bob/pad/face/config/preprocessor/filename.py
+++ b/bob/pad/face/config/preprocessor/filename.py
@@ -2,4 +2,3 @@ from bob.bio.base.preprocessor import Filename
 
 # This preprocessor does nothing, returning just the name of the file to extract the features from:
 empty_preprocessor = Filename()
-
diff --git a/bob/pad/face/config/preprocessor/video_face_crop.py b/bob/pad/face/config/preprocessor/video_face_crop.py
index 72156454..08ed1448 100644
--- a/bob/pad/face/config/preprocessor/video_face_crop.py
+++ b/bob/pad/face/config/preprocessor/video_face_crop.py
@@ -6,61 +6,56 @@ from bob.pad.face.preprocessor import VideoFaceCrop
 #=======================================================================================
 # Define instances here:
 
-CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
-CROPPED_POSITIONS = {'topleft' : (0,0) , 'bottomright' : CROPPED_IMAGE_SIZE}
+CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
+CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
 FIXED_POSITIONS = None
-MASK_SIGMA = None             # The sigma for random values areas outside image
-MASK_NEIGHBORS = 5            # The number of neighbors to consider while extrapolating
-MASK_SEED = None              # The seed for generating random values during extrapolation
-CHECK_FACE_SIZE_FLAG = True   # Check the size of the face
+MASK_SIGMA = None  # The sigma for random values areas outside image
+MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
+MASK_SEED = None  # The seed for generating random values during extrapolation
+CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
 MIN_FACE_SIZE = 50
-USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
-RGB_OUTPUT_FLAG = True        # Return RGB cropped face using local cropper
-DETECT_FACES_FLAG = True      # find annotations locally replacing the database annotations
+USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
+RGB_OUTPUT_FLAG = True  # Return RGB cropped face using local cropper
+DETECT_FACES_FLAG = True  # find annotations locally replacing the database annotations
 FACE_DETECTION_METHOD = "dlib"
 
-rgb_face_detector_dlib = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
-                                       cropped_positions = CROPPED_POSITIONS,
-                                       fixed_positions = FIXED_POSITIONS,
-                                       mask_sigma = MASK_SIGMA,
-                                       mask_neighbors = MASK_NEIGHBORS,
-                                       mask_seed = None,
-                                       check_face_size_flag = CHECK_FACE_SIZE_FLAG,
-                                       min_face_size = MIN_FACE_SIZE,
-                                       use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
-                                       rgb_output_flag = RGB_OUTPUT_FLAG,
-                                       detect_faces_flag = DETECT_FACES_FLAG,
-                                       face_detection_method = FACE_DETECTION_METHOD)
-
-
-CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
-CROPPED_POSITIONS = {'topleft' : (0,0) , 'bottomright' : CROPPED_IMAGE_SIZE}
+rgb_face_detector_dlib = VideoFaceCrop(
+    cropped_image_size=CROPPED_IMAGE_SIZE,
+    cropped_positions=CROPPED_POSITIONS,
+    fixed_positions=FIXED_POSITIONS,
+    mask_sigma=MASK_SIGMA,
+    mask_neighbors=MASK_NEIGHBORS,
+    mask_seed=None,
+    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
+    min_face_size=MIN_FACE_SIZE,
+    use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
+    rgb_output_flag=RGB_OUTPUT_FLAG,
+    detect_faces_flag=DETECT_FACES_FLAG,
+    face_detection_method=FACE_DETECTION_METHOD)
+
+CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
+CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
 FIXED_POSITIONS = None
-MASK_SIGMA = None             # The sigma for random values areas outside image
-MASK_NEIGHBORS = 5            # The number of neighbors to consider while extrapolating
-MASK_SEED = None              # The seed for generating random values during extrapolation
-CHECK_FACE_SIZE_FLAG = True   # Check the size of the face
+MASK_SIGMA = None  # The sigma for random values areas outside image
+MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
+MASK_SEED = None  # The seed for generating random values during extrapolation
+CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
 MIN_FACE_SIZE = 50
-USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
-RGB_OUTPUT_FLAG = True        # Return RGB cropped face using local cropper
-DETECT_FACES_FLAG = True      # find annotations locally replacing the database annotations
+USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
+RGB_OUTPUT_FLAG = True  # Return RGB cropped face using local cropper
+DETECT_FACES_FLAG = True  # find annotations locally replacing the database annotations
 FACE_DETECTION_METHOD = "mtcnn"
 
-rgb_face_detector_mtcnn = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
-                                       cropped_positions = CROPPED_POSITIONS,
-                                       fixed_positions = FIXED_POSITIONS,
-                                       mask_sigma = MASK_SIGMA,
-                                       mask_neighbors = MASK_NEIGHBORS,
-                                       mask_seed = None,
-                                       check_face_size_flag = CHECK_FACE_SIZE_FLAG,
-                                       min_face_size = MIN_FACE_SIZE,
-                                       use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
-                                       rgb_output_flag = RGB_OUTPUT_FLAG,
-                                       detect_faces_flag = DETECT_FACES_FLAG,
-                                       face_detection_method = FACE_DETECTION_METHOD)
-
-
-
-
-
-
+rgb_face_detector_mtcnn = VideoFaceCrop(
+    cropped_image_size=CROPPED_IMAGE_SIZE,
+    cropped_positions=CROPPED_POSITIONS,
+    fixed_positions=FIXED_POSITIONS,
+    mask_sigma=MASK_SIGMA,
+    mask_neighbors=MASK_NEIGHBORS,
+    mask_seed=None,
+    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
+    min_face_size=MIN_FACE_SIZE,
+    use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
+    rgb_output_flag=RGB_OUTPUT_FLAG,
+    detect_faces_flag=DETECT_FACES_FLAG,
+    face_detection_method=FACE_DETECTION_METHOD)
diff --git a/bob/pad/face/config/preprocessor/video_sparse_coding.py b/bob/pad/face/config/preprocessor/video_sparse_coding.py
index 525c6de6..ee24dbf4 100644
--- a/bob/pad/face/config/preprocessor/video_sparse_coding.py
+++ b/bob/pad/face/config/preprocessor/video_sparse_coding.py
@@ -15,22 +15,25 @@ NORM_FACE_SIZE = 64
 DICTIONARY_LENGTH = 128
 DIR = os.path.dirname(os.path.abspath(__file__))
 
-DICTIONARY_FILE_NAMES = [os.path.join(DIR, "dictionaries", "dictionary_front_10_5_{}.hdf5".format(DICTIONARY_LENGTH)),
-                         os.path.join(DIR, "dictionaries", "dictionary_hor_10_5_{}.hdf5".format(DICTIONARY_LENGTH)),
-                         os.path.join(DIR, "dictionaries", "dictionary_vert_10_5_{}.hdf5".format(DICTIONARY_LENGTH))]
-
-FRAME_STEP = 50 # (!) a small number of feature vectors will be computed
+DICTIONARY_FILE_NAMES = [
+    os.path.join(DIR, "dictionaries",
+                 "dictionary_front_10_5_{}.hdf5".format(DICTIONARY_LENGTH)),
+    os.path.join(DIR, "dictionaries",
+                 "dictionary_hor_10_5_{}.hdf5".format(DICTIONARY_LENGTH)),
+    os.path.join(DIR, "dictionaries",
+                 "dictionary_vert_10_5_{}.hdf5".format(DICTIONARY_LENGTH))
+]
+
+FRAME_STEP = 50  # (!) a small number of feature vectors will be computed
 EXTRACT_HISTOGRAMS_FLAG = True
 COMP_RECONSTRUCT_ERR_FLAG = False
 
-preprocessor_10_5_128 = VideoSparseCoding(gblock_size = BLOCK_SIZE,
-                                          block_length = BLOCK_LENGTH,
-                                          min_face_size = MIN_FACE_SIZE,
-                                          norm_face_size = NORM_FACE_SIZE,
-                                          dictionary_file_names = DICTIONARY_FILE_NAMES,
-                                          frame_step = FRAME_STEP,
-                                          extract_histograms_flag = EXTRACT_HISTOGRAMS_FLAG,
-                                          comp_reconstruct_err_flag = COMP_RECONSTRUCT_ERR_FLAG)
-
-
-
+preprocessor_10_5_128 = VideoSparseCoding(
+    gblock_size=BLOCK_SIZE,
+    block_length=BLOCK_LENGTH,
+    min_face_size=MIN_FACE_SIZE,
+    norm_face_size=NORM_FACE_SIZE,
+    dictionary_file_names=DICTIONARY_FILE_NAMES,
+    frame_step=FRAME_STEP,
+    extract_histograms_flag=EXTRACT_HISTOGRAMS_FLAG,
+    comp_reconstruct_err_flag=COMP_RECONSTRUCT_ERR_FLAG)
diff --git a/bob/pad/face/config/qm_lr.py b/bob/pad/face/config/qm_lr.py
index 11c4404c..f971fdee 100644
--- a/bob/pad/face/config/qm_lr.py
+++ b/bob/pad/face/config/qm_lr.py
@@ -1,13 +1,11 @@
 #!/usr/bin/env python2
 # -*- coding: utf-8 -*-
-
 """
 This file contains configurations to run Image Quality Measures (IQM) and LR based face PAD algorithm.
 The settings of the preprocessor and extractor are tuned for the Replay-attack database.
 The IQM features used in this algorithm/resource are introduced in the following papers: [WHJ15]_ and [CBVM16]_.
 """
 
-
 #=======================================================================================
 sub_directory = 'qm_lr'
 """
@@ -18,33 +16,33 @@ or the attribute ``sub_directory`` in a configuration file loaded **after**
 this resource.
 """
 
-
 #=======================================================================================
 # define preprocessor:
 
 from ..preprocessor import VideoFaceCrop
 
-CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
-CROPPED_POSITIONS = {'topleft' : (0,0) , 'bottomright' : CROPPED_IMAGE_SIZE}
+CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
+CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
 FIXED_POSITIONS = None
-MASK_SIGMA = None             # The sigma for random values areas outside image
-MASK_NEIGHBORS = 5            # The number of neighbors to consider while extrapolating
-MASK_SEED = None              # The seed for generating random values during extrapolation
-CHECK_FACE_SIZE_FLAG = True   # Check the size of the face
+MASK_SIGMA = None  # The sigma for random values areas outside image
+MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
+MASK_SEED = None  # The seed for generating random values during extrapolation
+CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
 MIN_FACE_SIZE = 50
-USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
-RGB_OUTPUT_FLAG = True        # Return RGB cropped face using local cropper
-
-preprocessor = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
-                             cropped_positions = CROPPED_POSITIONS,
-                             fixed_positions = FIXED_POSITIONS,
-                             mask_sigma = MASK_SIGMA,
-                             mask_neighbors = MASK_NEIGHBORS,
-                             mask_seed = None,
-                             check_face_size_flag = CHECK_FACE_SIZE_FLAG,
-                             min_face_size = MIN_FACE_SIZE,
-                             use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
-                             rgb_output_flag = RGB_OUTPUT_FLAG)
+USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
+RGB_OUTPUT_FLAG = True  # Return RGB cropped face using local cropper
+
+preprocessor = VideoFaceCrop(
+    cropped_image_size=CROPPED_IMAGE_SIZE,
+    cropped_positions=CROPPED_POSITIONS,
+    fixed_positions=FIXED_POSITIONS,
+    mask_sigma=MASK_SIGMA,
+    mask_neighbors=MASK_NEIGHBORS,
+    mask_seed=None,
+    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
+    min_face_size=MIN_FACE_SIZE,
+    use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
+    rgb_output_flag=RGB_OUTPUT_FLAG)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``cropped_image_size`` dimensions. The faces of the size
@@ -53,41 +51,34 @@ below ``min_face_size`` threshold are discarded. The preprocessor is similar to
 facial image, which is defined by ``RGB_OUTPUT_FLAG = True``.
 """
 
-
 #=======================================================================================
 # define extractor:
 
 from ..extractor import VideoQualityMeasure
 
-GALBALLY=True
-MSU=True
-DTYPE=None
+GALBALLY = True
+MSU = True
+DTYPE = None
 
-extractor = VideoQualityMeasure(galbally=GALBALLY,
-                                msu=MSU,
-                                dtype=DTYPE)
+extractor = VideoQualityMeasure(galbally=GALBALLY, msu=MSU, dtype=DTYPE)
 """
 In the feature extraction stage the Image Quality Measures are extracted from each frame of the preprocessed RGB video.
 The features to be computed are introduced in the following papers: [WHJ15]_ and [CBVM16]_.
 """
 
-
 #=======================================================================================
 # define algorithm:
 
 from ..algorithm import VideoLRPadAlgorithm
 
-C = 1. # The regularization parameter for the LR classifier
-FRAME_LEVEL_SCORES_FLAG = True # Return one score per frame
-
-algorithm = VideoLRPadAlgorithm(C = C,
-                                frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+C = 1.  # The regularization parameter for the LR classifier
+FRAME_LEVEL_SCORES_FLAG = True  # Return one score per frame
 
+algorithm = VideoLRPadAlgorithm(
+    C=C, frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 """
 The Logistic Regression is used to classify the data into *real* and *attack* classes.
 One score is produced for each frame of the input video, ``frame_level_scores_flag = True``.
 The sub-sampling of training data is not used here; the sub-sampling flags keep their default
 ``False`` values.
 """
-
-
diff --git a/bob/pad/face/config/qm_one_class_gmm.py b/bob/pad/face/config/qm_one_class_gmm.py
index d6e87704..c74e7b7f 100644
--- a/bob/pad/face/config/qm_one_class_gmm.py
+++ b/bob/pad/face/config/qm_one_class_gmm.py
@@ -1,13 +1,11 @@
 #!/usr/bin/env python2
 # -*- coding: utf-8 -*-
-
 """
 This file contains configurations to run Image Quality Measures (IQM) and one-class GMM based face PAD algorithm.
 The settings of the preprocessor and extractor are tuned for the Replay-attack database.
 The IQM features used in this algorithm/resource are introduced in the following papers: [WHJ15]_ and [CBVM16]_.
 """
 
-
 #=======================================================================================
 sub_directory = 'qm_one_class_gmm'
 """
@@ -18,33 +16,33 @@ or the attribute ``sub_directory`` in a configuration file loaded **after**
 this resource.
 """
 
-
 #=======================================================================================
 # define preprocessor:
 
 from ..preprocessor import VideoFaceCrop
 
-CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
-CROPPED_POSITIONS = {'topleft' : (0,0) , 'bottomright' : CROPPED_IMAGE_SIZE}
+CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
+CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
 FIXED_POSITIONS = None
-MASK_SIGMA = None             # The sigma for random values areas outside image
-MASK_NEIGHBORS = 5            # The number of neighbors to consider while extrapolating
-MASK_SEED = None              # The seed for generating random values during extrapolation
-CHECK_FACE_SIZE_FLAG = True   # Check the size of the face
+MASK_SIGMA = None  # The sigma for random values areas outside image
+MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
+MASK_SEED = None  # The seed for generating random values during extrapolation
+CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
 MIN_FACE_SIZE = 50
-USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
-RGB_OUTPUT_FLAG = True        # Return RGB cropped face using local cropper
-
-preprocessor = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
-                             cropped_positions = CROPPED_POSITIONS,
-                             fixed_positions = FIXED_POSITIONS,
-                             mask_sigma = MASK_SIGMA,
-                             mask_neighbors = MASK_NEIGHBORS,
-                             mask_seed = None,
-                             check_face_size_flag = CHECK_FACE_SIZE_FLAG,
-                             min_face_size = MIN_FACE_SIZE,
-                             use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
-                             rgb_output_flag = RGB_OUTPUT_FLAG)
+USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
+RGB_OUTPUT_FLAG = True  # Return RGB cropped face using local cropper
+
+preprocessor = VideoFaceCrop(
+    cropped_image_size=CROPPED_IMAGE_SIZE,
+    cropped_positions=CROPPED_POSITIONS,
+    fixed_positions=FIXED_POSITIONS,
+    mask_sigma=MASK_SIGMA,
+    mask_neighbors=MASK_NEIGHBORS,
+    mask_seed=None,
+    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
+    min_face_size=MIN_FACE_SIZE,
+    use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
+    rgb_output_flag=RGB_OUTPUT_FLAG)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``cropped_image_size`` dimensions. The faces of the size
@@ -53,25 +51,21 @@ below ``min_face_size`` threshold are discarded. The preprocessor is similar to
 facial image, which is defined by ``RGB_OUTPUT_FLAG = True``.
 """
 
-
 #=======================================================================================
 # define extractor:
 
 from ..extractor import VideoQualityMeasure
 
-GALBALLY=True
-MSU=True
-DTYPE=None
+GALBALLY = True
+MSU = True
+DTYPE = None
 
-extractor = VideoQualityMeasure(galbally=GALBALLY,
-                                msu=MSU,
-                                dtype=DTYPE)
+extractor = VideoQualityMeasure(galbally=GALBALLY, msu=MSU, dtype=DTYPE)
 """
 In the feature extraction stage the Image Quality Measures are extracted from each frame of the preprocessed RGB video.
 The features to be computed are introduced in the following papers: [WHJ15]_ and [CBVM16]_.
 """
 
-
 #=======================================================================================
 # define algorithm:
 
@@ -81,16 +75,12 @@ N_COMPONENTS = 50
 RANDOM_STATE = 3
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                 random_state = RANDOM_STATE,
-                                 frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm = VideoGmmPadAlgorithm(
+    n_components=N_COMPONENTS,
+    random_state=RANDOM_STATE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 """
 The GMM with 50 clusters is trained using samples from the real class only. The pre-trained
 GMM is then used to classify the data into *real* and *attack* classes.
 One score is produced for each frame of the input video, ``frame_level_scores_flag = True``.
 """
-
-
-
-
-
diff --git a/bob/pad/face/config/qm_one_class_svm_aggregated_db.py b/bob/pad/face/config/qm_one_class_svm_aggregated_db.py
index f4cbe08f..07522368 100644
--- a/bob/pad/face/config/qm_one_class_svm_aggregated_db.py
+++ b/bob/pad/face/config/qm_one_class_svm_aggregated_db.py
@@ -1,6 +1,5 @@
 #!/usr/bin/env python2
 # -*- coding: utf-8 -*-
-
 """
 This file contains configurations to run Image Quality Measures (IQM) and one-class SVM based face PAD algorithm.
 The settings of the preprocessor and extractor are tuned for the Replay-attack database.
@@ -9,7 +8,6 @@ large data sets, such as Aggregated PAD database.
 The IQM features used in this algorithm/resource are introduced in the following papers: [WHJ15]_ and [CBVM16]_.
 """
 
-
 #=======================================================================================
 sub_directory = 'qm_one_class_svm_aggregated_db'
 """
@@ -20,33 +18,33 @@ or the attribute ``sub_directory`` in a configuration file loaded **after**
 this resource.
 """
 
-
 #=======================================================================================
 # define preprocessor:
 
 from ..preprocessor import VideoFaceCrop
 
-CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
-CROPPED_POSITIONS = {'topleft' : (0,0) , 'bottomright' : CROPPED_IMAGE_SIZE}
+CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
+CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
 FIXED_POSITIONS = None
-MASK_SIGMA = None             # The sigma for random values areas outside image
-MASK_NEIGHBORS = 5            # The number of neighbors to consider while extrapolating
-MASK_SEED = None              # The seed for generating random values during extrapolation
-CHECK_FACE_SIZE_FLAG = True   # Check the size of the face
+MASK_SIGMA = None  # The sigma for random values areas outside image
+MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
+MASK_SEED = None  # The seed for generating random values during extrapolation
+CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
 MIN_FACE_SIZE = 50
-USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
-RGB_OUTPUT_FLAG = True        # Return RGB cropped face using local cropper
-
-preprocessor = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
-                             cropped_positions = CROPPED_POSITIONS,
-                             fixed_positions = FIXED_POSITIONS,
-                             mask_sigma = MASK_SIGMA,
-                             mask_neighbors = MASK_NEIGHBORS,
-                             mask_seed = None,
-                             check_face_size_flag = CHECK_FACE_SIZE_FLAG,
-                             min_face_size = MIN_FACE_SIZE,
-                             use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
-                             rgb_output_flag = RGB_OUTPUT_FLAG)
+USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
+RGB_OUTPUT_FLAG = True  # Return RGB cropped face using local cropper
+
+preprocessor = VideoFaceCrop(
+    cropped_image_size=CROPPED_IMAGE_SIZE,
+    cropped_positions=CROPPED_POSITIONS,
+    fixed_positions=FIXED_POSITIONS,
+    mask_sigma=MASK_SIGMA,
+    mask_neighbors=MASK_NEIGHBORS,
+    mask_seed=None,
+    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
+    min_face_size=MIN_FACE_SIZE,
+    use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
+    rgb_output_flag=RGB_OUTPUT_FLAG)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``cropped_image_size`` dimensions. Faces of size
@@ -55,25 +53,21 @@ below the ``min_face_size`` threshold are discarded. The preprocessor is similar to
 facial image, which is defined by ``RGB_OUTPUT_FLAG = True``.
 """
 
-
 #=======================================================================================
 # define extractor:
 
 from ..extractor import VideoQualityMeasure
 
-GALBALLY=True
-MSU=True
-DTYPE=None
+GALBALLY = True
+MSU = True
+DTYPE = None
 
-extractor = VideoQualityMeasure(galbally=GALBALLY,
-                                msu=MSU,
-                                dtype=DTYPE)
+extractor = VideoQualityMeasure(galbally=GALBALLY, msu=MSU, dtype=DTYPE)
 """
 In the feature extraction stage the Image Quality Measures are extracted from each frame of the preprocessed RGB video.
 The features to be computed are introduced in the following papers: [WHJ15]_ and [CBVM16]_.
 """
 
-
 #=======================================================================================
 # define algorithm:
 
@@ -82,22 +76,26 @@ from ..algorithm import VideoSvmPadAlgorithm
 MACHINE_TYPE = 'ONE_CLASS'
 KERNEL_TYPE = 'RBF'
 N_SAMPLES = 50000
-TRAINER_GRID_SEARCH_PARAMS = {'nu': [0.001, 0.01, 0.05, 0.1], 'gamma': [0.01, 0.1, 1, 10]}
-MEAN_STD_NORM_FLAG = True       # enable mean-std normalization
+TRAINER_GRID_SEARCH_PARAMS = {
+    'nu': [0.001, 0.01, 0.05, 0.1],
+    'gamma': [0.01, 0.1, 1, 10]
+}
+MEAN_STD_NORM_FLAG = True  # enable mean-std normalization
 FRAME_LEVEL_SCORES_FLAG = True  # one score per frame(!) in this case
-SAVE_DEBUG_DATA_FLAG = True     # save the data, which might be useful for debugging
-REDUCED_TRAIN_DATA_FLAG = False # DO NOT reduce the amount of training data in the final training stage
-N_TRAIN_SAMPLES = 50000         # number of training samples per class in the final SVM training stage (NOT considered, because REDUCED_TRAIN_DATA_FLAG = False)
-
-algorithm = VideoSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                 kernel_type = KERNEL_TYPE,
-                                 n_samples = N_SAMPLES,
-                                 trainer_grid_search_params = TRAINER_GRID_SEARCH_PARAMS,
-                                 mean_std_norm_flag = MEAN_STD_NORM_FLAG,
-                                 frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG,
-                                 save_debug_data_flag = SAVE_DEBUG_DATA_FLAG,
-                                 reduced_train_data_flag = REDUCED_TRAIN_DATA_FLAG,
-                                 n_train_samples = N_TRAIN_SAMPLES)
+SAVE_DEBUG_DATA_FLAG = True  # save the data, which might be useful for debugging
+REDUCED_TRAIN_DATA_FLAG = False  # DO NOT reduce the amount of training data in the final training stage
+N_TRAIN_SAMPLES = 50000  # number of training samples per class in the final SVM training stage (NOT considered, because REDUCED_TRAIN_DATA_FLAG = False)
+
+algorithm = VideoSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    n_samples=N_SAMPLES,
+    trainer_grid_search_params=TRAINER_GRID_SEARCH_PARAMS,
+    mean_std_norm_flag=MEAN_STD_NORM_FLAG,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG,
+    save_debug_data_flag=SAVE_DEBUG_DATA_FLAG,
+    reduced_train_data_flag=REDUCED_TRAIN_DATA_FLAG,
+    n_train_samples=N_TRAIN_SAMPLES)
 """
 The one-class SVM algorithm with RBF kernel is used to classify the data into *real* and *attack* classes.
 One score is produced for each frame of the input video, ``frame_level_scores_flag = True``.
@@ -107,5 +105,3 @@ The size of this subset is defined by ``n_samples`` parameter.
 The final training of the SVM is done on all training data ``reduced_train_data_flag = False``.
 The data is also mean-std normalized, ``mean_std_norm_flag = True``.
 """
-
-
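A rough, self-contained illustration of the training recipe described above (sub-sampling, mean-std normalization, grid search over ``nu``/``gamma``), using scikit-learn's OneClassSVM in place of bob's wrapper. The toy sizes and the model-selection criterion, the fraction of held-out real frames accepted, are assumptions of this sketch:

import itertools
import numpy as np
from sklearn.svm import OneClassSVM

rng = np.random.RandomState(0)
train_real = rng.rand(5000, 32)   # toy real-class features (stand-in for N_SAMPLES-sized data)
dev_real = rng.rand(500, 32)      # toy held-out real frames

# mean-std normalization (MEAN_STD_NORM_FLAG = True)
mean, std = train_real.mean(axis=0), train_real.std(axis=0) + 1e-10
norm = lambda x: (x - mean) / std

# grid from TRAINER_GRID_SEARCH_PARAMS; keep the pair accepting most real dev frames
grid = itertools.product([0.001, 0.01, 0.05, 0.1], [0.01, 0.1, 1, 10])
nu, gamma = max(
    grid,
    key=lambda p: (OneClassSVM(kernel='rbf', nu=p[0], gamma=p[1])
                   .fit(norm(train_real)).predict(norm(dev_real)) == 1).mean())

svm = OneClassSVM(kernel='rbf', nu=nu, gamma=gamma).fit(norm(train_real))
frame_scores = svm.decision_function(norm(dev_real))  # one score per frame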
diff --git a/bob/pad/face/config/qm_one_class_svm_cascade_aggregated_db.py b/bob/pad/face/config/qm_one_class_svm_cascade_aggregated_db.py
index b6aecc93..f98d25cd 100644
--- a/bob/pad/face/config/qm_one_class_svm_cascade_aggregated_db.py
+++ b/bob/pad/face/config/qm_one_class_svm_cascade_aggregated_db.py
@@ -1,6 +1,5 @@
 #!/usr/bin/env python2
 # -*- coding: utf-8 -*-
-
 """
 This file contains configurations to run Image Quality Measures (IQM) and SVM based face PAD baseline.
 The settings of the preprocessor and extractor are tuned for the Replay-attack database.
@@ -9,7 +8,6 @@ large data sets, such as Aggregated PAD database.
 The IQM features used in this algorithm/resource are introduced in the following papers: [WHJ15]_ and [CBVM16]_.
 """
 
-
 #=======================================================================================
 sub_directory = 'qm_svm_aggregated_db'
 """
@@ -20,33 +18,33 @@ or the attribute ``sub_directory`` in a configuration file loaded **after**
 this resource.
 """
 
-
 #=======================================================================================
 # define preprocessor:
 
 from ..preprocessor import VideoFaceCrop
 
-CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
-CROPPED_POSITIONS = {'topleft' : (0,0) , 'bottomright' : CROPPED_IMAGE_SIZE}
+CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
+CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
 FIXED_POSITIONS = None
-MASK_SIGMA = None             # The sigma for random values areas outside image
-MASK_NEIGHBORS = 5            # The number of neighbors to consider while extrapolating
-MASK_SEED = None              # The seed for generating random values during extrapolation
-CHECK_FACE_SIZE_FLAG = True   # Check the size of the face
+MASK_SIGMA = None  # The sigma for random values areas outside image
+MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
+MASK_SEED = None  # The seed for generating random values during extrapolation
+CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
 MIN_FACE_SIZE = 50
-USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
-RGB_OUTPUT_FLAG = True        # Return RGB cropped face using local cropper
-
-preprocessor = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
-                             cropped_positions = CROPPED_POSITIONS,
-                             fixed_positions = FIXED_POSITIONS,
-                             mask_sigma = MASK_SIGMA,
-                             mask_neighbors = MASK_NEIGHBORS,
-                             mask_seed = None,
-                             check_face_size_flag = CHECK_FACE_SIZE_FLAG,
-                             min_face_size = MIN_FACE_SIZE,
-                             use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
-                             rgb_output_flag = RGB_OUTPUT_FLAG)
+USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
+RGB_OUTPUT_FLAG = True  # Return RGB cropped face using local cropper
+
+preprocessor = VideoFaceCrop(
+    cropped_image_size=CROPPED_IMAGE_SIZE,
+    cropped_positions=CROPPED_POSITIONS,
+    fixed_positions=FIXED_POSITIONS,
+    mask_sigma=MASK_SIGMA,
+    mask_neighbors=MASK_NEIGHBORS,
+    mask_seed=None,
+    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
+    min_face_size=MIN_FACE_SIZE,
+    use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
+    rgb_output_flag=RGB_OUTPUT_FLAG)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``cropped_image_size`` dimensions. Faces of size
@@ -55,25 +53,21 @@ below the ``min_face_size`` threshold are discarded. The preprocessor is similar to
 facial image, which is defined by ``RGB_OUTPUT_FLAG = True``.
 """
 
-
 #=======================================================================================
 # define extractor:
 
 from ..extractor import VideoQualityMeasure
 
-GALBALLY=True
-MSU=True
-DTYPE=None
+GALBALLY = True
+MSU = True
+DTYPE = None
 
-extractor = VideoQualityMeasure(galbally=GALBALLY,
-                                msu=MSU,
-                                dtype=DTYPE)
+extractor = VideoQualityMeasure(galbally=GALBALLY, msu=MSU, dtype=DTYPE)
 """
 In the feature extraction stage the Image Quality Measures are extracted from each frame of the preprocessed RGB video.
 The features to be computed are introduced in the following papers: [WHJ15]_ and [CBVM16]_.
 """
 
-
 #=======================================================================================
 # define algorithm:
 
@@ -86,12 +80,13 @@ N = 2
 POS_SCORES_SLOPE = 0.01
 FRAME_LEVEL_SCORES_FLAG = True
 
-algorithm = VideoCascadeSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                        kernel_type = KERNEL_TYPE,
-                                        svm_kwargs = SVM_KWARGS,
-                                        N = N,
-                                        pos_scores_slope = POS_SCORES_SLOPE,
-                                        frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+algorithm = VideoCascadeSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    svm_kwargs=SVM_KWARGS,
+    N=N,
+    pos_scores_slope=POS_SCORES_SLOPE,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 """
 The cascade of one-class SVMs with RBF kernel is used to classify the data into *real* and *attack* classes.
 One score is produced for each frame of the input video, ``frame_level_scores_flag = True``.
@@ -99,4 +94,3 @@ A single SVM in the cascade is trained using two features ``N = 2``.
 The positive scores produced by the cascade are damped by multiplying them by the constant
 ``pos_scores_slope = 0.01``.
 """
-
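The cascade structure can be pictured as follows: the IQM feature vector is split into consecutive chunks of ``N`` features, one one-class SVM is trained per chunk, positive scores are damped by ``pos_scores_slope``, and the per-SVM scores are fused. This is a reconstruction of the idea only; the chunking, the fusion rule and the ``nu``/``gamma`` values are assumptions, not the exact VideoCascadeSvmPadAlgorithm logic:

import numpy as np
from sklearn.svm import OneClassSVM

N = 2                    # features per SVM in the cascade
POS_SCORES_SLOPE = 0.01  # damping factor for positive scores

rng = np.random.RandomState(0)
train_real = rng.rand(500, 10)  # toy real-class features
probe = rng.rand(25, 10)        # toy probe video, 25 frames

starts = range(0, train_real.shape[1], N)
svms = [OneClassSVM(kernel='rbf', nu=0.01, gamma=0.5).fit(train_real[:, i:i + N])
        for i in starts]

scores = np.stack([svm.decision_function(probe[:, i:i + N])
                   for svm, i in zip(svms, starts)], axis=1)
scores[scores > 0] *= POS_SCORES_SLOPE   # damp positive scores
frame_scores = scores.mean(axis=1)       # one fused score per frame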
diff --git a/bob/pad/face/config/qm_svm.py b/bob/pad/face/config/qm_svm.py
index b6ee74c4..00324214 100644
--- a/bob/pad/face/config/qm_svm.py
+++ b/bob/pad/face/config/qm_svm.py
@@ -1,13 +1,11 @@
 #!/usr/bin/env python2
 # -*- coding: utf-8 -*-
-
 """
 This file contains configurations to run Image Quality Measures (IQM) and SVM based face PAD baseline.
 The settings are tuned for the Replay-attack database.
 The IQM features used in this algorithm/resource are introduced in the following papers: [WHJ15]_ and [CBVM16]_.
 """
 
-
 #=======================================================================================
 sub_directory = 'qm_svm'
 """
@@ -18,33 +16,33 @@ or the attribute ``sub_directory`` in a configuration file loaded **after**
 this resource.
 """
 
-
 #=======================================================================================
 # define preprocessor:
 
 from ..preprocessor import VideoFaceCrop
 
-CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
-CROPPED_POSITIONS = {'topleft' : (0,0) , 'bottomright' : CROPPED_IMAGE_SIZE}
+CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
+CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
 FIXED_POSITIONS = None
-MASK_SIGMA = None             # The sigma for random values areas outside image
-MASK_NEIGHBORS = 5            # The number of neighbors to consider while extrapolating
-MASK_SEED = None              # The seed for generating random values during extrapolation
-CHECK_FACE_SIZE_FLAG = True   # Check the size of the face
+MASK_SIGMA = None  # The sigma for random values areas outside image
+MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
+MASK_SEED = None  # The seed for generating random values during extrapolation
+CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
 MIN_FACE_SIZE = 50
-USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
-RGB_OUTPUT_FLAG = True        # Return RGB cropped face using local cropper
-
-preprocessor = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
-                             cropped_positions = CROPPED_POSITIONS,
-                             fixed_positions = FIXED_POSITIONS,
-                             mask_sigma = MASK_SIGMA,
-                             mask_neighbors = MASK_NEIGHBORS,
-                             mask_seed = None,
-                             check_face_size_flag = CHECK_FACE_SIZE_FLAG,
-                             min_face_size = MIN_FACE_SIZE,
-                             use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
-                             rgb_output_flag = RGB_OUTPUT_FLAG)
+USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
+RGB_OUTPUT_FLAG = True  # Return RGB cropped face using local cropper
+
+preprocessor = VideoFaceCrop(
+    cropped_image_size=CROPPED_IMAGE_SIZE,
+    cropped_positions=CROPPED_POSITIONS,
+    fixed_positions=FIXED_POSITIONS,
+    mask_sigma=MASK_SIGMA,
+    mask_neighbors=MASK_NEIGHBORS,
+    mask_seed=None,
+    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
+    min_face_size=MIN_FACE_SIZE,
+    use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
+    rgb_output_flag=RGB_OUTPUT_FLAG)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``cropped_image_size`` dimensions. Faces of size
@@ -53,25 +51,21 @@ below the ``min_face_size`` threshold are discarded. The preprocessor is similar to
 facial image, which is defined by ``RGB_OUTPUT_FLAG = True``.
 """
 
-
 #=======================================================================================
 # define extractor:
 
 from ..extractor import VideoQualityMeasure
 
-GALBALLY=True
-MSU=True
-DTYPE=None
+GALBALLY = True
+MSU = True
+DTYPE = None
 
-extractor = VideoQualityMeasure(galbally=GALBALLY,
-                                msu=MSU,
-                                dtype=DTYPE)
+extractor = VideoQualityMeasure(galbally=GALBALLY, msu=MSU, dtype=DTYPE)
 """
 In the feature extraction stage the Image Quality Measures are extracted from each frame of the preprocessed RGB video.
 The features to be computed are introduced in the following papers: [WHJ15]_ and [CBVM16]_.
 """
 
-
 #=======================================================================================
 # define algorithm:
 
@@ -80,16 +74,20 @@ from ..algorithm import VideoSvmPadAlgorithm
 MACHINE_TYPE = 'C_SVC'
 KERNEL_TYPE = 'RBF'
 N_SAMPLES = 10000
-TRAINER_GRID_SEARCH_PARAMS = {'cost': [2**P for P in range(-3, 14, 2)], 'gamma': [2**P for P in range(-15, 0, 2)]}
-MEAN_STD_NORM_FLAG = True      # enable mean-std normalization
-FRAME_LEVEL_SCORES_FLAG = True # one score per frame(!) in this case
-
-algorithm = VideoSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                 kernel_type = KERNEL_TYPE,
-                                 n_samples = N_SAMPLES,
-                                 trainer_grid_search_params = TRAINER_GRID_SEARCH_PARAMS,
-                                 mean_std_norm_flag = MEAN_STD_NORM_FLAG,
-                                 frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+TRAINER_GRID_SEARCH_PARAMS = {
+    'cost': [2**P for P in range(-3, 14, 2)],
+    'gamma': [2**P for P in range(-15, 0, 2)]
+}
+MEAN_STD_NORM_FLAG = True  # enable mean-std normalization
+FRAME_LEVEL_SCORES_FLAG = True  # one score per frame(!) in this case
+
+algorithm = VideoSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    n_samples=N_SAMPLES,
+    trainer_grid_search_params=TRAINER_GRID_SEARCH_PARAMS,
+    mean_std_norm_flag=MEAN_STD_NORM_FLAG,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 """
 The SVM algorithm with RBF kernel is used to classify the data into *real* and *attack* classes.
 One score is produced for each frame of the input video, ``frame_level_scores_flag = True``.
@@ -99,4 +97,3 @@ The size of this subset is defined by ``n_samples`` parameter.
 
 The data is also mean-std normalized, ``mean_std_norm_flag = True``.
 """
-
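For reference, the equivalent grid in scikit-learn terms (libsvm's ``cost`` is sklearn's ``C``); the toy data and the 3-fold cross-validation are assumptions of this sketch, bob's VideoSvmPadAlgorithm performs its own search:

import numpy as np
from sklearn.model_selection import GridSearchCV
from sklearn.svm import SVC

rng = np.random.RandomState(0)
X = rng.rand(400, 16)        # toy IQM features
y = np.repeat([1, 0], 200)   # 1 = real, 0 = attack

param_grid = {
    'C': [2**P for P in range(-3, 14, 2)],       # == 'cost' above
    'gamma': [2**P for P in range(-15, 0, 2)],
}
grid = GridSearchCV(SVC(kernel='rbf'), param_grid, cv=3).fit(X, y)
print(grid.best_params_)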
diff --git a/bob/pad/face/config/qm_svm_aggregated_db.py b/bob/pad/face/config/qm_svm_aggregated_db.py
index a9deb1a5..e260cdc4 100644
--- a/bob/pad/face/config/qm_svm_aggregated_db.py
+++ b/bob/pad/face/config/qm_svm_aggregated_db.py
@@ -1,6 +1,5 @@
 #!/usr/bin/env python2
 # -*- coding: utf-8 -*-
-
 """
 This file contains configurations to run Image Quality Measures (IQM) and SVM based face PAD baseline.
 The settings of the preprocessor and extractor are tuned for the Replay-attack database.
@@ -9,7 +8,6 @@ large data sets, such as Aggregated PAD database.
 The IQM features used in this algorithm/resource are introduced in the following papers: [WHJ15]_ and [CBVM16]_.
 """
 
-
 #=======================================================================================
 sub_directory = 'qm_svm_aggregated_db'
 """
@@ -20,33 +18,33 @@ or the attribute ``sub_directory`` in a configuration file loaded **after**
 this resource.
 """
 
-
 #=======================================================================================
 # define preprocessor:
 
 from ..preprocessor import VideoFaceCrop
 
-CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
-CROPPED_POSITIONS = {'topleft' : (0,0) , 'bottomright' : CROPPED_IMAGE_SIZE}
+CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
+CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
 FIXED_POSITIONS = None
-MASK_SIGMA = None             # The sigma for random values areas outside image
-MASK_NEIGHBORS = 5            # The number of neighbors to consider while extrapolating
-MASK_SEED = None              # The seed for generating random values during extrapolation
-CHECK_FACE_SIZE_FLAG = True   # Check the size of the face
+MASK_SIGMA = None  # The sigma for random values areas outside image
+MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
+MASK_SEED = None  # The seed for generating random values during extrapolation
+CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
 MIN_FACE_SIZE = 50
-USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
-RGB_OUTPUT_FLAG = True        # Return RGB cropped face using local cropper
-
-preprocessor = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
-                             cropped_positions = CROPPED_POSITIONS,
-                             fixed_positions = FIXED_POSITIONS,
-                             mask_sigma = MASK_SIGMA,
-                             mask_neighbors = MASK_NEIGHBORS,
-                             mask_seed = None,
-                             check_face_size_flag = CHECK_FACE_SIZE_FLAG,
-                             min_face_size = MIN_FACE_SIZE,
-                             use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
-                             rgb_output_flag = RGB_OUTPUT_FLAG)
+USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
+RGB_OUTPUT_FLAG = True  # Return RGB cropped face using local cropper
+
+preprocessor = VideoFaceCrop(
+    cropped_image_size=CROPPED_IMAGE_SIZE,
+    cropped_positions=CROPPED_POSITIONS,
+    fixed_positions=FIXED_POSITIONS,
+    mask_sigma=MASK_SIGMA,
+    mask_neighbors=MASK_NEIGHBORS,
+    mask_seed=None,
+    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
+    min_face_size=MIN_FACE_SIZE,
+    use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
+    rgb_output_flag=RGB_OUTPUT_FLAG)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``cropped_image_size`` dimensions. Faces of size
@@ -55,25 +53,21 @@ below the ``min_face_size`` threshold are discarded. The preprocessor is similar to
 facial image, which is defined by ``RGB_OUTPUT_FLAG = True``.
 """
 
-
 #=======================================================================================
 # define extractor:
 
 from ..extractor import VideoQualityMeasure
 
-GALBALLY=True
-MSU=True
-DTYPE=None
+GALBALLY = True
+MSU = True
+DTYPE = None
 
-extractor = VideoQualityMeasure(galbally=GALBALLY,
-                                msu=MSU,
-                                dtype=DTYPE)
+extractor = VideoQualityMeasure(galbally=GALBALLY, msu=MSU, dtype=DTYPE)
 """
 In the feature extraction stage the Image Quality Measures are extracted from each frame of the preprocessed RGB video.
 The features to be computed are introduced in the following papers: [WHJ15]_ and [CBVM16]_.
 """
 
-
 #=======================================================================================
 # define algorithm:
 
@@ -82,22 +76,26 @@ from ..algorithm import VideoSvmPadAlgorithm
 MACHINE_TYPE = 'C_SVC'
 KERNEL_TYPE = 'RBF'
 N_SAMPLES = 10000
-TRAINER_GRID_SEARCH_PARAMS = {'cost': [2**P for P in range(-3, 14, 2)], 'gamma': [2**P for P in range(-15, 0, 2)]}
-MEAN_STD_NORM_FLAG = True      # enable mean-std normalization
-FRAME_LEVEL_SCORES_FLAG = True # one score per frame(!) in this case
-SAVE_DEBUG_DATA_FLAG = True    # save the data, which might be useful for debugging
-REDUCED_TRAIN_DATA_FLAG = True # reduce the amount of training data in the final training stage
-N_TRAIN_SAMPLES = 50000       # number of training samples per class in the final SVM training stage
-
-algorithm = VideoSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                 kernel_type = KERNEL_TYPE,
-                                 n_samples = N_SAMPLES,
-                                 trainer_grid_search_params = TRAINER_GRID_SEARCH_PARAMS,
-                                 mean_std_norm_flag = MEAN_STD_NORM_FLAG,
-                                 frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG,
-                                 save_debug_data_flag = SAVE_DEBUG_DATA_FLAG,
-                                 reduced_train_data_flag = REDUCED_TRAIN_DATA_FLAG,
-                                 n_train_samples = N_TRAIN_SAMPLES)
+TRAINER_GRID_SEARCH_PARAMS = {
+    'cost': [2**P for P in range(-3, 14, 2)],
+    'gamma': [2**P for P in range(-15, 0, 2)]
+}
+MEAN_STD_NORM_FLAG = True  # enable mean-std normalization
+FRAME_LEVEL_SCORES_FLAG = True  # one score per frame(!) in this case
+SAVE_DEBUG_DATA_FLAG = True  # save the data, which might be useful for debugging
+REDUCED_TRAIN_DATA_FLAG = True  # reduce the amount of training data in the final training stage
+N_TRAIN_SAMPLES = 50000  # number of training samples per class in the final SVM training stage
+
+algorithm = VideoSvmPadAlgorithm(
+    machine_type=MACHINE_TYPE,
+    kernel_type=KERNEL_TYPE,
+    n_samples=N_SAMPLES,
+    trainer_grid_search_params=TRAINER_GRID_SEARCH_PARAMS,
+    mean_std_norm_flag=MEAN_STD_NORM_FLAG,
+    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG,
+    save_debug_data_flag=SAVE_DEBUG_DATA_FLAG,
+    reduced_train_data_flag=REDUCED_TRAIN_DATA_FLAG,
+    n_train_samples=N_TRAIN_SAMPLES)
 """
 The SVM algorithm with RBF kernel is used to classify the data into *real* and *attack* classes.
 One score is produced for each frame of the input video, ``frame_level_scores_flag = True``.
@@ -108,9 +106,3 @@ The final training of the SVM is done on the subset of training data ``reduced_t
 The size of the subset for the final training stage is defined by the ``n_train_samples`` argument.
 The data is also mean-std normalized, ``mean_std_norm_flag = True``.
 """
-
-
-
-
-
-
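The effect of ``reduced_train_data_flag = True`` with ``n_train_samples = 50000`` can be pictured with a small helper that caps the number of rows per class before the final SVM fit; this helper is a sketch, not code from the package:

import numpy as np

def subsample_per_class(features, labels, n_train_samples=50000, seed=0):
    """Keep at most n_train_samples rows per class (sketch of the
    reduced final training stage described above)."""
    rng = np.random.RandomState(seed)
    keep = []
    for label in np.unique(labels):
        idx = np.flatnonzero(labels == label)
        rng.shuffle(idx)
        keep.extend(idx[:n_train_samples])
    keep = np.sort(keep)
    return features[keep], labels[keep]

X, y = np.random.rand(1000, 8), np.repeat([0, 1], 500)
Xr, yr = subsample_per_class(X, y, n_train_samples=100)
print(Xr.shape)  # (200, 8)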
diff --git a/bob/pad/face/config/replay_attack.py b/bob/pad/face/config/replay_attack.py
index 0efc24be..befbc148 100644
--- a/bob/pad/face/config/replay_attack.py
+++ b/bob/pad/face/config/replay_attack.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-
 """`Replayattack`_ is a database for face PAD experiments.
 
 The Replay-Attack Database for face spoofing consists of 1300 video clips of photo and video attack attempts to 50 clients,
@@ -19,7 +18,7 @@ from bob.pad.face.database import ReplayPadDatabase
 ORIGINAL_DIRECTORY = "[YOUR_REPLAY_ATTACK_DIRECTORY]"
 """Value of ``~/.bob_bio_databases.txt`` for this database"""
 
-ORIGINAL_EXTENSION = ".mov" # extension of the data files
+ORIGINAL_EXTENSION = ".mov"  # extension of the data files
 
 database = ReplayPadDatabase(
     protocol='grandtest',
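To make this configuration usable, the placeholder above has to be resolved; assuming the usual bob convention for ``~/.bob_bio_databases.txt`` (one ``[KEY] = value`` entry per line), the entry would look like:

[YOUR_REPLAY_ATTACK_DIRECTORY] = /path/to/your/replay-attack-database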
diff --git a/bob/pad/face/config/replay_mobile.py b/bob/pad/face/config/replay_mobile.py
index 453ca410..94e61ca6 100644
--- a/bob/pad/face/config/replay_mobile.py
+++ b/bob/pad/face/config/replay_mobile.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-
 """`Replay-Mobile`_ is a database for face PAD experiments.
 
 The Replay-Mobile Database for face spoofing consists of 1030 video clips of photo and video attack attempts to 40 clients,
@@ -22,7 +21,7 @@ from bob.pad.face.database import ReplayMobilePadDatabase
 ORIGINAL_DIRECTORY = "[YOUR_REPLAY_MOBILE_DIRECTORY]"
 """Value of ``~/.bob_bio_databases.txt`` for this database"""
 
-ORIGINAL_EXTENSION = ".mov" # extension of the data files
+ORIGINAL_EXTENSION = ".mov"  # extension of the data files
 
 database = ReplayMobilePadDatabase(
     protocol='grandtest',
diff --git a/bob/pad/face/database/__init__.py b/bob/pad/face/database/__init__.py
index 5215323e..a2f0469a 100644
--- a/bob/pad/face/database/__init__.py
+++ b/bob/pad/face/database/__init__.py
@@ -7,7 +7,7 @@ from .mifs import MIFSPadDatabase
 
 # gets sphinx autodoc done right - don't remove it
 def __appropriate__(*args):
-  """Says object was actually declared here, and not in the import module.
+    """Says object was actually declared here, and not in the import module.
   Fixing sphinx warnings of not being able to find classes, when path is shortened.
   Parameters:
 
@@ -17,7 +17,9 @@ def __appropriate__(*args):
   <https://github.com/sphinx-doc/sphinx/issues/3048>`
   """
 
-  for obj in args: obj.__module__ = __name__
+    for obj in args:
+        obj.__module__ = __name__
+
 
 __appropriate__(
     ReplayPadDatabase,
@@ -25,6 +27,6 @@ __appropriate__(
     MsuMfsdPadDatabase,
     AggregatedDbPadDatabase,
     MIFSPadDatabase,
-    )
+)
 
 __all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/bob/pad/face/database/aggregated_db.py b/bob/pad/face/database/aggregated_db.py
index 3669aee7..fec72e5b 100644
--- a/bob/pad/face/database/aggregated_db.py
+++ b/bob/pad/face/database/aggregated_db.py
@@ -2,7 +2,7 @@
 # -*- coding: utf-8 -*-
 
 #==============================================================================
-from bob.pad.base.database import PadFile # Used in ReplayPadFile class
+from bob.pad.base.database import PadFile  # Used in AggregatedDbPadFile class
 
 from bob.pad.base.database import PadDatabase
 
@@ -19,6 +19,7 @@ from bob.bio.video import FrameSelector
 
 import numpy as np
 
+
 #==============================================================================
 class AggregatedDbPadFile(PadFile):
     """
@@ -51,7 +52,8 @@ class AggregatedDbPadFile(PadFile):
 
         import bob.db.mobio
 
-        if isinstance(f, bob.db.mobio.models.File): # MOBIO files doen't have is_real() method
+        if isinstance(f, bob.db.mobio.models.File
+                      ):  # MOBIO files don't have an is_real() method
 
             attack_type = None
 
@@ -68,12 +70,14 @@ class AggregatedDbPadFile(PadFile):
 
         file_id = self.encode_file_id(f)
 
-        super(AggregatedDbPadFile, self).__init__(client_id = f.client_id, path = file_path,
-                                                  attack_type = attack_type, file_id = file_id)
-
+        super(AggregatedDbPadFile, self).__init__(
+            client_id=f.client_id,
+            path=file_path,
+            attack_type=attack_type,
+            file_id=file_id)
 
     #==========================================================================
-    def encode_file_id(self, f, n = 2000):
+    def encode_file_id(self, f, n=2000):
         """
         Return a modified version of the ``f.id`` ensuring uniqueness of the ids
         across all databases.
@@ -106,25 +110,30 @@ class AggregatedDbPadFile(PadFile):
         import bob.db.msu_mfsd_mod
         import bob.db.mobio
 
-        if isinstance(f, bob.db.replay.models.File): # check if instance of File class of LLDI of Replay-Attack
+        if isinstance(
+                f, bob.db.replay.models.File
+        ):  # check if instance of File class of LLDI of Replay-Attack
 
             file_id = f.id
 
-        if isinstance(f, bob.db.replaymobile.models.File): # check if instance of File class of LLDI of Replay-Mobile
+        if isinstance(
+                f, bob.db.replaymobile.models.File
+        ):  # check if instance of File class of LLDI of Replay-Mobile
 
             file_id = np.int(f.id + n)
 
-        if isinstance(f, bob.db.msu_mfsd_mod.models.File): # check if instance of File class of LLDI of MSU MFSD
+        if isinstance(f, bob.db.msu_mfsd_mod.models.File
+                      ):  # check if instance of File class of LLDI of MSU MFSD
 
-            file_id = np.int(f.id + 2*n)
+            file_id = np.int(f.id + 2 * n)
 
-        if isinstance(f, bob.db.mobio.models.File): # check if instance of File class of LLDI of Mobio
+        if isinstance(f, bob.db.mobio.models.File
+                      ):  # check if instance of File class of LLDI of Mobio
 
-            file_id = np.int(f.id + 3*n)
+            file_id = np.int(f.id + 3 * n)
 
         return file_id
 
-
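In other words, each low-level database is mapped into its own id range in steps of ``n`` (so ``n = 2000`` must exceed the largest file id of any single database) and ids from the four databases never collide. A standalone sketch of the scheme, where ``db_name`` is a hypothetical tag standing in for the ``isinstance()`` checks above:

OFFSETS = {'replay': 0, 'replaymobile': 1, 'msu_mfsd': 2, 'mobio': 3}

def encode_file_id(file_id, db_name, n=2000):
    """Shift per-database ids into disjoint ranges (sketch)."""
    return file_id + OFFSETS[db_name] * n

assert encode_file_id(7, 'replay') != encode_file_id(7, 'mobio')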
     #==========================================================================
     def encode_file_path(self, f):
         """
@@ -154,25 +163,30 @@ class AggregatedDbPadFile(PadFile):
         import bob.db.msu_mfsd_mod
         import bob.db.mobio
 
-        if isinstance(f, bob.db.replay.models.File): # check if instance of File class of LLDI of Replay-Attack
+        if isinstance(
+                f, bob.db.replay.models.File
+        ):  # check if instance of File class of LLDI of Replay-Attack
 
             file_path = '_'.join([f.path, 'replay'])
 
-        if isinstance(f, bob.db.replaymobile.models.File): # check if instance of File class of LLDI of Replay-Mobile
+        if isinstance(
+                f, bob.db.replaymobile.models.File
+        ):  # check if instance of File class of LLDI of Replay-Mobile
 
             file_path = '_'.join([f.path, 'replaymobile'])
 
-        if isinstance(f, bob.db.msu_mfsd_mod.models.File): # check if instance of File class of LLDI of MSU MFSD
+        if isinstance(f, bob.db.msu_mfsd_mod.models.File
+                      ):  # check if instance of File class of LLDI of MSU MFSD
 
             file_path = '_'.join([f.path, 'msu_mfsd_mod'])
 
-        if isinstance(f, bob.db.mobio.models.File): # check if instance of File class of LLDI of Mobio
+        if isinstance(f, bob.db.mobio.models.File
+                      ):  # check if instance of File class of LLDI of Mobio
 
             file_path = '_'.join([f.path, 'mobio'])
 
         return file_path
 
-
     #==========================================================================
     def load(self, directory=None, extension='.mov'):
         """
@@ -203,41 +217,56 @@ class AggregatedDbPadFile(PadFile):
 
         directories = directory.split(" ")
 
-        if isinstance(self.f, bob.db.replay.models.File): # check if instance of File class of LLDI of Replay-Attack
+        if isinstance(
+                self.f, bob.db.replay.models.File
+        ):  # check if instance of File class of LLDI of Replay-Attack
 
-            db_pad_file = replay_hldi.ReplayPadFile(self.f) # replay_hldi is HLDI of Replay-Attack
+            db_pad_file = replay_hldi.ReplayPadFile(
+                self.f)  # replay_hldi is HLDI of Replay-Attack
 
             directory = directories[0]
 
-        if isinstance(self.f, bob.db.replaymobile.models.File): # check if instance of File class of LLDI of Replay-Mobile
+        if isinstance(
+                self.f, bob.db.replaymobile.models.File
+        ):  # check if instance of File class of LLDI of Replay-Mobile
 
-            db_pad_file = replay_mobile_hldi.ReplayMobilePadFile(self.f) # replay_mobile_hldi is HLDI of Replay-Mobile
+            db_pad_file = replay_mobile_hldi.ReplayMobilePadFile(
+                self.f)  # replay_mobile_hldi is HLDI of Replay-Mobile
 
             directory = directories[1]
 
-        if isinstance(self.f, bob.db.msu_mfsd_mod.models.File): # check if instance of File class of LLDI of MSU MFSD
+        if isinstance(self.f, bob.db.msu_mfsd_mod.models.File
+                      ):  # check if instance of File class of LLDI of MSU MFSD
 
-            db_pad_file = msu_mfsd_hldi.MsuMfsdPadFile(self.f) # msu_mfsd_hldi is HLDI of MSU MFSD
+            db_pad_file = msu_mfsd_hldi.MsuMfsdPadFile(
+                self.f)  # msu_mfsd_hldi is HLDI of MSU MFSD
 
             directory = directories[2]
 
-        if isinstance(self.f, bob.db.mobio.models.File): # check if instance of File class of LLDI of Mobio
+        if isinstance(self.f, bob.db.mobio.models.File
+                      ):  # check if instance of File class of LLDI of Mobio
 
-            db_pad_file = MobioBioFile(self.f) # msu_mfsd_hldi is HLDI of MSU MFSD
+            db_pad_file = MobioBioFile(
+                self.f)  # MobioBioFile is HLDI of MOBIO
 
             directory = directories[3]
 
         if isinstance(db_pad_file, bob.bio.video.database.mobio.MobioBioFile):
 
-            frame_selector = FrameSelector(selection_style='all') # select all frames of the file
+            frame_selector = FrameSelector(
+                selection_style='all')  # select all frames of the file
 
-            video_data = db_pad_file.load(directory = directory, extension = '.mp4', frame_selector = frame_selector)
+            video_data = db_pad_file.load(
+                directory=directory,
+                extension='.mp4',
+                frame_selector=frame_selector)
 
         else:
 
-            video_data = db_pad_file.load(directory = directory, extension = extension)
+            video_data = db_pad_file.load(
+                directory=directory, extension=extension)
 
-        return video_data # video data
+        return video_data  # video data
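Since ``original_directory`` holds the four raw-data locations as one space-separated string, ``load()`` above has to pick the right entry per file type. The dispatch boils down to the following sketch, again with a hypothetical ``db_name`` tag in place of the ``isinstance()`` checks:

DIRECTORY_INDEX = {'replay': 0, 'replaymobile': 1, 'msu_mfsd': 2, 'mobio': 3}

def directory_for(db_name, original_directory):
    """Select the raw-data directory for one of the aggregated
    databases from the space-separated directory string (sketch)."""
    directories = original_directory.split(" ")
    return directories[DIRECTORY_INDEX[db_name]]

print(directory_for('msu_mfsd', '/data/replay /data/replaymobile /data/msu /data/mobio'))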
 
 
 #==============================================================================
@@ -283,11 +312,11 @@ class AggregatedDbPadDatabase(PadDatabase):
     """
 
     def __init__(
-        self,
-        protocol='grandtest', # grandtest is the default protocol for this database
-        original_directory=None,
-        original_extension=None,
-        **kwargs):
+            self,
+            protocol='grandtest',  # grandtest is the default protocol for this database
+            original_directory=None,
+            original_extension=None,
+            **kwargs):
         """
         **Parameters:**
 
@@ -321,21 +350,27 @@ class AggregatedDbPadDatabase(PadDatabase):
 
         # Since the high level API expects different group names than what the low
         # level API offers, you need to convert them when necessary
-        self.low_level_group_names = ('train', 'devel', 'test') # group names in the low-level database interface
-        self.high_level_group_names = ('train', 'dev', 'eval') # names are expected to be like that in objects() function
+        self.low_level_group_names = (
+            'train', 'devel',
+            'test')  # group names in the low-level database interface
+        self.high_level_group_names = (
+            'train', 'dev',
+            'eval')  # names are expected to be like that in objects() function
 
         # A list of available protocols:
-        self.available_protocols = ['grandtest', 'photo-photo-video', 'video-video-photo', 'grandtest-mobio', 'grandtest-train-eval']
+        self.available_protocols = [
+            'grandtest', 'photo-photo-video', 'video-video-photo',
+            'grandtest-mobio', 'grandtest-train-eval'
+        ]
 
         # Always use super to call parent class methods.
         super(AggregatedDbPadDatabase, self).__init__(
-            name = 'aggregated_db',
-            protocol = protocol,
-            original_directory = original_directory,
-            original_extension = original_extension,
+            name='aggregated_db',
+            protocol=protocol,
+            original_directory=original_directory,
+            original_extension=original_extension,
             **kwargs)
 
-
     #==========================================================================
     def get_mobio_files_given_single_group(self, groups=None, purposes=None):
         """
@@ -366,7 +401,8 @@ class AggregatedDbPadDatabase(PadDatabase):
 
         mobio_files = []
 
-        if (groups is not None) and ('train' in groups) and (purposes is not None) and ('real' in purposes):
+        if (groups is not None) and ('train' in groups) and (
+                purposes is not None) and ('real' in purposes):
 
             files_mobio = self.mobio.all_files()
 
@@ -376,13 +412,14 @@ class AggregatedDbPadDatabase(PadDatabase):
 
                 metadata.append((f.client_id))
 
-            metadata_set = list(set(metadata)) # metadata_set is a list of unique client ids
+            metadata_set = list(
+                set(metadata))  # metadata_set is a list of unique client ids
 
             for f in files_mobio:
 
                 metadata = (f.client_id)
 
-                if metadata in metadata_set: # only one video per client id is selected
+                if metadata in metadata_set:  # only one video per client id is selected
 
                     metadata_set.remove(metadata)
 
@@ -392,9 +429,13 @@ class AggregatedDbPadDatabase(PadDatabase):
 
         return mobio_files
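The selection above ("only one video per client id") is a plain first-per-client filter; a minimal standalone sketch:

def one_file_per_client(files):
    """Keep the first file of each client id (sketch of the MOBIO
    selection in get_mobio_files_given_single_group above)."""
    seen = set()
    selected = []
    for f in files:
        if f.client_id not in seen:
            seen.add(f.client_id)
            selected.append(f)
    return selected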
 
-
     #==========================================================================
-    def get_files_given_single_group(self, groups=None, protocol=None, purposes=None, model_ids=None, **kwargs):
+    def get_files_given_single_group(self,
+                                     groups=None,
+                                     protocol=None,
+                                     purposes=None,
+                                     model_ids=None,
+                                     **kwargs):
         """
         This function returns 4 lists of files for Replay-Attack, Replay-Mobile,
         MSU MFSD and MOBIO databases, which fulfill the given restrictions. This
@@ -469,83 +510,152 @@ class AggregatedDbPadDatabase(PadDatabase):
 
         if protocol == 'grandtest' or protocol is None or groups is None:
 
-            replay_files = self.replay_db.objects(protocol=protocol, groups=groups, cls=purposes, **kwargs)
+            replay_files = self.replay_db.objects(
+                protocol=protocol, groups=groups, cls=purposes, **kwargs)
 
-            replaymobile_files = self.replaymobile_db.objects(protocol=protocol, groups=groups, cls=purposes, **kwargs)
+            replaymobile_files = self.replaymobile_db.objects(
+                protocol=protocol, groups=groups, cls=purposes, **kwargs)
 
-            msu_mfsd_files = self.msu_mfsd_db.objects(group=groups, cls=purposes, **kwargs)
+            msu_mfsd_files = self.msu_mfsd_db.objects(
+                group=groups, cls=purposes, **kwargs)
 
         if protocol == 'photo-photo-video':
 
-            if groups == 'train' or groups ==  'devel': # the group names are low-level here: ('train', 'devel', 'test')
+            if groups == 'train' or groups == 'devel':  # the group names are low-level here: ('train', 'devel', 'test')
 
-                replay_files = self.replay_db.objects(protocol='photo', groups=groups, cls=purposes, **kwargs)
+                replay_files = self.replay_db.objects(
+                    protocol='photo', groups=groups, cls=purposes, **kwargs)
 
-                replaymobile_files = self.replaymobile_db.objects(protocol='grandtest', groups=groups, cls=purposes, sample_type='photo', **kwargs)
+                replaymobile_files = self.replaymobile_db.objects(
+                    protocol='grandtest',
+                    groups=groups,
+                    cls=purposes,
+                    sample_type='photo',
+                    **kwargs)
 
-                msu_mfsd_files = self.msu_mfsd_db.objects(group=groups, cls=purposes, instrument = ('print', ''), **kwargs)
+                msu_mfsd_files = self.msu_mfsd_db.objects(
+                    group=groups,
+                    cls=purposes,
+                    instrument=('print', ''),
+                    **kwargs)
 
             if groups == 'test':
 
-                replay_files = self.replay_db.objects(protocol='video', groups=groups, cls=purposes, **kwargs)
+                replay_files = self.replay_db.objects(
+                    protocol='video', groups=groups, cls=purposes, **kwargs)
 
-                replaymobile_files = self.replaymobile_db.objects(protocol='grandtest', groups=groups, cls=purposes, sample_type='video', **kwargs)
+                replaymobile_files = self.replaymobile_db.objects(
+                    protocol='grandtest',
+                    groups=groups,
+                    cls=purposes,
+                    sample_type='video',
+                    **kwargs)
 
-                msu_mfsd_files = self.msu_mfsd_db.objects(group=groups, cls=purposes, instrument = ('video_hd', 'video_mobile', ''), **kwargs)
+                msu_mfsd_files = self.msu_mfsd_db.objects(
+                    group=groups,
+                    cls=purposes,
+                    instrument=('video_hd', 'video_mobile', ''),
+                    **kwargs)
 
         if protocol == 'video-video-photo':
 
-            if groups == 'train' or groups ==  'devel': # the group names are low-level here: ('train', 'devel', 'test')
+            if groups == 'train' or groups == 'devel':  # the group names are low-level here: ('train', 'devel', 'test')
 
-                replay_files = self.replay_db.objects(protocol='video', groups=groups, cls=purposes, **kwargs)
+                replay_files = self.replay_db.objects(
+                    protocol='video', groups=groups, cls=purposes, **kwargs)
 
-                replaymobile_files = self.replaymobile_db.objects(protocol='grandtest', groups=groups, cls=purposes, sample_type='video', **kwargs)
+                replaymobile_files = self.replaymobile_db.objects(
+                    protocol='grandtest',
+                    groups=groups,
+                    cls=purposes,
+                    sample_type='video',
+                    **kwargs)
 
-                msu_mfsd_files = self.msu_mfsd_db.objects(group=groups, cls=purposes, instrument = ('video_hd', 'video_mobile', ''), **kwargs)
+                msu_mfsd_files = self.msu_mfsd_db.objects(
+                    group=groups,
+                    cls=purposes,
+                    instrument=('video_hd', 'video_mobile', ''),
+                    **kwargs)
 
             if groups == 'test':
 
-                replay_files = self.replay_db.objects(protocol='photo', groups=groups, cls=purposes, **kwargs)
+                replay_files = self.replay_db.objects(
+                    protocol='photo', groups=groups, cls=purposes, **kwargs)
 
-                replaymobile_files = self.replaymobile_db.objects(protocol='grandtest', groups=groups, cls=purposes, sample_type='photo', **kwargs)
+                replaymobile_files = self.replaymobile_db.objects(
+                    protocol='grandtest',
+                    groups=groups,
+                    cls=purposes,
+                    sample_type='photo',
+                    **kwargs)
 
-                msu_mfsd_files = self.msu_mfsd_db.objects(group=groups, cls=purposes, instrument = ('print', ''), **kwargs)
+                msu_mfsd_files = self.msu_mfsd_db.objects(
+                    group=groups,
+                    cls=purposes,
+                    instrument=('print', ''),
+                    **kwargs)
 
         mobio_files = []
 
         if protocol == 'grandtest-mobio':
 
-            replay_files = self.replay_db.objects(protocol='grandtest', groups=groups, cls=purposes, **kwargs)
+            replay_files = self.replay_db.objects(
+                protocol='grandtest', groups=groups, cls=purposes, **kwargs)
 
-            replaymobile_files = self.replaymobile_db.objects(protocol='grandtest', groups=groups, cls=purposes, **kwargs)
+            replaymobile_files = self.replaymobile_db.objects(
+                protocol='grandtest', groups=groups, cls=purposes, **kwargs)
 
-            msu_mfsd_files = self.msu_mfsd_db.objects(group=groups, cls=purposes, **kwargs)
+            msu_mfsd_files = self.msu_mfsd_db.objects(
+                group=groups, cls=purposes, **kwargs)
 
-            mobio_files = self.get_mobio_files_given_single_group(groups=groups, purposes=purposes)
+            mobio_files = self.get_mobio_files_given_single_group(
+                groups=groups, purposes=purposes)
 
         if protocol == 'grandtest-train-eval':
 
             if groups == 'train':
 
-                replay_files = self.replay_db.objects(protocol='grandtest', groups=['train', 'devel'], cls=purposes, **kwargs)
+                replay_files = self.replay_db.objects(
+                    protocol='grandtest',
+                    groups=['train', 'devel'],
+                    cls=purposes,
+                    **kwargs)
 
-                replaymobile_files = self.replaymobile_db.objects(protocol='grandtest', groups=['train', 'devel'], cls=purposes, **kwargs)
+                replaymobile_files = self.replaymobile_db.objects(
+                    protocol='grandtest',
+                    groups=['train', 'devel'],
+                    cls=purposes,
+                    **kwargs)
 
-                msu_mfsd_files = self.msu_mfsd_db.objects(group=['train', 'devel'], cls=purposes, **kwargs)
+                msu_mfsd_files = self.msu_mfsd_db.objects(
+                    group=['train', 'devel'], cls=purposes, **kwargs)
 
             if groups in ['devel', 'test']:
 
-                replay_files = self.replay_db.objects(protocol='grandtest', groups='test', cls=purposes, **kwargs)
+                replay_files = self.replay_db.objects(
+                    protocol='grandtest',
+                    groups='test',
+                    cls=purposes,
+                    **kwargs)
 
-                replaymobile_files = self.replaymobile_db.objects(protocol='grandtest', groups='test', cls=purposes, **kwargs)
+                replaymobile_files = self.replaymobile_db.objects(
+                    protocol='grandtest',
+                    groups='test',
+                    cls=purposes,
+                    **kwargs)
 
-                msu_mfsd_files = self.msu_mfsd_db.objects(group='test', cls=purposes, **kwargs)
+                msu_mfsd_files = self.msu_mfsd_db.objects(
+                    group='test', cls=purposes, **kwargs)
 
         return replay_files, replaymobile_files, msu_mfsd_files, mobio_files
 
-
     #==========================================================================
-    def get_files_given_groups(self, groups=None, protocol=None, purposes=None, model_ids=None, **kwargs):
+    def get_files_given_groups(self,
+                               groups=None,
+                               protocol=None,
+                               purposes=None,
+                               model_ids=None,
+                               **kwargs):
         """
         This function returns 4 lists of files for Replay-Attack, Replay-Mobile,
         MSU MFSD and MOBIO databases, which fulfill the given restrictions. This
@@ -619,7 +729,8 @@ class AggregatedDbPadDatabase(PadDatabase):
             A list of files corresponding to MOBIO database or an empty list.
         """
 
-        if isinstance(groups, str) or groups is None: # if a single group is given
+        if isinstance(groups,
+                      str) or groups is None:  # if a single group is given
 
             groups = [groups]
 
@@ -633,7 +744,12 @@ class AggregatedDbPadDatabase(PadDatabase):
 
         for group in groups:
 
-            files = self.get_files_given_single_group(groups = group, protocol = protocol, purposes = purposes, model_ids = model_ids, **kwargs)
+            files = self.get_files_given_single_group(
+                groups=group,
+                protocol=protocol,
+                purposes=purposes,
+                model_ids=model_ids,
+                **kwargs)
 
             replay_files += files[0]
 
@@ -645,9 +761,13 @@ class AggregatedDbPadDatabase(PadDatabase):
 
         return replay_files, replaymobile_files, msu_mfsd_files, mobio_files
 
-
     #==========================================================================
-    def objects(self, groups=None, protocol=None, purposes=None, model_ids=None, **kwargs):
+    def objects(self,
+                groups=None,
+                protocol=None,
+                purposes=None,
+                model_ids=None,
+                **kwargs):
         """
         This function returns a list of AggregatedDbPadFile objects, which fulfill the given restrictions.
 
@@ -703,29 +823,30 @@ class AggregatedDbPadDatabase(PadDatabase):
         """
 
         # Convert group names to low-level group names here.
-        groups = self.convert_names_to_lowlevel(groups, self.low_level_group_names, self.high_level_group_names)
+        groups = self.convert_names_to_lowlevel(
+            groups, self.low_level_group_names, self.high_level_group_names)
         # Since this database was designed for PAD experiments, nothing special
         # needs to be done here.
 
-        replay_files, replaymobile_files, msu_mfsd_files, mobio_files = self.get_files_given_groups(groups = groups,
-                                                                                                    protocol = protocol,
-                                                                                                    purposes = purposes,
-                                                                                                    model_ids = model_ids,
-                                                                                                    **kwargs)
+        replay_files, replaymobile_files, msu_mfsd_files, mobio_files = self.get_files_given_groups(
+            groups=groups,
+            protocol=protocol,
+            purposes=purposes,
+            model_ids=model_ids,
+            **kwargs)
 
-#            replay_files = self.replay_db.objects(protocol=protocol, groups=groups, cls=purposes, **kwargs)
-#
-#            replaymobile_files = self.replaymobile_db.objects(protocol=protocol, groups=groups, cls=purposes, **kwargs)
-#
-#            msu_mfsd_files = self.msu_mfsd_db.objects(group=groups, cls=purposes, **kwargs)
+        #            replay_files = self.replay_db.objects(protocol=protocol, groups=groups, cls=purposes, **kwargs)
+        #
+        #            replaymobile_files = self.replaymobile_db.objects(protocol=protocol, groups=groups, cls=purposes, **kwargs)
+        #
+        #            msu_mfsd_files = self.msu_mfsd_db.objects(group=groups, cls=purposes, **kwargs)
 
-        files = replay_files + replaymobile_files + msu_mfsd_files + mobio_files # append all files to a single list
+        files = replay_files + replaymobile_files + msu_mfsd_files + mobio_files  # append all files to a single list
 
         files = [AggregatedDbPadFile(f) for f in files]
 
         return files
 
-
     #==========================================================================
     def annotations(self, f):
         """
@@ -756,19 +877,29 @@ class AggregatedDbPadDatabase(PadDatabase):
 
         directories = self.original_directory.split(" ")
 
-        if isinstance(f.f, bob.db.replay.models.File): # check if instance of File class of LLDI of Replay-Attack
+        if isinstance(
+                f.f, bob.db.replay.models.File
+        ):  # check if instance of File class of LLDI of Replay-Attack
 
-            hldi_db = replay_hldi.ReplayPadDatabase(original_directory = directories[0])
+            hldi_db = replay_hldi.ReplayPadDatabase(
+                original_directory=directories[0])
 
-        if isinstance(f.f, bob.db.replaymobile.models.File): # check if instance of File class of LLDI of Replay-Mobile
+        if isinstance(
+                f.f, bob.db.replaymobile.models.File
+        ):  # check if instance of File class of LLDI of Replay-Mobile
 
-            hldi_db = replay_mobile_hldi.ReplayMobilePadDatabase(original_directory = directories[1])
+            hldi_db = replay_mobile_hldi.ReplayMobilePadDatabase(
+                original_directory=directories[1])
 
-        if isinstance(f.f, bob.db.msu_mfsd_mod.models.File): # check if instance of File class of LLDI of MSU MFSD
+        if isinstance(f.f, bob.db.msu_mfsd_mod.models.File
+                      ):  # check if instance of File class of LLDI of MSU MFSD
 
-            hldi_db = msu_mfsd_hldi.MsuMfsdPadDatabase(original_directory = directories[2])
+            hldi_db = msu_mfsd_hldi.MsuMfsdPadDatabase(
+                original_directory=directories[2])
 
-        if self.protocol == "grandtest-mobio" or isinstance(f.f, bob.db.mobio.models.File): # annotations are not available for this protocol
+        if self.protocol == "grandtest-mobio" or isinstance(
+                f.f, bob.db.mobio.models.File
+        ):  # annotations are not available for this protocol
 
             annotations = {}
 
@@ -777,10 +908,3 @@ class AggregatedDbPadDatabase(PadDatabase):
             annotations = hldi_db.annotations(f)
 
         return annotations
-
-
-
-
-
-
-
diff --git a/bob/pad/face/database/mifs.py b/bob/pad/face/database/mifs.py
index 570606a8..beaba9dd 100644
--- a/bob/pad/face/database/mifs.py
+++ b/bob/pad/face/database/mifs.py
@@ -5,24 +5,25 @@
 
 #==============================================================================
 
-import bob.bio.video # Used in MIFSPadFile class
+import bob.bio.video  # Used in MIFSPadFile class
 import bob.io.base
 import numpy as np
 
-from bob.pad.base.database import PadFile # Used in ReplayPadFile class
+from bob.pad.base.database import PadFile  # Used in MIFSPadFile class
 
 from bob.pad.base.database import FileListPadDatabase
 
-
 #==============================================================================
 
+
 class MIFSPadFile(PadFile):
     """
     A high level implementation of the File class for the MIFS database.
     """
 
     def __init__(self, client_id, path, attack_type=None, file_id=None):
-        super(MIFSPadFile, self).__init__(client_id, path, attack_type, file_id)
+        super(MIFSPadFile, self).__init__(client_id, path, attack_type,
+                                          file_id)
 
     #==========================================================================
     def load(self, directory=None, extension=None):
@@ -46,14 +47,17 @@ class MIFSPadFile(PadFile):
             for further details.
         """
 
-        path = self.make_path(directory=directory, extension=extension) # path to the file
-        frame_selector = bob.bio.video.FrameSelector(selection_style = 'all') # this frame_selector will select all frames from the video file
+        path = self.make_path(
+            directory=directory, extension=extension)  # path to the file
+        frame_selector = bob.bio.video.FrameSelector(
+            selection_style='all'
+        )  # this frame_selector will select all frames from the video file
 
         data = bob.io.base.load(path)
-        data = np.expand_dims(data, axis=0) # upgrade to 4D (video)
-        video_data = frame_selector(data) # video data
+        data = np.expand_dims(data, axis=0)  # upgrade to 4D (video)
+        video_data = frame_selector(data)  # video data
 
-        return video_data # video data
+        return video_data  # video data
 
 
 #==============================================================================
@@ -63,19 +67,21 @@ class MIFSPadDatabase(FileListPadDatabase):
     """
 
     def __init__(
-        self,
-        protocol='grandtest', # grandtest is the default protocol for this database
-        original_directory='[YOUR_MIFS_DATABASE_DIRECTORY]',
-        original_extension='.jpg',
-        **kwargs):
+            self,
+            protocol='grandtest',  # grandtest is the default protocol for this database
+            original_directory='[YOUR_MIFS_DATABASE_DIRECTORY]',
+            original_extension='.jpg',
+            **kwargs):
 
         from pkg_resources import resource_filename
         folder = resource_filename(__name__, '../lists/mifs/')
-        super(MIFSPadDatabase, self).__init__(folder, 'mifs',
-                                            pad_file_class=MIFSPadFile,
-                                            protocol = protocol,
-                                            original_directory=original_directory,
-                                            original_extension=original_extension)
+        super(MIFSPadDatabase, self).__init__(
+            folder,
+            'mifs',
+            pad_file_class=MIFSPadFile,
+            protocol=protocol,
+            original_directory=original_directory,
+            original_extension=original_extension)
 
     #==========================================================================
     def annotations(self, f):
@@ -97,12 +103,12 @@ class MIFSPadDatabase(FileListPadDatabase):
             is the dictionary defining the coordinates of the face bounding box in frame N.
         """
 
-        path_to_file    = self.m_base_dir + '/annotations/' + f.path[:-4] + '.face'
-        file_handle     = open(path_to_file, 'r')
-        line            = file_handle.readline()
-        bbox            = [int(x) for x in line.split()]
+        path_to_file = self.m_base_dir + '/annotations/' + f.path[:-4] + '.face'
+        file_handle = open(path_to_file, 'r')
+        line = file_handle.readline()
+        bbox = [int(x) for x in line.split()]
 
-        annotations = {} # dictionary to return
+        annotations = {}  # dictionary to return
 
         topleft = (bbox[0], bbox[1])
         bottomright = (bbox[0] + bbox[2], bbox[1] + bbox[3])
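
For reference, the annotations() method above reduces each MIFS .face file to a single bounding box. A self-contained sketch of that conversion; treating the columns as (x, y, width, height) follows how the code indexes them and is not independently verified:

# Sketch of the bounding-box conversion in MIFSPadDatabase.annotations().
def face_line_to_annotations(line):
    bbox = [int(x) for x in line.split()]  # assumed order: x y width height
    topleft = (bbox[0], bbox[1])
    bottomright = (bbox[0] + bbox[2], bbox[1] + bbox[3])
    return {'0': {'topleft': topleft, 'bottomright': bottomright}}

print(face_line_to_annotations("10 20 64 80"))
# {'0': {'topleft': (10, 20), 'bottomright': (74, 100)}}
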
diff --git a/bob/pad/face/database/msu_mfsd.py b/bob/pad/face/database/msu_mfsd.py
index 52c7fb8f..eb4ed5f7 100644
--- a/bob/pad/face/database/msu_mfsd.py
+++ b/bob/pad/face/database/msu_mfsd.py
@@ -1,11 +1,10 @@
 #!/usr/bin/env python2
 # -*- coding: utf-8 -*-
 
-
 #==============================================================================
-import bob.bio.video # Used in MsuMfsdPadFile class
+import bob.bio.video  # Used in MsuMfsdPadFile class
 
-from bob.pad.base.database import PadFile # Used in MsuMfsdPadFile class
+from bob.pad.base.database import PadFile  # Used in MsuMfsdPadFile class
 
 from bob.pad.base.database import PadDatabase
 
@@ -13,6 +12,7 @@ import os
 
 import numpy as np
 
+
 #==============================================================================
 class MsuMfsdPadFile(PadFile):
     """
@@ -43,9 +43,11 @@ class MsuMfsdPadFile(PadFile):
         # attack_type is a string and I decided to make it like this for this
         # particular database. You can do whatever you want for your own database.
 
-        super(MsuMfsdPadFile, self).__init__(client_id=f.client_id, path=f.path,
-                                            attack_type=attack_type, file_id=f.id)
-
+        super(MsuMfsdPadFile, self).__init__(
+            client_id=f.client_id,
+            path=f.path,
+            attack_type=attack_type,
+            file_id=f.id)
 
     #==========================================================================
     def convert_arr_to_frame_cont(self, data):
@@ -66,7 +68,8 @@ class MsuMfsdPadFile(PadFile):
             Resulting FrameContainer containing RGB frames.
         """
 
-        frames = bob.bio.video.FrameContainer() # initialize the FrameContainer
+        frames = bob.bio.video.FrameContainer(
+        )  # initialize the FrameContainer
 
         for idx, sample in enumerate(data):
 
@@ -74,7 +77,6 @@ class MsuMfsdPadFile(PadFile):
 
         return frames
 
-
     #==========================================================================
     def load(self, directory=None, extension=None):
         """
@@ -98,12 +100,14 @@ class MsuMfsdPadFile(PadFile):
             for further details.
         """
 
-        _, extension = os.path.splitext(self.f.videofile()) # get file extension
+        _, extension = os.path.splitext(
+            self.f.videofile())  # get file extension
 
-        video_data_array = self.f.load(directory = directory,
-                                       extension = extension)
+        video_data_array = self.f.load(
+            directory=directory, extension=extension)
 
-        video_data = self.convert_arr_to_frame_cont(video_data_array) # the result is now a FrameContainer
+        video_data = self.convert_arr_to_frame_cont(
+            video_data_array)  # the result is now a FrameContainer
 
         return video_data
 
@@ -115,11 +119,11 @@ class MsuMfsdPadDatabase(PadDatabase):
     """
 
     def __init__(
-        self,
-        protocol='grandtest', # grandtest is the default protocol for this database
-        original_directory=None,
-        original_extension=None,
-        **kwargs):
+            self,
+            protocol='grandtest',  # grandtest is the default protocol for this database
+            original_directory=None,
+            original_extension=None,
+            **kwargs):
         """
         **Parameters:**
 
@@ -142,20 +146,28 @@ class MsuMfsdPadDatabase(PadDatabase):
 
         # Since the high level API expects different group names than what the low
         # level API offers, you need to convert them when necessary
-        self.low_level_group_names = ('train', 'devel', 'test') # group names in the low-level database interface
-        self.high_level_group_names = ('train', 'dev', 'eval') # names are expected to be like that in objects() function
+        self.low_level_group_names = (
+            'train', 'devel',
+            'test')  # group names in the low-level database interface
+        self.high_level_group_names = (
+            'train', 'dev',
+            'eval')  # group names expected by the objects() function
 
         # Always use super to call parent class methods.
         super(MsuMfsdPadDatabase, self).__init__(
-            name = 'msu-mfsd',
-            protocol = protocol,
-            original_directory = original_directory,
-            original_extension = original_extension,
+            name='msu-mfsd',
+            protocol=protocol,
+            original_directory=original_directory,
+            original_extension=original_extension,
             **kwargs)
 
-
     #==========================================================================
-    def objects(self, groups=None, protocol=None, purposes=None, model_ids=None, **kwargs):
+    def objects(self,
+                groups=None,
+                protocol=None,
+                purposes=None,
+                model_ids=None,
+                **kwargs):
         """
         This function returns lists of MsuMfsdPadFile objects, which fulfill the given restrictions.
 
@@ -186,7 +198,8 @@ class MsuMfsdPadDatabase(PadDatabase):
         """
 
         # Convert group names to low-level group names here.
-        groups = self.convert_names_to_lowlevel(groups, self.low_level_group_names, self.high_level_group_names)
+        groups = self.convert_names_to_lowlevel(
+            groups, self.low_level_group_names, self.high_level_group_names)
         # Since this database was designed for PAD experiments, nothing special
         # needs to be done here.
         files = self.db.objects(group=groups, cls=purposes, **kwargs)
@@ -195,7 +208,6 @@ class MsuMfsdPadDatabase(PadDatabase):
 
         return files
 
-
     #==========================================================================
     def annotations(self, f):
         """
@@ -219,17 +231,21 @@ class MsuMfsdPadDatabase(PadDatabase):
             is the dictionary defining the coordinates of the face bounding box in frame N.
         """
 
-        annots = f.f.bbx(directory=self.original_directory) # numpy array containing the face bounding box data for each video frame, returned data format described in the f.bbx() method of the low level interface
+        annots = f.f.bbx(
+            directory=self.original_directory
+        )  # per-frame face bounding boxes; the data format is described in the f.bbx() method of the low-level interface
 
-        annotations = {} # dictionary to return
+        annotations = {}  # dictionary to return
 
         for frame_annots in annots:
 
-            topleft = ( np.int( frame_annots[2] ), np.int( frame_annots[1] ) )
-            bottomright = ( np.int( frame_annots[2] + frame_annots[4] ), np.int( frame_annots[1] + frame_annots[3] ) )
+            topleft = (np.int(frame_annots[2]), np.int(frame_annots[1]))
+            bottomright = (np.int(frame_annots[2] + frame_annots[4]),
+                           np.int(frame_annots[1] + frame_annots[3]))
 
-            annotations[str( np.int( frame_annots[0] ) )] = {'topleft': topleft, 'bottomright': bottomright}
+            annotations[str(np.int(frame_annots[0]))] = {
+                'topleft': topleft,
+                'bottomright': bottomright
+            }
 
         return annotations
-
-
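
The constructor above records the two group-name tuples whose translation objects() later delegates to convert_names_to_lowlevel(). A minimal sketch of the kind of mapping that helper performs; the real implementation is inherited from the base database class and may handle None or single strings differently:

# Sketch of the high-level -> low-level group-name translation.
LOW_LEVEL = ('train', 'devel', 'test')   # low-level database interface
HIGH_LEVEL = ('train', 'dev', 'eval')    # high-level PAD API

def to_lowlevel(groups):
    mapping = dict(zip(HIGH_LEVEL, LOW_LEVEL))
    return tuple(mapping[g] for g in groups)

print(to_lowlevel(('train', 'dev', 'eval')))  # ('train', 'devel', 'test')
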
diff --git a/bob/pad/face/database/replay.py b/bob/pad/face/database/replay.py
index d9656f3d..fb1092ca 100644
--- a/bob/pad/face/database/replay.py
+++ b/bob/pad/face/database/replay.py
@@ -1,18 +1,17 @@
 #!/usr/bin/env python2
 # -*- coding: utf-8 -*-
 
-
 #==============================================================================
 
-import bob.bio.video # Used in ReplayPadFile class
+import bob.bio.video  # Used in ReplayPadFile class
 
-from bob.pad.base.database import PadFile # Used in ReplayPadFile class
+from bob.pad.base.database import PadFile  # Used in ReplayPadFile class
 
 from bob.pad.base.database import PadDatabase
 
-
 #==============================================================================
 
+
 class ReplayPadFile(PadFile):
     """
     A high level implementation of the File class for the REPLAY-ATTACK database.
@@ -42,9 +41,11 @@ class ReplayPadFile(PadFile):
         # attack_type is a string and I decided to make it like this for this
         # particular database. You can do whatever you want for your own database.
 
-        super(ReplayPadFile, self).__init__(client_id=f.client_id, path=f.path,
-                                            attack_type=attack_type, file_id=f.id)
-
+        super(ReplayPadFile, self).__init__(
+            client_id=f.client_id,
+            path=f.path,
+            attack_type=attack_type,
+            file_id=f.id)
 
     #==========================================================================
     def load(self, directory=None, extension='.mov'):
@@ -66,13 +67,16 @@ class ReplayPadFile(PadFile):
             for further details.
         """
 
-        path = self.f.make_path(directory=directory, extension=extension) # path to the video file
+        path = self.f.make_path(
+            directory=directory, extension=extension)  # path to the video file
 
-        frame_selector = bob.bio.video.FrameSelector(selection_style = 'all') # this frame_selector will select all frames from the video file
+        frame_selector = bob.bio.video.FrameSelector(
+            selection_style='all'
+        )  # this frame_selector will select all frames from the video file
 
-        video_data = frame_selector(path) # video data
+        video_data = frame_selector(path)  # video data
 
-        return video_data # video data
+        return video_data  # video data
 
 
 #==============================================================================
@@ -82,11 +86,11 @@ class ReplayPadDatabase(PadDatabase):
     """
 
     def __init__(
-        self,
-        protocol='grandtest', # grandtest is the default protocol for this database
-        original_directory=None,
-        original_extension=None,
-        **kwargs):
+            self,
+            protocol='grandtest',  # grandtest is the default protocol for this database
+            original_directory=None,
+            original_extension=None,
+            **kwargs):
         """
         **Parameters:**
 
@@ -109,20 +113,28 @@ class ReplayPadDatabase(PadDatabase):
 
         # Since the high level API expects different group names than what the low
         # level API offers, you need to convert them when necessary
-        self.low_level_group_names = ('train', 'devel', 'test') # group names in the low-level database interface
-        self.high_level_group_names = ('train', 'dev', 'eval') # names are expected to be like that in objects() function
+        self.low_level_group_names = (
+            'train', 'devel',
+            'test')  # group names in the low-level database interface
+        self.high_level_group_names = (
+            'train', 'dev',
+            'eval')  # group names expected by the objects() function
 
         # Always use super to call parent class methods.
         super(ReplayPadDatabase, self).__init__(
-            name = 'replay',
-            protocol = protocol,
-            original_directory = original_directory,
-            original_extension = original_extension,
+            name='replay',
+            protocol=protocol,
+            original_directory=original_directory,
+            original_extension=original_extension,
             **kwargs)
 
-
     #==========================================================================
-    def objects(self, groups=None, protocol=None, purposes=None, model_ids=None, **kwargs):
+    def objects(self,
+                groups=None,
+                protocol=None,
+                purposes=None,
+                model_ids=None,
+                **kwargs):
         """
         This function returns lists of ReplayPadFile objects, which fulfill the given restrictions.
 
@@ -153,14 +165,15 @@ class ReplayPadDatabase(PadDatabase):
         """
 
         # Convert group names to low-level group names here.
-        groups = self.convert_names_to_lowlevel(groups, self.low_level_group_names, self.high_level_group_names)
+        groups = self.convert_names_to_lowlevel(
+            groups, self.low_level_group_names, self.high_level_group_names)
         # Since this database was designed for PAD experiments, nothing special
         # needs to be done here.
-        files = self.db.objects(protocol=protocol, groups=groups, cls=purposes, **kwargs)
+        files = self.db.objects(
+            protocol=protocol, groups=groups, cls=purposes, **kwargs)
         files = [ReplayPadFile(f) for f in files]
         return files
 
-
     #==========================================================================
     def annotations(self, f):
         """
@@ -184,16 +197,21 @@ class ReplayPadDatabase(PadDatabase):
             is the dictionary defining the coordinates of the face bounding box in frame N.
         """
 
-        annots = f.f.bbx(directory=self.original_directory) # numpy array containing the face bounding box data for each video frame, returned data format described in the f.bbx() method of the low level interface
+        annots = f.f.bbx(
+            directory=self.original_directory
+        )  # per-frame face bounding boxes; the data format is described in the f.bbx() method of the low-level interface
 
-        annotations = {} # dictionary to return
+        annotations = {}  # dictionary to return
 
         for fn, frame_annots in enumerate(annots):
 
             topleft = (frame_annots[2], frame_annots[1])
-            bottomright = (frame_annots[2] + frame_annots[4], frame_annots[1] + frame_annots[3])
+            bottomright = (frame_annots[2] + frame_annots[4],
+                           frame_annots[1] + frame_annots[3])
 
-            annotations[str(fn)] = {'topleft': topleft, 'bottomright': bottomright}
+            annotations[str(fn)] = {
+                'topleft': topleft,
+                'bottomright': bottomright
+            }
 
         return annotations
-
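
The reflowed loop above builds the per-frame annotations dictionary out of the rows returned by f.bbx(). A self-contained sketch; reading the columns as (_, x, y, width, height) mirrors how the loop indexes them:

# Sketch of the loop in ReplayPadDatabase.annotations(); note that the
# stored coordinate pairs are (y, x), not (x, y).
def bbx_to_annotations(annots):
    annotations = {}
    for fn, row in enumerate(annots):
        topleft = (row[2], row[1])
        bottomright = (row[2] + row[4], row[1] + row[3])
        annotations[str(fn)] = {'topleft': topleft, 'bottomright': bottomright}
    return annotations

print(bbx_to_annotations([[0, 10, 20, 64, 80]]))
# {'0': {'topleft': (20, 10), 'bottomright': (100, 74)}}
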
diff --git a/bob/pad/face/database/replay_mobile.py b/bob/pad/face/database/replay_mobile.py
index 9745e181..862d4555 100644
--- a/bob/pad/face/database/replay_mobile.py
+++ b/bob/pad/face/database/replay_mobile.py
@@ -1,11 +1,10 @@
 #!/usr/bin/env python2
 # -*- coding: utf-8 -*-
 
-
 #==============================================================================
-import bob.bio.video # Used in ReplayMobilePadFile class
+import bob.bio.video  # Used in ReplayMobilePadFile class
 
-from bob.pad.base.database import PadFile # Used in ReplayMobilePadFile class
+from bob.pad.base.database import PadFile  # Used in ReplayMobilePadFile class
 
 from bob.pad.base.database import PadDatabase
 
@@ -40,9 +39,11 @@ class ReplayMobilePadFile(PadFile):
         # attack_type is a string and I decided to make it like this for this
         # particular database. You can do whatever you want for your own database.
 
-        super(ReplayMobilePadFile, self).__init__(client_id=f.client_id, path=f.path,
-                                            attack_type=attack_type, file_id=f.id)
-
+        super(ReplayMobilePadFile, self).__init__(
+            client_id=f.client_id,
+            path=f.path,
+            attack_type=attack_type,
+            file_id=f.id)
 
     #==========================================================================
     def convert_arr_to_frame_cont(self, data):
@@ -63,7 +64,8 @@ class ReplayMobilePadFile(PadFile):
             Resulting FrameContainer containing RGB frames.
         """
 
-        frames = bob.bio.video.FrameContainer() # initialize the FrameContainer
+        frames = bob.bio.video.FrameContainer(
+        )  # initialize the FrameContainer
 
         for idx, sample in enumerate(data):
 
@@ -71,7 +73,6 @@ class ReplayMobilePadFile(PadFile):
 
         return frames
 
-
     #==========================================================================
     def load(self, directory=None, extension='.mov'):
         """
@@ -92,10 +93,11 @@ class ReplayMobilePadFile(PadFile):
             for further details.
         """
 
-        video_data_array = self.f.load(directory = directory,
-                                       extension = extension)
+        video_data_array = self.f.load(
+            directory=directory, extension=extension)
 
-        video_data = self.convert_arr_to_frame_cont(video_data_array) # the result is now a FrameContainer
+        video_data = self.convert_arr_to_frame_cont(
+            video_data_array)  # the result is now a FrameContainer
 
         return video_data
 
@@ -107,11 +109,11 @@ class ReplayMobilePadDatabase(PadDatabase):
     """
 
     def __init__(
-        self,
-        protocol='grandtest', # grandtest is the default protocol for this database
-        original_directory=None,
-        original_extension=None,
-        **kwargs):
+            self,
+            protocol='grandtest',  # grandtest is the default protocol for this database
+            original_directory=None,
+            original_extension=None,
+            **kwargs):
         """
         **Parameters:**
 
@@ -134,20 +136,28 @@ class ReplayMobilePadDatabase(PadDatabase):
 
         # Since the high level API expects different group names than what the low
         # level API offers, you need to convert them when necessary
-        self.low_level_group_names = ('train', 'devel', 'test') # group names in the low-level database interface
-        self.high_level_group_names = ('train', 'dev', 'eval') # names are expected to be like that in objects() function
+        self.low_level_group_names = (
+            'train', 'devel',
+            'test')  # group names in the low-level database interface
+        self.high_level_group_names = (
+            'train', 'dev',
+            'eval')  # group names expected by the objects() function
 
         # Always use super to call parent class methods.
         super(ReplayMobilePadDatabase, self).__init__(
-            name = 'replay-mobile',
-            protocol = protocol,
-            original_directory = original_directory,
-            original_extension = original_extension,
+            name='replay-mobile',
+            protocol=protocol,
+            original_directory=original_directory,
+            original_extension=original_extension,
             **kwargs)
 
-
     #==========================================================================
-    def objects(self, groups=None, protocol=None, purposes=None, model_ids=None, **kwargs):
+    def objects(self,
+                groups=None,
+                protocol=None,
+                purposes=None,
+                model_ids=None,
+                **kwargs):
         """
         This function returns lists of ReplayMobilePadFile objects, which fulfill the given restrictions.
 
@@ -178,16 +188,17 @@ class ReplayMobilePadDatabase(PadDatabase):
         """
 
         # Convert group names to low-level group names here.
-        groups = self.convert_names_to_lowlevel(groups, self.low_level_group_names, self.high_level_group_names)
+        groups = self.convert_names_to_lowlevel(
+            groups, self.low_level_group_names, self.high_level_group_names)
         # Since this database was designed for PAD experiments, nothing special
         # needs to be done here.
-        files = self.db.objects(protocol=protocol, groups=groups, cls=purposes, **kwargs)
+        files = self.db.objects(
+            protocol=protocol, groups=groups, cls=purposes, **kwargs)
 
         files = [ReplayMobilePadFile(f) for f in files]
 
         return files
 
-
     #==========================================================================
     def annotations(self, f):
         """
@@ -211,16 +222,21 @@ class ReplayMobilePadDatabase(PadDatabase):
             is the dictionary defining the coordinates of the face bounding box in frame N.
         """
 
-        annots = f.f.bbx(directory=self.original_directory) # numpy array containing the face bounding box data for each video frame, returned data format described in the f.bbx() method of the low level interface
+        annots = f.f.bbx(
+            directory=self.original_directory
+        )  # per-frame face bounding boxes; the data format is described in the f.bbx() method of the low-level interface
 
-        annotations = {} # dictionary to return
+        annotations = {}  # dictionary to return
 
         for fn, frame_annots in enumerate(annots):
 
             topleft = (frame_annots[1], frame_annots[0])
-            bottomright = (frame_annots[1] + frame_annots[3], frame_annots[0] + frame_annots[2])
+            bottomright = (frame_annots[1] + frame_annots[3],
+                           frame_annots[0] + frame_annots[2])
 
-            annotations[str(fn)] = {'topleft': topleft, 'bottomright': bottomright}
+            annotations[str(fn)] = {
+                'topleft': topleft,
+                'bottomright': bottomright
+            }
 
         return annotations
-
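
Comparing this annotations() loop with the Replay-Attack one above: the mobile rows are indexed starting at column 0 while the Replay-Attack rows start at column 1, i.e. the mobile bbx rows apparently carry no leading frame-number column. A sketch that makes the offset explicit; the x/y naming is an assumption inferred from the indexing:

def bbx_row_to_box(row, offset):
    # offset = 1 for Replay-Attack rows (leading frame number),
    # offset = 0 for Replay-Mobile rows (no frame-number column).
    x, y, w, h = row[offset], row[offset + 1], row[offset + 2], row[offset + 3]
    return {'topleft': (y, x), 'bottomright': (y + h, x + w)}

print(bbx_row_to_box([0, 10, 20, 64, 80], offset=1))  # Replay-Attack style
print(bbx_row_to_box([10, 20, 64, 80], offset=0))     # Replay-Mobile style
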
diff --git a/bob/pad/face/extractor/FrameDiffFeatures.py b/bob/pad/face/extractor/FrameDiffFeatures.py
index 8560a5fe..f7b2a941 100644
--- a/bob/pad/face/extractor/FrameDiffFeatures.py
+++ b/bob/pad/face/extractor/FrameDiffFeatures.py
@@ -20,6 +20,7 @@ import bob.bio.video
 #==============================================================================
 # Main body:
 
+
 class FrameDiffFeatures(Extractor):
     """
     This class is designed to extract features describing frame differences.
@@ -47,18 +48,13 @@ class FrameDiffFeatures(Extractor):
         0 (no overlapping) and 'window-size'-1. Default: 0.
     """
 
-    def __init__(self,
-                 window_size,
-                 overlap = 0):
+    def __init__(self, window_size, overlap=0):
 
-        Extractor.__init__(self,
-                           window_size = window_size,
-                           overlap = overlap)
+        Extractor.__init__(self, window_size=window_size, overlap=overlap)
 
         self.window_size = window_size
         self.overlap = overlap
 
-
     #==========================================================================
     def dcratio(self, arr):
         """
@@ -98,7 +94,6 @@ class FrameDiffFeatures(Extractor):
 
         return dcratio
 
-
     #==========================================================================
     def remove_nan_rows(self, data):
         """
@@ -121,13 +116,12 @@ class FrameDiffFeatures(Extractor):
 
         ret_arr = d[~np.isnan(d.sum(axis=1)), :]
 
-        if ret_arr.shape[0] == 0: # if array is empty, return array of ones
+        if ret_arr.shape[0] == 0:  # if array is empty, return array of ones
 
             ret_arr = np.ones((1, ret_arr.shape[1]))
 
         return ret_arr
 
-
     #==========================================================================
     def cluster_5quantities(self, arr, window_size, overlap):
         """
@@ -176,7 +170,8 @@ class FrameDiffFeatures(Extractor):
         retval = np.ndarray((arr.shape[0], 5), dtype='float64')
         retval[:] = np.NaN
 
-        for k in range(0, arr.shape[0] - window_size + 1, window_size - overlap):
+        for k in range(0, arr.shape[0] - window_size + 1,
+                       window_size - overlap):
 
             obs = arr[k:k + window_size].copy()
 
@@ -189,11 +184,10 @@ class FrameDiffFeatures(Extractor):
             retval[k + window_size - 1] = \
                 (obs.min(), obs.max(), obs.mean(), obs.std(ddof=1), self.dcratio(obs))
 
-        retval = self.remove_nan_rows(retval) # clean-up nan's in the array
+        retval = self.remove_nan_rows(retval)  # clean-up nan's in the array
 
         return retval
 
-
     #==========================================================================
     def convert_arr_to_frame_cont(self, data):
         """
@@ -213,7 +207,8 @@ class FrameDiffFeatures(Extractor):
             a particular sample.
         """
 
-        frames = bob.bio.video.FrameContainer() # initialize the FrameContainer
+        frames = bob.bio.video.FrameContainer(
+        )  # initialize the FrameContainer
 
         for idx, sample in enumerate(data):
 
@@ -221,7 +216,6 @@ class FrameDiffFeatures(Extractor):
 
         return frames
 
-
     #==========================================================================
     def comp_features(self, data, window_size, overlap):
         """
@@ -248,9 +242,9 @@ class FrameDiffFeatures(Extractor):
             Features describing frame differences, stored in the FrameContainer.
         """
 
-        d_face = self.cluster_5quantities( data[:, 0], window_size, overlap )
+        d_face = self.cluster_5quantities(data[:, 0], window_size, overlap)
 
-        d_bg = self.cluster_5quantities( data[:, 1], window_size, overlap )
+        d_bg = self.cluster_5quantities(data[:, 1], window_size, overlap)
 
         features = np.hstack((d_face, d_bg))
 
@@ -258,7 +252,6 @@ class FrameDiffFeatures(Extractor):
 
         return frames
 
-
     #==========================================================================
     def __call__(self, data):
         """
@@ -282,7 +275,6 @@ class FrameDiffFeatures(Extractor):
 
         return frames
 
-
     #==========================================================================
     def write_feature(self, frames, file_name):
         """
@@ -298,8 +290,8 @@ class FrameDiffFeatures(Extractor):
             Name of the file.
         """
 
-        bob.bio.video.extractor.Wrapper(Extractor()).write_feature(frames, file_name)
-
+        bob.bio.video.extractor.Wrapper(Extractor()).write_feature(
+            frames, file_name)
 
     #==========================================================================
     def read_feature(self, file_name):
@@ -318,9 +310,7 @@ class FrameDiffFeatures(Extractor):
             Frames stored in the frame container.
         """
 
-        frames = bob.bio.video.extractor.Wrapper(Extractor()).read_feature(file_name)
+        frames = bob.bio.video.extractor.Wrapper(
+            Extractor()).read_feature(file_name)
 
         return frames
-
-
-
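
cluster_5quantities(), reformatted above, slides a window of window_size frames (with overlap frames shared between consecutive windows) over the frame-difference series and writes five statistics at each window's last index; rows never reached stay NaN and are dropped afterwards (the real method substitutes a row of ones when nothing survives). A self-contained sketch with a stub in place of dcratio(), whose body is not shown in this hunk:

import numpy as np

def dc_ratio(obs):
    return 0.0  # placeholder: NOT the real FrameDiffFeatures.dcratio()

def cluster_5quantities(arr, window_size, overlap):
    retval = np.full((arr.shape[0], 5), np.nan)
    for k in range(0, arr.shape[0] - window_size + 1, window_size - overlap):
        obs = arr[k:k + window_size]
        retval[k + window_size - 1] = (obs.min(), obs.max(), obs.mean(),
                                       obs.std(ddof=1), dc_ratio(obs))
    return retval[~np.isnan(retval.sum(axis=1))]  # drop unfilled rows

print(cluster_5quantities(np.arange(10.0), window_size=4, overlap=0).shape)
# (2, 5): windows [0:4] and [4:8]; frames 8 and 9 never complete a window
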
diff --git a/bob/pad/face/extractor/ImageQualityMeasure.py b/bob/pad/face/extractor/ImageQualityMeasure.py
index f3140a42..3a6bc2c0 100644
--- a/bob/pad/face/extractor/ImageQualityMeasure.py
+++ b/bob/pad/face/extractor/ImageQualityMeasure.py
@@ -16,6 +16,7 @@ logger = logging.getLogger(__name__)
 #==============================================================================
 # Main body:
 
+
 class ImageQualityMeasure(Extractor):
     """
     This class is designed to extract Image Quality Measures given input RGB
@@ -36,24 +37,17 @@ class ImageQualityMeasure(Extractor):
         The data type of the resulting feature vector.
         Default: ``None``.
     """
+
     #==========================================================================
-    def __init__(self,
-                 galbally=True,
-                 msu=True,
-                 dtype=None,
-                 **kwargs):
-
-        Extractor.__init__(self,
-                           galbally=galbally,
-                           msu=msu,
-                           dtype=dtype,
-                           **kwargs)
+    def __init__(self, galbally=True, msu=True, dtype=None, **kwargs):
+
+        Extractor.__init__(
+            self, galbally=galbally, msu=msu, dtype=dtype, **kwargs)
 
         self.dtype = dtype
         self.galbally = galbally
         self.msu = msu
 
-
     #==========================================================================
     def __call__(self, data):
         """
@@ -110,5 +104,3 @@ class ImageQualityMeasure(Extractor):
             features = features.astype(self.dtype)
 
         return features
-
-
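
The __call__ body is largely elided in this hunk, but the constructor and the visible tail imply the shape of the computation: two optional feature groups, concatenated, then an optional dtype cast. A hedged sketch; compute_galbally/compute_msu are hypothetical stand-ins for the real quality-measure routines, and the vector lengths are arbitrary:

import numpy as np

def compute_galbally(data):
    return np.zeros(4)  # placeholder feature vector

def compute_msu(data):
    return np.zeros(3)  # placeholder feature vector

def extract(data, galbally=True, msu=True, dtype=None):
    parts = []  # assumes at least one of the two flags is set
    if galbally:
        parts.append(compute_galbally(data))
    if msu:
        parts.append(compute_msu(data))
    features = np.hstack(parts)
    if dtype is not None:
        features = features.astype(dtype)  # mirrors the visible tail above
    return features

print(extract(None, dtype='float32').shape)  # (7,)
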
diff --git a/bob/pad/face/extractor/LBPHistogram.py b/bob/pad/face/extractor/LBPHistogram.py
index a1b15e16..24af017e 100644
--- a/bob/pad/face/extractor/LBPHistogram.py
+++ b/bob/pad/face/extractor/LBPHistogram.py
@@ -48,12 +48,15 @@ class LBPHistogram(Extractor):
             rad=rad,
             neighbors=neighbors,
             circ=circ,
-            dtype=dtype,)
+            dtype=dtype,
+        )
 
-        elbps = {'regular': 'regular',
-                 'transitional': 'trainsitional',
-                 'direction_coded': 'direction-coded',
-                 'modified': 'regular'}
+        elbps = {
+            'regular': 'regular',
+            'transitional': 'transitional',
+            'direction_coded': 'direction-coded',
+            'modified': 'regular'
+        }
 
         if elbptype == 'modified':
             mct = True
@@ -63,31 +66,53 @@ class LBPHistogram(Extractor):
         if lbptype == 'uniform':
             if neighbors == 16:
                 lbp = bob.ip.base.LBP(
-                    neighbors=16, uniform=True, circular=circ,
-                    radius=rad, to_average=mct, elbp_type=elbps[elbptype])
+                    neighbors=16,
+                    uniform=True,
+                    circular=circ,
+                    radius=rad,
+                    to_average=mct,
+                    elbp_type=elbps[elbptype])
             else:  # we assume neighbors==8 in this case
                 lbp = bob.ip.base.LBP(
-                    neighbors=8, uniform=True, circular=circ,
-                    radius=rad, to_average=mct, elbp_type=elbps[elbptype])
+                    neighbors=8,
+                    uniform=True,
+                    circular=circ,
+                    radius=rad,
+                    to_average=mct,
+                    elbp_type=elbps[elbptype])
         elif lbptype == 'riu2':
             if neighbors == 16:
                 lbp = bob.ip.base.LBP(
-                    neighbors=16, uniform=True, rotation_invariant=True,
-                    radius=rad, circular=circ, to_average=mct,
+                    neighbors=16,
+                    uniform=True,
+                    rotation_invariant=True,
+                    radius=rad,
+                    circular=circ,
+                    to_average=mct,
                     elbp_type=elbps[elbptype])
             else:  # we assume neighbors==8 in this case
                 lbp = bob.ip.base.LBP(
-                    neighbors=8, uniform=True, rotation_invariant=True,
-                    radius=rad, circular=circ, to_average=mct,
+                    neighbors=8,
+                    uniform=True,
+                    rotation_invariant=True,
+                    radius=rad,
+                    circular=circ,
+                    to_average=mct,
                     elbp_type=elbps[elbptype])
         else:  # regular LBP
             if neighbors == 16:
                 lbp = bob.ip.base.LBP(
-                    neighbors=16, circular=circ, radius=rad, to_average=mct,
+                    neighbors=16,
+                    circular=circ,
+                    radius=rad,
+                    to_average=mct,
                     elbp_type=elbps[elbptype])
             else:  # we assume neighbors==8 in this case
                 lbp = bob.ip.base.LBP(
-                    neighbors=8, circular=circ, radius=rad, to_average=mct,
+                    neighbors=8,
+                    circular=circ,
+                    radius=rad,
+                    to_average=mct,
                     elbp_type=elbps[elbptype])
 
         self.dtype = dtype
@@ -117,8 +142,8 @@ class LBPHistogram(Extractor):
         # allocating the image with lbp codes
         lbpimage = numpy.ndarray(self.lbp.lbp_shape(data), 'uint16')
         self.lbp(data, lbpimage)  # calculating the lbp image
-        hist = bob.ip.base.histogram(
-            lbpimage, (0, self.lbp.max_label - 1), self.lbp.max_label)
+        hist = bob.ip.base.histogram(lbpimage, (0, self.lbp.max_label - 1),
+                                     self.lbp.max_label)
         hist = hist / sum(hist)  # histogram normalization
         if self.dtype is not None:
             hist = hist.astype(self.dtype)
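
The histogram step reformatted above counts every LBP code in [0, max_label) and normalizes the bins to sum to one. The same computation with plain numpy in place of bob.ip.base.histogram, assuming no code exceeds max_label - 1:

import numpy as np

def lbp_histogram(lbpimage, max_label, dtype=None):
    hist = np.bincount(lbpimage.ravel(), minlength=max_label).astype('float64')
    hist = hist / hist.sum()  # histogram normalization
    if dtype is not None:
        hist = hist.astype(dtype)
    return hist

codes = np.array([[0, 1], [1, 2]], dtype='uint16')
print(lbp_histogram(codes, max_label=3))  # [0.25 0.5  0.25]
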
diff --git a/bob/pad/face/extractor/VideoDataLoader.py b/bob/pad/face/extractor/VideoDataLoader.py
index 18a314db..a008a9b6 100644
--- a/bob/pad/face/extractor/VideoDataLoader.py
+++ b/bob/pad/face/extractor/VideoDataLoader.py
@@ -16,6 +16,7 @@ import bob.bio.video
 #==============================================================================
 # Main body of the class
 
+
 class VideoDataLoader(object):
     """
     This class is designed to load video data given name of the file.
@@ -59,16 +60,15 @@ class VideoDataLoader(object):
             filenames.append(os.path.splitext(f)[0])
             extensions.append(os.path.splitext(f)[1])
 
+        idx = filenames.index(filename_no_ext)  # index of the file
 
-        idx = filenames.index(filename_no_ext) # index of the file
-
-        file_extension = extensions[idx] # get extension of the file
+        file_extension = extensions[idx]  # get extension of the file
 
-        filename_complete = os.path.join(path, filename_no_ext + file_extension)
+        filename_complete = os.path.join(path,
+                                         filename_no_ext + file_extension)
 
         return filename_complete
 
-
     #==========================================================================
     def load_video_data(self, filename_complete):
         """
@@ -85,13 +85,13 @@ class VideoDataLoader(object):
             A FrameContainer containing the loaded video data.
         """
 
-        frame_selector = bob.bio.video.FrameSelector(selection_style = 'all') # select all frames from the video file
+        frame_selector = bob.bio.video.FrameSelector(
+            selection_style='all')  # select all frames from the video file
 
-        video_data = frame_selector(filename_complete) # video data
+        video_data = frame_selector(filename_complete)  # video data
 
         return video_data
 
-
     #==========================================================================
     def __call__(self, filename):
         """
@@ -113,5 +113,3 @@ class VideoDataLoader(object):
         video_data = self.load_video_data(filename_complete)
 
         return video_data
-
-
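
get_complete_filename(), reflowed above, recovers a video's extension by matching file stems against the directory listing. An equivalent self-contained sketch of that lookup:

import os

def complete_filename(filename_no_ext):
    path, stem = os.path.split(filename_no_ext)
    for f in os.listdir(path):
        name, ext = os.path.splitext(f)
        if name == stem:
            return os.path.join(path, name + ext)
    raise IOError("no file with stem %r in %s" % (stem, path))

# e.g. complete_filename("/data/videos/client001") could return
# "/data/videos/client001.mov" (hypothetical paths).
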
diff --git a/bob/pad/face/extractor/VideoLBPHistogram.py b/bob/pad/face/extractor/VideoLBPHistogram.py
index ad920ce2..7c2bb8f3 100644
--- a/bob/pad/face/extractor/VideoLBPHistogram.py
+++ b/bob/pad/face/extractor/VideoLBPHistogram.py
@@ -15,10 +15,10 @@ from bob.pad.face.extractor import LBPHistogram
 
 import bob.bio.video
 
-
 #==============================================================================
 # Main body:
 
+
 class VideoLBPHistogram(Extractor, object):
     """
     This class is designed to extract LBP histograms for each frame in the input
@@ -60,13 +60,13 @@ class VideoLBPHistogram(Extractor, object):
                  circ=False,
                  dtype=None):
 
-
-        super(VideoLBPHistogram, self).__init__(lbptype = lbptype,
-                                                elbptype = elbptype,
-                                                rad = rad,
-                                                neighbors = neighbors,
-                                                circ = circ,
-                                                dtype = dtype)
+        super(VideoLBPHistogram, self).__init__(
+            lbptype=lbptype,
+            elbptype=elbptype,
+            rad=rad,
+            neighbors=neighbors,
+            circ=circ,
+            dtype=dtype)
 
         self.lbptype = lbptype
         self.elbptype = elbptype
@@ -76,17 +76,17 @@ class VideoLBPHistogram(Extractor, object):
         self.dtype = dtype
 
         # extractor to process a single image/frame:
-        extractor = LBPHistogram(lbptype=lbptype,
-                                 elbptype=elbptype,
-                                 rad=rad,
-                                 neighbors=neighbors,
-                                 circ=circ,
-                                 dtype=dtype)
+        extractor = LBPHistogram(
+            lbptype=lbptype,
+            elbptype=elbptype,
+            rad=rad,
+            neighbors=neighbors,
+            circ=circ,
+            dtype=dtype)
 
         # a wrapper allowing to apply above extractor to the whole video:
         self.video_extractor = bob.bio.video.extractor.Wrapper(extractor)
 
-
     #==========================================================================
     def __call__(self, frames):
         """
@@ -104,11 +104,10 @@ class VideoLBPHistogram(Extractor, object):
             LBP histograms for each frame stored in the FrameContainer.
         """
 
-        lbp_histograms = self.video_extractor(frames = frames)
+        lbp_histograms = self.video_extractor(frames=frames)
 
         return lbp_histograms
 
-
     #==========================================================================
     def write_feature(self, frames, file_name):
         """
@@ -126,7 +125,6 @@ class VideoLBPHistogram(Extractor, object):
 
         self.video_extractor.write_feature(frames, file_name)
 
-
     #==========================================================================
     def read_feature(self, file_name):
         """
@@ -147,5 +145,3 @@ class VideoLBPHistogram(Extractor, object):
         frames = self.video_extractor.read_feature(file_name)
 
         return frames
-
-
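
VideoLBPHistogram never touches frames itself: it builds a frame-level LBPHistogram and lets bob.bio.video.extractor.Wrapper lift it to video level. A sketch of what such a wrapper conceptually does; the FrameContainer here is a simplified stand-in for the real bob.bio.video one (which also stores a quality value per frame):

class FrameContainer(list):
    def add(self, index, frame):
        self.append((index, frame))

def video_extract(extractor, frames):
    out = FrameContainer()
    for index, frame in frames:
        out.add(index, extractor(frame))  # apply the frame-level extractor
    return out

frames = FrameContainer()
frames.add(0, [1, 2, 3])
print(video_extract(sum, frames))  # [(0, 6)]
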
diff --git a/bob/pad/face/extractor/VideoQualityMeasure.py b/bob/pad/face/extractor/VideoQualityMeasure.py
index 33c89e4d..a48e0589 100644
--- a/bob/pad/face/extractor/VideoQualityMeasure.py
+++ b/bob/pad/face/extractor/VideoQualityMeasure.py
@@ -22,6 +22,7 @@ import six
 #==============================================================================
 # Main body:
 
+
 class VideoQualityMeasure(Extractor, object):
     """
     This class is designed to extract Quality Measures for each frame in the
@@ -44,29 +45,22 @@ class VideoQualityMeasure(Extractor, object):
     """
 
     #==========================================================================
-    def __init__(self,
-                 galbally=True,
-                 msu=True,
-                 dtype=None,
-                 **kwargs):
+    def __init__(self, galbally=True, msu=True, dtype=None, **kwargs):
 
-        super(VideoQualityMeasure, self).__init__(galbally = galbally,
-                                                  msu = msu,
-                                                  dtype = dtype)
+        super(VideoQualityMeasure, self).__init__(
+            galbally=galbally, msu=msu, dtype=dtype)
 
         self.galbally = galbally
         self.msu = msu
         self.dtype = dtype
 
         # extractor to process a single image/frame:
-        extractor = ImageQualityMeasure(galbally = galbally,
-                                        msu = msu,
-                                        dtype = dtype)
+        extractor = ImageQualityMeasure(
+            galbally=galbally, msu=msu, dtype=dtype)
 
         # a wrapper allowing to apply above extractor to the whole video:
         self.video_extractor = bob.bio.video.extractor.Wrapper(extractor)
 
-
     #==========================================================================
     def __call__(self, frames):
         """
@@ -89,19 +83,19 @@ class VideoQualityMeasure(Extractor, object):
             Quality Measures for each frame stored in the FrameContainer.
         """
 
-        if isinstance(frames, six.string_types): # if frames is a path(!)
+        if isinstance(frames, six.string_types):  # if frames is a path(!)
 
             video_loader = VideoDataLoader()
 
-            frames = video_loader(frames) # frames is now a FrameContainer
+            frames = video_loader(frames)  # frames is now a FrameContainer
+
 
 #        import ipdb; ipdb.set_trace()
 
-        quality_measures = self.video_extractor(frames = frames)
+        quality_measures = self.video_extractor(frames=frames)
 
         return quality_measures
 
-
     #==========================================================================
     def write_feature(self, frames, file_name):
         """
@@ -119,7 +113,6 @@ class VideoQualityMeasure(Extractor, object):
 
         self.video_extractor.write_feature(frames, file_name)
 
-
     #==========================================================================
     def read_feature(self, file_name):
         """
@@ -140,5 +133,3 @@ class VideoQualityMeasure(Extractor, object):
         frames = self.video_extractor.read_feature(file_name)
 
         return frames
-
-
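
The reflowed __call__ above accepts either a FrameContainer or, thanks to the six.string_types check, a bare path that is loaded on the fly via VideoDataLoader. A sketch of that dispatch; load_video is a hypothetical stand-in for the loader:

import six

def load_video(path):
    return [(0, path)]  # placeholder for VideoDataLoader()(path)

def as_frames(frames):
    if isinstance(frames, six.string_types):  # frames is a path(!)
        frames = load_video(frames)  # frames is now a container of frames
    return frames

print(as_frames("/data/video.mov"))  # [(0, '/data/video.mov')]
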
diff --git a/bob/pad/face/preprocessor/FrameDifference.py b/bob/pad/face/preprocessor/FrameDifference.py
index 2f3cfcf2..638597be 100644
--- a/bob/pad/face/preprocessor/FrameDifference.py
+++ b/bob/pad/face/preprocessor/FrameDifference.py
@@ -22,6 +22,7 @@ import bob.ip.color
 #==============================================================================
 # Main body:
 
+
 class FrameDifference(Preprocessor, object):
     """
     This class is designed to compute frame differences for both facial and
@@ -48,19 +49,19 @@ class FrameDifference(Preprocessor, object):
     """
 
     def __init__(self,
-                 number_of_frames = None,
-                 check_face_size_flag = False,
-                 min_face_size = 50):
+                 number_of_frames=None,
+                 check_face_size_flag=False,
+                 min_face_size=50):
 
-        super(FrameDifference, self).__init__(number_of_frames = number_of_frames,
-                                              check_face_size_flag = check_face_size_flag,
-                                              min_face_size = min_face_size)
+        super(FrameDifference, self).__init__(
+            number_of_frames=number_of_frames,
+            check_face_size_flag=check_face_size_flag,
+            min_face_size=min_face_size)
 
         self.number_of_frames = number_of_frames
         self.check_face_size_flag = check_face_size_flag
         self.min_face_size = min_face_size
 
-
     #==========================================================================
     def eval_face_differences(self, previous, current, annotations):
         """
@@ -87,8 +88,8 @@ class FrameDifference(Preprocessor, object):
             images.
         """
 
-        prev = previous[annotations['topleft'][0]:annotations['bottomright'][0],
-                        annotations['topleft'][1]:annotations['bottomright'][1]]
+        prev = previous[annotations['topleft'][0]:annotations['bottomright'][
+            0], annotations['topleft'][1]:annotations['bottomright'][1]]
 
         curr = current[annotations['topleft'][0]:annotations['bottomright'][0],
                        annotations['topleft'][1]:annotations['bottomright'][1]]
@@ -101,9 +102,12 @@ class FrameDifference(Preprocessor, object):
 
         return face
 
-
     #==========================================================================
-    def eval_background_differences(self, previous, current, annotations, border=None):
+    def eval_background_differences(self,
+                                    previous,
+                                    current,
+                                    annotations,
+                                    border=None):
         """
         Evaluates the normalized frame difference on the background.
 
@@ -147,29 +151,29 @@ class FrameDifference(Preprocessor, object):
             if y1 < 0: y1 = 0
             x1 = annotations['topleft'][1] - border
             if x1 < 0: x1 = 0
-            y2 = y1 + height + (2*border)
+            y2 = y1 + height + (2 * border)
             if y2 > full_diff.shape[0]: y2 = full_diff.shape[0]
-            x2 = x1 + width + (2*border)
+            x2 = x1 + width + (2 * border)
             if x2 > full_diff.shape[1]: x2 = full_diff.shape[1]
             full = full_diff[y1:y2, x1:x2].sum()
             full_size = full_diff[y1:y2, x1:x2].size
 
-        face_diff = full_diff[annotations['topleft'][0]:(annotations['topleft'][0]+height),
-            annotations['topleft'][1]:(annotations['topleft'][1]+width)]
+        face_diff = full_diff[annotations['topleft'][0]:(
+            annotations['topleft'][0] + height), annotations['topleft'][1]:(
+                annotations['topleft'][1] + width)]
 
         # calculates the differences in the face and background areas
         face = face_diff.sum()
         bg = full - face
 
         normalization = float(full_size - face_diff.size)
-        if normalization < 1: #prevents zero division
+            if normalization < 1:  # prevents zero division
             bg = 0.0
         else:
             bg /= float(full_size - face_diff.size)
 
         return bg
 
-
     #==========================================================================
     def check_face_size(self, frame_container, annotations, min_face_size):
         """
@@ -204,34 +208,40 @@ class FrameDifference(Preprocessor, object):
             is the dictionary defining the coordinates of the face bounding box in frame N.
         """
 
-        selected_frames = bob.bio.video.FrameContainer() # initialize the FrameContainer
+        selected_frames = bob.bio.video.FrameContainer(
+        )  # initialize the FrameContainer
 
         selected_annotations = {}
 
         selected_frame_idx = 0
 
-        for idx in range(0, len(annotations)): # idx - frame index
+        for idx in range(0, len(annotations)):  # idx - frame index
 
-            frame_annotations = annotations[str(idx)] # annotations for particular frame
+            frame_annotations = annotations[str(
+                idx)]  # annotations for particular frame
 
             # size of current face
-            face_size = np.min(np.array(frame_annotations['bottomright']) - np.array(frame_annotations['topleft']))
+            face_size = np.min(
+                np.array(frame_annotations['bottomright']) -
+                np.array(frame_annotations['topleft']))
 
-            if face_size >= min_face_size: # check if face size is above the threshold
+            if face_size >= min_face_size:  # check if face size is above the threshold
 
-                selected_frame = frame_container[idx][1] # get current frame
+                selected_frame = frame_container[idx][1]  # get current frame
 
-                selected_frames.add(selected_frame_idx, selected_frame) # add current frame to FrameContainer
+                selected_frames.add(
+                    selected_frame_idx,
+                    selected_frame)  # add current frame to FrameContainer
 
-                selected_annotations[str(selected_frame_idx)] = annotations[str(idx)]
+                selected_annotations[str(selected_frame_idx)] = annotations[
+                    str(idx)]
 
                 selected_frame_idx = selected_frame_idx + 1
 
         return selected_frames, selected_annotations
 
-
     #==========================================================================
-    def comp_face_bg_diff(self, frames, annotations, number_of_frames = None):
+    def comp_face_bg_diff(self, frames, annotations, number_of_frames=None):
         """
         This function computes the frame differences for both facial and background
         regions. These parameters are computed for ``number_of_frames`` frames
@@ -267,9 +277,9 @@ class FrameDifference(Preprocessor, object):
         else:
             number_of_frames = len(frames)
 
-        previous = frames[0][1] # the first frame in the video
+        previous = frames[0][1]  # the first frame in the video
 
-        if len(previous.shape) == 3: # if RGB convert to gray-scale
+        if len(previous.shape) == 3:  # if RGB convert to gray-scale
             previous = bob.ip.color.rgb_to_gray(previous)
 
         diff = []
@@ -278,11 +288,13 @@ class FrameDifference(Preprocessor, object):
 
             current = frames[k][1]
 
-            if len(current.shape) == 3: # if RGB convert to gray-scale
+            if len(current.shape) == 3:  # if RGB convert to gray-scale
                 current = bob.ip.color.rgb_to_gray(current)
 
-            face_diff = self.eval_face_differences(previous, current, annotations[str(k)])
-            bg_diff = self.eval_background_differences(previous, current, annotations[str(k)], None)
+            face_diff = self.eval_face_differences(previous, current,
+                                                   annotations[str(k)])
+            bg_diff = self.eval_background_differences(
+                previous, current, annotations[str(k)], None)
 
             diff.append((face_diff, bg_diff))
 
@@ -291,7 +303,7 @@ class FrameDifference(Preprocessor, object):
             previous = current
             current = tmp
 
-        if not diff: # if list is empty
+        if not diff:  # if list is empty
 
             diff = [(np.NaN, np.NaN)]
 
@@ -299,7 +311,6 @@ class FrameDifference(Preprocessor, object):
 
         return diff
 
-
     #==========================================================================
     def select_annotated_frames(self, frames, annotations):
         """
@@ -329,28 +340,35 @@ class FrameDifference(Preprocessor, object):
             is the dictionary defining the coordinates of the face bounding box in frame N.
         """
 
-        annotated_frames = np.sort( [np.int(item) for item in annotations.keys()] ) # annotated frame numbers
+        annotated_frames = np.sort([
+            np.int(item) for item in annotations.keys()
+        ])  # annotated frame numbers
 
-        available_frames = range(0,len(frames)) # frame numbers in the input video
+        available_frames = range(
+            0, len(frames))  # frame numbers in the input video
 
-        valid_frames = list(set(annotated_frames).intersection(available_frames)) # valid and annotated frames
+        valid_frames = list(
+            set(annotated_frames).intersection(
+                available_frames))  # valid and annotated frames
 
-        cleaned_frame_container = bob.bio.video.FrameContainer() # initialize the FrameContainer
+        cleaned_frame_container = bob.bio.video.FrameContainer(
+        )  # initialize the FrameContainer
 
         cleaned_annotations = {}
 
         for idx, valid_frame_num in enumerate(valid_frames):
             ## valid_frame_num - is the number of the original frame having annotations
 
-            cleaned_annotations[str(idx)] = annotations[str(valid_frame_num)] # correct the frame numbers
+            cleaned_annotations[str(idx)] = annotations[str(
+                valid_frame_num)]  # correct the frame numbers
 
-            selected_frame = frames[valid_frame_num][1] # get current frame
+            selected_frame = frames[valid_frame_num][1]  # get current frame
 
-            cleaned_frame_container.add(idx, selected_frame) # add current frame to FrameContainer
+            cleaned_frame_container.add(
+                idx, selected_frame)  # add current frame to FrameContainer
 
         return cleaned_frame_container, cleaned_annotations
 
-
     #==========================================================================
     def __call__(self, frames, annotations):
         """
@@ -380,19 +398,20 @@ class FrameDifference(Preprocessor, object):
             The second column contains frame differences of non-facial/background regions.
         """
 
-        if len(frames) != len(annotations): # if some annotations are missing
+        if len(frames) != len(annotations):  # if some annotations are missing
 
             ## Select only annotated frames:
-            frames, annotations = self.select_annotated_frames(frames, annotations)
+            frames, annotations = self.select_annotated_frames(
+                frames, annotations)
 
         if self.check_face_size_flag:
 
-            selected_frames, selected_annotations = self.check_face_size(frames, annotations, self.min_face_size)
+            selected_frames, selected_annotations = self.check_face_size(
+                frames, annotations, self.min_face_size)
 
-        diff = self.comp_face_bg_diff(frames = selected_frames,
-                                      annotations = selected_annotations,
-                                      number_of_frames = self.number_of_frames)
+        diff = self.comp_face_bg_diff(
+            frames=selected_frames,
+            annotations=selected_annotations,
+            number_of_frames=self.number_of_frames)
 
         return diff
-
-
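
One caveat in the last hunk: selected_frames and selected_annotations are only assigned inside the `if self.check_face_size_flag:` branch, yet comp_face_bg_diff() always receives them, so running with the flag disabled raises a NameError. A guarded sketch of what is assumed to be the intended control flow (falling back to the unfiltered frames); the filter and feature functions are passed in so the sketch stays self-contained:

def frame_diff_call(frames, annotations, check_face_size_flag,
                    check_face_size, comp_face_bg_diff,
                    min_face_size=50, number_of_frames=None):
    selected_frames, selected_annotations = frames, annotations  # fallback
    if check_face_size_flag:
        selected_frames, selected_annotations = check_face_size(
            frames, annotations, min_face_size)
    return comp_face_bg_diff(selected_frames, selected_annotations,
                             number_of_frames)
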
diff --git a/bob/pad/face/preprocessor/ImageFaceCrop.py b/bob/pad/face/preprocessor/ImageFaceCrop.py
index 6b998ef4..2b3fefb7 100644
--- a/bob/pad/face/preprocessor/ImageFaceCrop.py
+++ b/bob/pad/face/preprocessor/ImageFaceCrop.py
@@ -20,6 +20,7 @@ import bob.ip.base
 #==============================================================================
 # Main body:
 
+
 class ImageFaceCrop(Preprocessor):
     """
     This class crops the face in the input image given annotations defining
@@ -39,20 +40,16 @@ class ImageFaceCrop(Preprocessor):
         Return RGB cropped face if ``True``, otherwise a gray-scale image is
         returned. Default: ``False``.
     """
-    #==========================================================================
-    def __init__(self,
-                 face_size,
-                 rgb_output_flag = False):
 
+    #==========================================================================
+    def __init__(self, face_size, rgb_output_flag=False):
 
-        Preprocessor.__init__(self,
-                              face_size = face_size,
-                              rgb_output_flag = rgb_output_flag)
+        Preprocessor.__init__(
+            self, face_size=face_size, rgb_output_flag=rgb_output_flag)
 
         self.face_size = face_size
         self.rgb_output_flag = rgb_output_flag
 
-
     #==========================================================================
     def normalize_image_size_in_grayscale(self, image, annotations, face_size):
         """
@@ -81,21 +78,21 @@ class ImageFaceCrop(Preprocessor):
             An image of the cropped face of the size (self.face_size, self.face_size).
         """
 
-        cutframe = image[annotations['topleft'][0]:annotations['bottomright'][0],
-                 annotations['topleft'][1]:annotations['bottomright'][1]]
+        top, left = annotations['topleft']
+        bottom, right = annotations['bottomright']
+        cutframe = image[top:bottom, left:right]
 
         tempbbx = np.ndarray((face_size, face_size), 'float64')
         normbbx = np.ndarray((face_size, face_size), 'uint8')
-        bob.ip.base.scale(cutframe, tempbbx) # normalization
+        bob.ip.base.scale(cutframe, tempbbx)  # normalization
         tempbbx_ = tempbbx + 0.5
         tempbbx_ = np.floor(tempbbx_)
         normbbx = np.cast['uint8'](tempbbx_)
 
         return normbbx
 
-
     #==========================================================================
-    def normalize_image_size(self, image, annotations, face_size, rgb_output_flag):
+    def normalize_image_size(self, image, annotations, face_size,
+                             rgb_output_flag):
         """
         This function crops the face in the input image given annotations defining
         the face bounding box. The size of the face is also normalized to the
@@ -130,29 +127,29 @@ class ImageFaceCrop(Preprocessor):
 
         if len(image.shape) == 3:
 
-            if not(rgb_output_flag):
+            if not rgb_output_flag:
 
                 image = bob.ip.color.rgb_to_gray(image)
 
         if len(image.shape) == 2:
 
-            image = [image] # make gray-scale image an iterable
+            image = [image]  # make gray-scale image an iterable
 
         result = []
 
-        for image_channel in image: # for all color channels in the input image
+        for image_channel in image:  # for all color channels in the input image
 
-            cropped_face = self.normalize_image_size_in_grayscale(image_channel, annotations, face_size)
+            cropped_face = self.normalize_image_size_in_grayscale(
+                image_channel, annotations, face_size)
 
             result.append(cropped_face)
 
         face = np.stack(result, axis=0)
 
-        face = np.squeeze(face) # squeeze 1-st dimension for gray-scale images
+        face = np.squeeze(face)  # drop the singleton dimension for gray-scale images
 
         return face
 
-
     #==========================================================================
     def __call__(self, image, annotations):
         """
@@ -174,8 +171,7 @@ class ImageFaceCrop(Preprocessor):
             rgb 3D or gray-scale 2D.
         """
 
-        norm_face_image = self.normalize_image_size(image, annotations, self.face_size, self.rgb_output_flag)
+        norm_face_image = self.normalize_image_size(
+            image, annotations, self.face_size, self.rgb_output_flag)
 
         return norm_face_image
-
-
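
As a quick reference, the reformatted ImageFaceCrop is exercised exactly as
in test_image_face_crop below (the random image stands in for the stored
test image):

    import numpy as np
    from bob.pad.face.preprocessor import ImageFaceCrop

    image = np.random.randint(0, 255, (3, 480, 640)).astype('uint8')
    annotations = {'topleft': (95, 155), 'bottomright': (215, 265)}

    preprocessor = ImageFaceCrop(face_size=64, rgb_output_flag=False)
    face = preprocessor(image, annotations)  # gray-scale, shape (64, 64)

    preprocessor = ImageFaceCrop(face_size=64, rgb_output_flag=True)
    face = preprocessor(image, annotations)  # RGB, shape (3, 64, 64)
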
diff --git a/bob/pad/face/preprocessor/VideoFaceCrop.py b/bob/pad/face/preprocessor/VideoFaceCrop.py
index a335fc1c..b5433657 100644
--- a/bob/pad/face/preprocessor/VideoFaceCrop.py
+++ b/bob/pad/face/preprocessor/VideoFaceCrop.py
@@ -23,6 +23,7 @@ from ..utils.face_detection_utils import detect_faces_in_video
 #==============================================================================
 # Main body:
 
+
 class VideoFaceCrop(Preprocessor, object):
     """
     This class is designed to crop faces in each frame of the input video given
@@ -98,31 +99,32 @@ class VideoFaceCrop(Preprocessor, object):
     def __init__(self,
                  cropped_image_size,
                  cropped_positions,
-                 fixed_positions = None,
-                 mask_sigma = None,
-                 mask_neighbors = 5,
-                 mask_seed = None,
-                 check_face_size_flag = False,
-                 min_face_size = 50,
-                 use_local_cropper_flag = False,
-                 rgb_output_flag = False,
-                 detect_faces_flag = False,
-                 face_detection_method = "dlib",
+                 fixed_positions=None,
+                 mask_sigma=None,
+                 mask_neighbors=5,
+                 mask_seed=None,
+                 check_face_size_flag=False,
+                 min_face_size=50,
+                 use_local_cropper_flag=False,
+                 rgb_output_flag=False,
+                 detect_faces_flag=False,
+                 face_detection_method="dlib",
                  **kwargs):
 
-        super(VideoFaceCrop, self).__init__(cropped_image_size = cropped_image_size,
-                                            cropped_positions = cropped_positions,
-                                            fixed_positions = fixed_positions,
-                                            mask_sigma = mask_sigma,
-                                            mask_neighbors = mask_neighbors,
-                                            mask_seed = mask_seed,
-                                            check_face_size_flag = check_face_size_flag,
-                                            min_face_size = min_face_size,
-                                            use_local_cropper_flag = use_local_cropper_flag,
-                                            rgb_output_flag = rgb_output_flag,
-                                            detect_faces_flag = detect_faces_flag,
-                                            face_detection_method = face_detection_method,
-                                            **kwargs)
+        super(VideoFaceCrop, self).__init__(
+            cropped_image_size=cropped_image_size,
+            cropped_positions=cropped_positions,
+            fixed_positions=fixed_positions,
+            mask_sigma=mask_sigma,
+            mask_neighbors=mask_neighbors,
+            mask_seed=mask_seed,
+            check_face_size_flag=check_face_size_flag,
+            min_face_size=min_face_size,
+            use_local_cropper_flag=use_local_cropper_flag,
+            rgb_output_flag=rgb_output_flag,
+            detect_faces_flag=detect_faces_flag,
+            face_detection_method=face_detection_method,
+            **kwargs)
 
         self.cropped_image_size = cropped_image_size
         self.cropped_positions = cropped_positions
@@ -143,21 +145,23 @@ class VideoFaceCrop(Preprocessor, object):
 
         if self.use_local_cropper_flag:
 
-            preprocessor = ImageFaceCrop(face_size = self.cropped_image_size[0],
-                                         rgb_output_flag = self.rgb_output_flag)
+            preprocessor = ImageFaceCrop(
+                face_size=self.cropped_image_size[0],
+                rgb_output_flag=self.rgb_output_flag)
 
         else:
 
-            preprocessor = FaceCrop(cropped_image_size = self.cropped_image_size,
-                                    cropped_positions = self.cropped_positions,
-                                    fixed_positions = self.fixed_positions,
-                                    mask_sigma = self.mask_sigma,
-                                    mask_neighbors = self.mask_neighbors,
-                                    mask_seed = self.mask_seed,
-                                    **kwargs)
-
-        self.video_preprocessor = bob.bio.video.preprocessor.Wrapper(preprocessor)
+            preprocessor = FaceCrop(
+                cropped_image_size=self.cropped_image_size,
+                cropped_positions=self.cropped_positions,
+                fixed_positions=self.fixed_positions,
+                mask_sigma=self.mask_sigma,
+                mask_neighbors=self.mask_neighbors,
+                mask_seed=self.mask_seed,
+                **kwargs)
 
+        self.video_preprocessor = bob.bio.video.preprocessor.Wrapper(
+            preprocessor)
 
     #==========================================================================
     def check_face_size(self, frame_container, annotations, min_face_size):
@@ -187,28 +191,35 @@ class VideoFaceCrop(Preprocessor, object):
             overcoming the specified threshold.
         """
 
-        cleaned_frame_container = bob.bio.video.FrameContainer() # initialize the FrameContainer
+        cleaned_frame_container = bob.bio.video.FrameContainer(
+        )  # initialize the FrameContainer
 
         selected_frame_idx = 0
 
-        for idx in range(0, np.min( [len(annotations), len(frame_container)] )): # idx - frame index
+        for idx in range(0,
+                         np.min([len(annotations),
+                                 len(frame_container)])):  # idx - frame index
 
-            frame_annotations = annotations[str(idx)] # annotations for particular frame
+            frame_annotations = annotations[str(
+                idx)]  # annotations for particular frame
 
             # size of current face
-            face_size = np.min(np.array(frame_annotations['bottomright']) - np.array(frame_annotations['topleft']))
+            face_size = np.min(
+                np.array(frame_annotations['bottomright']) -
+                np.array(frame_annotations['topleft']))
 
-            if face_size >= min_face_size: # check if face size is above the threshold
+            if face_size >= min_face_size:  # check if face size is above the threshold
 
-                selected_frame = frame_container[idx][1] # get current frame
+                selected_frame = frame_container[idx][1]  # get current frame
 
-                cleaned_frame_container.add(selected_frame_idx, selected_frame) # add current frame to FrameContainer
+                cleaned_frame_container.add(
+                    selected_frame_idx,
+                    selected_frame)  # add current frame to FrameContainer
 
                 selected_frame_idx = selected_frame_idx + 1
 
         return cleaned_frame_container
 
-
     #==========================================================================
     def select_annotated_frames(self, frames, annotations):
         """
@@ -238,28 +249,35 @@ class VideoFaceCrop(Preprocessor, object):
             is the dictionary defining the coordinates of the face bounding box in frame N.
         """
 
-        annotated_frames = np.sort( [np.int(item) for item in annotations.keys()] ) # annotated frame numbers
+        annotated_frames = np.sort([
+            np.int(item) for item in annotations.keys()
+        ])  # annotated frame numbers
 
-        available_frames = range(0,len(frames)) # frame numbers in the input video
+        available_frames = range(
+            0, len(frames))  # frame numbers in the input video
 
-        valid_frames = list(set(annotated_frames).intersection(available_frames)) # valid and annotated frames
+        valid_frames = list(
+            set(annotated_frames).intersection(
+                available_frames))  # valid and annotated frames
 
-        cleaned_frame_container = bob.bio.video.FrameContainer() # initialize the FrameContainer
+        cleaned_frame_container = bob.bio.video.FrameContainer(
+        )  # initialize the FrameContainer
 
         cleaned_annotations = {}
 
         for idx, valid_frame_num in enumerate(valid_frames):
             ## valid_frame_num is the index of the original frame that has annotations
 
-            cleaned_annotations[str(idx)] = annotations[str(valid_frame_num)] # correct the frame numbers
+            cleaned_annotations[str(idx)] = annotations[str(
+                valid_frame_num)]  # correct the frame numbers
 
-            selected_frame = frames[valid_frame_num][1] # get current frame
+            selected_frame = frames[valid_frame_num][1]  # get current frame
 
-            cleaned_frame_container.add(idx, selected_frame) # add current frame to FrameContainer
+            cleaned_frame_container.add(
+                idx, selected_frame)  # add current frame to FrameContainer
 
         return cleaned_frame_container, cleaned_annotations
 
-
     #==========================================================================
     def __call__(self, frames, annotations):
         """
@@ -285,24 +303,27 @@ class VideoFaceCrop(Preprocessor, object):
 
         if self.detect_faces_flag:
 
-            annotations = detect_faces_in_video(frames, self.face_detection_method)
+            annotations = detect_faces_in_video(frames,
+                                                self.face_detection_method)
 
-        if len(frames) != len(annotations): # if some annotations are missing
+        if len(frames) != len(annotations):  # if some annotations are missing
 
             ## Select only annotated frames:
-            frames, annotations = self.select_annotated_frames(frames, annotations)
+            frames, annotations = self.select_annotated_frames(
+                frames, annotations)
 
-        preprocessed_video = self.video_preprocessor(frames = frames, annotations = annotations)
+        preprocessed_video = self.video_preprocessor(
+            frames=frames, annotations=annotations)
 
         if self.check_face_size_flag:
 
-            preprocessed_video = self.check_face_size(preprocessed_video, annotations, self.min_face_size)
+            preprocessed_video = self.check_face_size(
+                preprocessed_video, annotations, self.min_face_size)
 
         return preprocessed_video
 
-
     #==========================================================================
-    def write_data( self, frames, file_name ):
+    def write_data(self, frames, file_name):
         """
         Writes the given data (that has been generated using the __call__ function of this class) to file.
         This method overrides the write_data() method of the Preprocessor class.
@@ -316,13 +337,12 @@ class VideoFaceCrop(Preprocessor, object):
             name of the file.
         """
 
-        if frames: # save file if FrameContainer is not empty, otherwise do nothing.
+        if frames:  # save file if FrameContainer is not empty, otherwise do nothing.
 
             self.video_preprocessor.write_data(frames, file_name)
 
-
     #==========================================================================
-    def read_data( self, file_name ):
+    def read_data(self, file_name):
         """
         Reads the preprocessed data from file.
         This method overrides the read_data() method of the Preprocessor class.
@@ -341,5 +361,3 @@ class VideoFaceCrop(Preprocessor, object):
         frames = self.video_preprocessor.read_data(file_name)
 
         return frames
-
-
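
The reformatted keyword set of VideoFaceCrop is easiest to check against a
concrete configuration; this sketch matches the first case of
test_video_face_crop below (synthetic inputs, local cropper enabled):

    import numpy as np
    import bob.bio.video
    from bob.pad.face.preprocessor import VideoFaceCrop

    CROPPED_IMAGE_SIZE = (64, 64)
    CROPPED_POSITIONS = {'topleft': (0, 0),
                         'bottomright': CROPPED_IMAGE_SIZE}

    preprocessor = VideoFaceCrop(
        cropped_image_size=CROPPED_IMAGE_SIZE,
        cropped_positions=CROPPED_POSITIONS,
        fixed_positions=None,
        mask_sigma=None,
        mask_neighbors=5,
        mask_seed=None,
        check_face_size_flag=True,    # drop frames with too-small faces
        min_face_size=50,
        use_local_cropper_flag=True,  # crop with ImageFaceCrop internally
        color_channel='gray')

    image = np.random.randint(0, 255, (3, 240, 320)).astype('uint8')
    video = bob.bio.video.FrameContainer()
    annotations = {}
    for idx in range(20):
        video.add(idx, image)
        annotations[str(idx)] = {'topleft': (95, 155),
                                 'bottomright': (215, 265)}

    faces = preprocessor(frames=video, annotations=annotations)
    # 20 frames, each a (64, 64) gray-scale crop
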
diff --git a/bob/pad/face/preprocessor/VideoSparseCoding.py b/bob/pad/face/preprocessor/VideoSparseCoding.py
index 40dc41ed..3b122c15 100644
--- a/bob/pad/face/preprocessor/VideoSparseCoding.py
+++ b/bob/pad/face/preprocessor/VideoSparseCoding.py
@@ -86,26 +86,27 @@ class VideoSparseCoding(Preprocessor, object):
 
     #==========================================================================
     def __init__(self,
-                 block_size = 5,
-                 block_length = 10,
-                 min_face_size = 50,
-                 norm_face_size = 64,
-                 dictionary_file_names = [],
-                 frame_step = 1,
-                 extract_histograms_flag = False,
-                 method = "hist",
-                 comp_reconstruct_err_flag = False,
+                 block_size=5,
+                 block_length=10,
+                 min_face_size=50,
+                 norm_face_size=64,
+                 dictionary_file_names=[],
+                 frame_step=1,
+                 extract_histograms_flag=False,
+                 method="hist",
+                 comp_reconstruct_err_flag=False,
                  **kwargs):
 
-        super(VideoSparseCoding, self).__init__(block_size = block_size,
-                                                block_length = block_length,
-                                                min_face_size = min_face_size,
-                                                norm_face_size = norm_face_size,
-                                                dictionary_file_names = dictionary_file_names,
-                                                frame_step = frame_step,
-                                                extract_histograms_flag = extract_histograms_flag,
-                                                comp_reconstruct_err_flag = comp_reconstruct_err_flag,
-                                                method = method)
+        super(VideoSparseCoding, self).__init__(
+            block_size=block_size,
+            block_length=block_length,
+            min_face_size=min_face_size,
+            norm_face_size=norm_face_size,
+            dictionary_file_names=dictionary_file_names,
+            frame_step=frame_step,
+            extract_histograms_flag=extract_histograms_flag,
+            comp_reconstruct_err_flag=comp_reconstruct_err_flag,
+            method=method)
 
         self.block_size = block_size
         self.block_length = block_length
@@ -119,7 +120,6 @@ class VideoSparseCoding(Preprocessor, object):
 
         self.video_preprocessor = bob.bio.video.preprocessor.Wrapper()
 
-
     #==========================================================================
     def crop_norm_face_grayscale(self, image, annotations, face_size):
         """
@@ -149,19 +149,18 @@ class VideoSparseCoding(Preprocessor, object):
             Cropped facial image of the size (self.face_size, self.face_size).
         """
 
-        cutframe = image[annotations['topleft'][0]:annotations['bottomright'][0],
-                 annotations['topleft'][1]:annotations['bottomright'][1]]
+        top, left = annotations['topleft']
+        bottom, right = annotations['bottomright']
+        cutframe = image[top:bottom, left:right]
 
         tempbbx = np.ndarray((face_size, face_size), 'float64')
         normbbx = np.ndarray((face_size, face_size), 'uint8')
-        bob.ip.base.scale(cutframe, tempbbx) # normalization
+        bob.ip.base.scale(cutframe, tempbbx)  # normalization
         tempbbx_ = tempbbx + 0.5
         tempbbx_ = np.floor(tempbbx_)
         normbbx = np.cast['uint8'](tempbbx_)
 
         return normbbx
 
-
     #==========================================================================
     def crop_norm_faces_grayscale(self, images, annotations, face_size):
         """
@@ -192,13 +191,13 @@ class VideoSparseCoding(Preprocessor, object):
 
         for image in images:
 
-            normbbx.append( self.crop_norm_face_grayscale(image, annotations, face_size) )
+            normbbx.append(
+                self.crop_norm_face_grayscale(image, annotations, face_size))
 
         normbbx = np.stack(normbbx)
 
         return normbbx
 
-
     #==========================================================================
     def select_all_blocks(self, images, block_size):
         """
@@ -221,13 +220,12 @@ class VideoSparseCoding(Preprocessor, object):
 
             for col in range(col_num - block_size):
 
-                block = images[:, row:row+block_size, col:col+block_size]
+                block = images[:, row:row + block_size, col:col + block_size]
 
-                all_blocks.append( block )
+                all_blocks.append(block)
 
         return all_blocks
 
-
     #==========================================================================
     def convert_frame_cont_to_grayscale_array(self, frame_cont):
         """
@@ -254,15 +252,16 @@ class VideoSparseCoding(Preprocessor, object):
 
             image = frame[1]
 
-            result_array.append( bob.ip.color.rgb_to_gray(image) )
+            result_array.append(bob.ip.color.rgb_to_gray(image))
 
         result_array = np.stack(result_array)
 
         return result_array
 
-
     #==========================================================================
-    def get_all_blocks_from_color_channel(self, video, annotations, block_size, block_length, min_face_size, norm_face_size):
+    def get_all_blocks_from_color_channel(self, video, annotations, block_size,
+                                          block_length, min_face_size,
+                                          norm_face_size):
         """
         Extract all 3D blocks from facial region of the input 3D array.
         Input 3D array represents one color channel of the video or a gray-scale
@@ -324,30 +323,33 @@ class VideoSparseCoding(Preprocessor, object):
 
         all_blocks = []
 
-        for fn in range(len(video)-block_length):
+        for fn in range(len(video) - block_length):
 
-            if str(fn) in annotated_frames: # process if frame is annotated
+            if str(fn) in annotated_frames:  # process if frame is annotated
 
                 frame_annotations = annotations[str(fn)]
 
-                face_size = np.min(np.array(frame_annotations['bottomright']) - np.array(frame_annotations['topleft']))
+                face_size = np.min(
+                    np.array(frame_annotations['bottomright']) -
+                    np.array(frame_annotations['topleft']))
 
-                if face_size >= min_face_size: # process is face is large enough
+                if face_size >= min_face_size:  # process if the face is large enough
 
                     # Selected 3D stacks of images. Stack has ``block_length`` images.
                     stack_of_images = video[fn:fn + block_length, :, :]
 
                     # 3D stacks of normalized face images.
-                    faces = self.crop_norm_faces_grayscale(stack_of_images, frame_annotations, norm_face_size)
+                    faces = self.crop_norm_faces_grayscale(
+                        stack_of_images, frame_annotations, norm_face_size)
 
                     # A list with all blocks per stack of facial images.
-                    list_all_blocks_per_stack = self.select_all_blocks(faces, block_size)
+                    list_all_blocks_per_stack = self.select_all_blocks(
+                        faces, block_size)
 
-                    all_blocks.append( list_all_blocks_per_stack )
+                    all_blocks.append(list_all_blocks_per_stack)
 
         return all_blocks
 
-
     #==========================================================================
     def extract_patches_from_blocks(self, all_blocks):
         """
@@ -398,9 +400,9 @@ class VideoSparseCoding(Preprocessor, object):
 
         length, row_num, col_num = all_blocks[0][0].shape
 
-        selected_row = np.int(row_num/2)
+        selected_row = np.int(row_num / 2)
 
-        selected_col = np.int(col_num/2)
+        selected_col = np.int(col_num / 2)
 
         frontal_patches = []
         horizontal_patches = []
@@ -415,25 +417,27 @@ class VideoSparseCoding(Preprocessor, object):
 
             for block in volume:
 
-                frontal_patch = block[0, :, :] # the frontal patch of a block. Size: (row_num x col_num)
+                # the frontal patch of a block; size: (row_num x col_num)
+                frontal_patch = block[0, :, :]
                 volume_frontal_patches.append(frontal_patch.flatten())
 
-                horizontal_patch = block[:, selected_row, :] # the central-horizontal patch of a block. Size: (lenghth x col_num), where
-                # lenghth = block_length, col_num = block_size.
+                # the central-horizontal patch of a block; size:
+                # (length x col_num), where length = block_length and
+                # col_num = block_size
+                horizontal_patch = block[:, selected_row, :]
                 volume_horizontal_patches.append(horizontal_patch.flatten())
 
-                vertical_patch = block[:, :, selected_col] # the central-vertical patch of a block. Size: (lenghth x row_num)
+                # the central-vertical patch of a block; size: (length x row_num)
+                vertical_patch = block[:, :, selected_col]
                 volume_vertical_patches.append(vertical_patch.flatten())
 
-            frontal_patches.append( np.stack(volume_frontal_patches) )
+            frontal_patches.append(np.stack(volume_frontal_patches))
 
-            horizontal_patches.append( np.stack(volume_horizontal_patches) )
+            horizontal_patches.append(np.stack(volume_horizontal_patches))
 
-            vertical_patches.append( np.stack(volume_vertical_patches) )
+            vertical_patches.append(np.stack(volume_vertical_patches))
 
         return frontal_patches, horizontal_patches, vertical_patches
 
-
     #==========================================================================
     def __select_random_patches_single_list(self, patches, n_patches):
         """
@@ -462,15 +466,18 @@ class VideoSparseCoding(Preprocessor, object):
 
         all_patches = np.vstack(patches)
 
-        idx = [random.randint( 0, len(all_patches) - 1 ) for _ in range(n_patches)]
+        idx = [
+            random.randint(0,
+                           len(all_patches) - 1) for _ in range(n_patches)
+        ]
 
         selected_patches = all_patches[idx, :]
 
         return selected_patches
 
-
     #==========================================================================
-    def select_random_patches(self, frontal_patches, horizontal_patches, vertical_patches, n_patches):
+    def select_random_patches(self, frontal_patches, horizontal_patches,
+                              vertical_patches, n_patches):
         """
         Select random patches given lists of frontal, central-horizontal and
         central-vertical patches, as returned by ``extract_patches_from_blocks``
@@ -523,15 +530,17 @@ class VideoSparseCoding(Preprocessor, object):
             (``n_patches`` x ``number_of_features``).
         """
 
-        selected_frontal_patches = self.__select_random_patches_single_list(frontal_patches, n_patches)
+        selected_frontal_patches = self.__select_random_patches_single_list(
+            frontal_patches, n_patches)
 
-        selected_horizontal_patches = self.__select_random_patches_single_list(horizontal_patches, n_patches)
+        selected_horizontal_patches = self.__select_random_patches_single_list(
+            horizontal_patches, n_patches)
 
-        selected_vertical_patches = self.__select_random_patches_single_list(vertical_patches, n_patches)
+        selected_vertical_patches = self.__select_random_patches_single_list(
+            vertical_patches, n_patches)
 
         return selected_frontal_patches, selected_horizontal_patches, selected_vertical_patches
 
-
     #==========================================================================
     def get_sparse_codes_for_patches(self, patches, dictionary):
         """
@@ -561,12 +570,15 @@ class VideoSparseCoding(Preprocessor, object):
 
         algo = 'omp'
 
-        n_nonzero = np.int(dictionary.shape[1]/5.)
+        n_nonzero = np.int(dictionary.shape[1] / 5.)
 
         alpha = n_nonzero
 
-        coder = SparseCoder(dictionary=dictionary, transform_n_nonzero_coefs=n_nonzero,
-                            transform_alpha=alpha, transform_algorithm=algo)
+        coder = SparseCoder(
+            dictionary=dictionary,
+            transform_n_nonzero_coefs=n_nonzero,
+            transform_alpha=alpha,
+            transform_algorithm=algo)
 
         # if a single patch is given of the shape (n_features,) convert it to the shape (1, n_features):
 
@@ -578,9 +590,9 @@ class VideoSparseCoding(Preprocessor, object):
 
         return codes
 
-
     #==========================================================================
-    def get_sparse_codes_for_list_of_patches(self, list_of_patches, dictionary):
+    def get_sparse_codes_for_list_of_patches(self, list_of_patches,
+                                             dictionary):
         """
         Compute sparse codes for each array of vectorized patches in the list.
         This function just calls ``get_sparse_codes_for_patches`` method
@@ -609,7 +621,7 @@ class VideoSparseCoding(Preprocessor, object):
 
         for idx, patches in enumerate(list_of_patches):
 
-#            print idx
 
             codes = self.get_sparse_codes_for_patches(patches, dictionary)
 
@@ -617,7 +629,6 @@ class VideoSparseCoding(Preprocessor, object):
 
         return video_codes
 
-
     #==========================================================================
     def load_array_from_hdf5(self, file_name):
         """
@@ -634,15 +645,14 @@ class VideoSparseCoding(Preprocessor, object):
             Loaded array.
         """
 
-        f = bob.io.base.HDF5File(file_name) #read only
+        f = bob.io.base.HDF5File(file_name)  # open read-only (default mode)
 
-        data = f.read('data') #reads integer
+        data = f.read('data')  # read the stored array
 
         del f
 
         return data
 
-
     #==========================================================================
     def load_the_dictionaries(self, dictionary_file_names):
         """
@@ -673,15 +683,17 @@ class VideoSparseCoding(Preprocessor, object):
             The dimensions are: (n_words_in_dictionary x n_features_vert)
         """
 
-        dictionary_frontal = self.load_array_from_hdf5(dictionary_file_names[0])
+        dictionary_frontal = self.load_array_from_hdf5(
+            dictionary_file_names[0])
 
-        dictionary_horizontal = self.load_array_from_hdf5(dictionary_file_names[1])
+        dictionary_horizontal = self.load_array_from_hdf5(
+            dictionary_file_names[1])
 
-        dictionary_vertical = self.load_array_from_hdf5(dictionary_file_names[2])
+        dictionary_vertical = self.load_array_from_hdf5(
+            dictionary_file_names[2])
 
         return dictionary_frontal, dictionary_horizontal, dictionary_vertical
 
-
     #==========================================================================
     def convert_sparse_codes_to_frame_container(self, sparse_codes):
         """
@@ -707,21 +719,23 @@ class VideoSparseCoding(Preprocessor, object):
             (``3`` x ``n_samples`` x ``n_words_in_the_dictionary``).
         """
 
-        frame_container = bob.bio.video.FrameContainer() # initialize the FrameContainer
+        frame_container = bob.bio.video.FrameContainer(
+        )  # initialize the FrameContainer
 
         idx = 0
 
-        for frontal_codes, horizontal_codes, vertical_codes in zip(sparse_codes[0], sparse_codes[1], sparse_codes[2]):
+        for frontal_codes, horizontal_codes, vertical_codes in zip(
+                sparse_codes[0], sparse_codes[1], sparse_codes[2]):
 
-            frame_3d = np.stack([frontal_codes, horizontal_codes, vertical_codes])
+            frame_3d = np.stack(
+                [frontal_codes, horizontal_codes, vertical_codes])
 
-            frame_container.add(idx, frame_3d) # add frame to FrameContainer
+            frame_container.add(idx, frame_3d)  # add frame to FrameContainer
 
             idx = idx + 1
 
         return frame_container
 
-
     #==========================================================================
     def comp_hist_of_sparse_codes(self, sparse_codes, method):
         """
@@ -757,7 +771,8 @@ class VideoSparseCoding(Preprocessor, object):
 
         histograms = []
 
-        for frontal_codes, horizontal_codes, vertical_codes in zip(sparse_codes[0], sparse_codes[1], sparse_codes[2]):
+        for frontal_codes, horizontal_codes, vertical_codes in zip(
+                sparse_codes[0], sparse_codes[1], sparse_codes[2]):
 
             frame = np.stack([frontal_codes, horizontal_codes, vertical_codes])
 
@@ -767,11 +782,11 @@ class VideoSparseCoding(Preprocessor, object):
 
             if method == "hist":
 
-                frame_codes = np.mean(frame!=0, axis=1)
+                frame_codes = np.mean(frame != 0, axis=1)
 
             for idx, row in enumerate(frame_codes):
 
-                frame_codes[idx,:] = row/np.sum(row)
+                frame_codes[idx, :] = row / np.sum(row)
 
             hist = frame_codes.flatten()
 
@@ -779,7 +794,6 @@ class VideoSparseCoding(Preprocessor, object):
 
         return histograms
 
-
     #==========================================================================
     def convert_arrays_to_frame_container(self, list_of_arrays):
         """
@@ -796,17 +810,20 @@ class VideoSparseCoding(Preprocessor, object):
             FrameContainer containing the feature vectors.
         """
 
-        frame_container = bob.bio.video.FrameContainer() # initialize the FrameContainer
+        frame_container = bob.bio.video.FrameContainer(
+        )  # initialize the FrameContainer
 
         for idx, item in enumerate(list_of_arrays):
 
-            frame_container.add(idx, item) # add frame to FrameContainer
+            frame_container.add(idx, item)  # add frame to FrameContainer
 
         return frame_container
 
-
     #==========================================================================
-    def mean_std_normalize(self, features, features_mean= None, features_std = None):
+    def mean_std_normalize(self,
+                           features,
+                           features_mean=None,
+                           features_std=None):
         """
         The features in the input 2D array are mean-std normalized.
         The rows are samples, the columns are features. If ``features_mean``
@@ -848,7 +865,7 @@ class VideoSparseCoding(Preprocessor, object):
 
         row_norm_list = []
 
-        for row in features: # row is a sample
+        for row in features:  # row is a sample
 
             row_norm = (row - features_mean) / features_std
 
@@ -858,9 +875,9 @@ class VideoSparseCoding(Preprocessor, object):
 
         return features_norm, features_mean, features_std
 
-
     #==========================================================================
-    def compute_patches_mean_squared_errors(self, sparse_codes, original_data, dictionary):
+    def compute_patches_mean_squared_errors(self, sparse_codes, original_data,
+                                            dictionary):
         """
         This function computes normalized mean squared errors (MSE) for each
         feature (column) in the reconstructed array of vectorized patches.
@@ -893,13 +910,15 @@ class VideoSparseCoding(Preprocessor, object):
 
         recovered_data = np.dot(sparse_codes, dictionary)
 
-        squared_error = 1.*np.sum((original_data - recovered_data) ** 2, axis=0) / np.sum(original_data**2, axis=0)
+        squared_error = 1. * np.sum(
+            (original_data - recovered_data)**2, axis=0) / np.sum(
+                original_data**2, axis=0)
 
         return squared_error
 
-
     #==========================================================================
-    def compute_mse_for_all_patches_types(self, sparse_codes_list, original_data_list, dictionary_list):
+    def compute_mse_for_all_patches_types(self, sparse_codes_list,
+                                          original_data_list, dictionary_list):
         """
         This function computes mean squared errors (MSE) for all types of patches:
         frontal, horizontal, and vertical. In this case the function
@@ -942,9 +961,11 @@ class VideoSparseCoding(Preprocessor, object):
 
         squared_errors_sorted = []
 
-        for sparse_codes, original_data, dictionary in zip(sparse_codes_list, original_data_list, dictionary_list):
+        for sparse_codes, original_data, dictionary in zip(
+                sparse_codes_list, original_data_list, dictionary_list):
 
-            squared_error = self.compute_patches_mean_squared_errors(sparse_codes, original_data, dictionary)
+            squared_error = self.compute_patches_mean_squared_errors(
+                sparse_codes, original_data, dictionary)
 
             squared_error_sorted = np.sort(squared_error)
 
@@ -960,9 +981,9 @@ class VideoSparseCoding(Preprocessor, object):
 
         return squared_errors
 
-
     #==========================================================================
-    def compute_mse_for_all_stacks(self, video_codes_list, patches_list, dictionary_list):
+    def compute_mse_for_all_stacks(self, video_codes_list, patches_list,
+                                   dictionary_list):
         """
         Call ``compute_mse_for_all_patches_types`` for data coming from all stacks
         of facial images.
@@ -1008,13 +1029,13 @@ class VideoSparseCoding(Preprocessor, object):
 
             original_data_list = [fp, hp, vp]
 
-            squared_errors = self.compute_mse_for_all_patches_types(sparse_codes_list, original_data_list, dictionary_list)
+            squared_errors = self.compute_mse_for_all_patches_types(
+                sparse_codes_list, original_data_list, dictionary_list)
 
             squared_errors_list.append(squared_errors)
 
         return squared_errors_list
 
-
     #==========================================================================
     def __call__(self, frames, annotations):
         """
@@ -1065,15 +1086,17 @@ class VideoSparseCoding(Preprocessor, object):
         video = self.convert_frame_cont_to_grayscale_array(frames)
 
         # Get all blocks from all possible facial stacks:
-        all_blocks = self.get_all_blocks_from_color_channel(video, annotations,
-                                                            self.block_size, self.block_length,
-                                                            self.min_face_size, self.norm_face_size)
+        all_blocks = self.get_all_blocks_from_color_channel(
+            video, annotations, self.block_size, self.block_length,
+            self.min_face_size, self.norm_face_size)
 
         # Extract three sets of patches per each stack of facial images:
-        frontal_patches, horizontal_patches, vertical_patches = self.extract_patches_from_blocks(all_blocks)
+        frontal_patches, horizontal_patches, vertical_patches = self.extract_patches_from_blocks(
+            all_blocks)
 
         # Load the dictionaries:
-        dictionary_frontal, dictionary_horizontal, dictionary_vertical = self.load_the_dictionaries(self.dictionary_file_names)
+        dictionary_frontal, dictionary_horizontal, dictionary_vertical = self.load_the_dictionaries(
+            self.dictionary_file_names)
 
         # Select subset of patches if ``frame_step`` > 1:
         frontal_patches_subset = frontal_patches[::self.frame_step]
@@ -1081,39 +1104,59 @@ class VideoSparseCoding(Preprocessor, object):
         vertical_patches_subset = vertical_patches[::self.frame_step]
 
         # Compute sparse codes for all patches of all types:
-        frontal_video_codes = self.get_sparse_codes_for_list_of_patches(frontal_patches_subset, dictionary_frontal)
-        horizontal_video_codes = self.get_sparse_codes_for_list_of_patches(horizontal_patches_subset, dictionary_horizontal)
-        vertical_video_codes = self.get_sparse_codes_for_list_of_patches(vertical_patches_subset, dictionary_vertical)
+        frontal_video_codes = self.get_sparse_codes_for_list_of_patches(
+            frontal_patches_subset, dictionary_frontal)
+        horizontal_video_codes = self.get_sparse_codes_for_list_of_patches(
+            horizontal_patches_subset, dictionary_horizontal)
+        vertical_video_codes = self.get_sparse_codes_for_list_of_patches(
+            vertical_patches_subset, dictionary_vertical)
 
         if self.comp_reconstruct_err_flag:
 
-            video_codes_list = [frontal_video_codes, horizontal_video_codes, vertical_video_codes]
+            video_codes_list = [
+                frontal_video_codes, horizontal_video_codes,
+                vertical_video_codes
+            ]
 
-            patches_list = [frontal_patches_subset, horizontal_patches_subset, vertical_patches_subset]
+            patches_list = [
+                frontal_patches_subset, horizontal_patches_subset,
+                vertical_patches_subset
+            ]
 
-            dictionary_list = [dictionary_frontal, dictionary_horizontal, dictionary_vertical]
+            dictionary_list = [
+                dictionary_frontal, dictionary_horizontal, dictionary_vertical
+            ]
 
-            squared_errors_list = self.compute_mse_for_all_stacks(video_codes_list, patches_list, dictionary_list)
+            squared_errors_list = self.compute_mse_for_all_stacks(
+                video_codes_list, patches_list, dictionary_list)
 
-            frame_container = self.convert_arrays_to_frame_container(squared_errors_list)
+            frame_container = self.convert_arrays_to_frame_container(
+                squared_errors_list)
 
         else:
 
-            if self.extract_histograms_flag: # in this case histograms will be extracted in the preprocessor , no feature extraction is needed then
+            if self.extract_histograms_flag:  # histograms are extracted in the preprocessor; no further feature extraction is needed
 
-                histograms = self.comp_hist_of_sparse_codes([frontal_video_codes, horizontal_video_codes, vertical_video_codes], self.method)
+                histograms = self.comp_hist_of_sparse_codes([
+                    frontal_video_codes, horizontal_video_codes,
+                    vertical_video_codes
+                ], self.method)
 
-                frame_container = self.convert_arrays_to_frame_container(histograms)
+                frame_container = self.convert_arrays_to_frame_container(
+                    histograms)
 
             else:
 
-                frame_container = self.convert_sparse_codes_to_frame_container([frontal_video_codes, horizontal_video_codes, vertical_video_codes])
+                frame_container = self.convert_sparse_codes_to_frame_container(
+                    [
+                        frontal_video_codes, horizontal_video_codes,
+                        vertical_video_codes
+                    ])
 
         return frame_container
 
-
     #==========================================================================
-    def write_data( self, frames, file_name ):
+    def write_data(self, frames, file_name):
         """
         Writes the given data (that has been generated using the __call__
         function of this class) to file. This method overrides the write_data()
@@ -1130,9 +1173,8 @@ class VideoSparseCoding(Preprocessor, object):
 
         self.video_preprocessor.write_data(frames, file_name)
 
-
     #==========================================================================
-    def read_data( self, file_name ):
+    def read_data(self, file_name):
         """
         Reads the preprocessed data from file.
         This method overrides the read_data() method of the Preprocessor class.
@@ -1151,11 +1193,3 @@ class VideoSparseCoding(Preprocessor, object):
         frames = self.video_preprocessor.read_data(file_name)
 
         return frames
-
-
-
-
-
-
-
-
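
The sparse-coding core wrapped by get_sparse_codes_for_patches can be
reproduced stand-alone with scikit-learn; a toy sketch with a synthetic
dictionary and synthetic patches, including the normalized reconstruction
error computed by compute_patches_mean_squared_errors:

    import numpy as np
    from sklearn.decomposition import SparseCoder

    rng = np.random.RandomState(0)

    # toy dictionary: 20 atoms of dimension 50 (OMP assumes unit-norm atoms)
    dictionary = rng.randn(20, 50)
    dictionary /= np.linalg.norm(dictionary, axis=1)[:, None]

    # same parametrization as get_sparse_codes_for_patches
    n_nonzero = int(dictionary.shape[1] / 5.)
    coder = SparseCoder(
        dictionary=dictionary,
        transform_n_nonzero_coefs=n_nonzero,
        transform_alpha=n_nonzero,
        transform_algorithm='omp')

    patches = rng.randn(10, 50)       # 10 vectorized patches
    codes = coder.transform(patches)  # shape: (10, 20)

    # normalized per-feature reconstruction error, as in
    # compute_patches_mean_squared_errors
    recovered = np.dot(codes, dictionary)
    squared_error = (np.sum((patches - recovered)**2, axis=0) /
                     np.sum(patches**2, axis=0))
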
diff --git a/bob/pad/face/preprocessor/__init__.py b/bob/pad/face/preprocessor/__init__.py
index dc5b3685..d2b91b80 100644
--- a/bob/pad/face/preprocessor/__init__.py
+++ b/bob/pad/face/preprocessor/__init__.py
@@ -29,4 +29,3 @@ __appropriate__(
     VideoSparseCoding,
 )
 __all__ = [_ for _ in dir() if not _.startswith('_')]
-
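
The tests that follow lean on bob.bio.video.FrameContainer throughout; the
construction pattern used by convert_arrays_to_frame_container above and by
the convert_image_to_video_data helper below reduces to this sketch with
synthetic frames:

    import numpy as np
    import bob.bio.video

    frame_container = bob.bio.video.FrameContainer()

    for idx in range(20):
        frame = np.zeros((3, 64, 64), dtype='uint8')
        frame_container.add(idx, frame)  # add frame to FrameContainer

    assert len(frame_container) == 20
    first_frame_data = frame_container[0][1]  # entry 0 holds the frame index
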
diff --git a/bob/pad/face/test/test.py b/bob/pad/face/test/test.py
index 10043dfa..d28cf49c 100644
--- a/bob/pad/face/test/test.py
+++ b/bob/pad/face/test/test.py
@@ -1,6 +1,5 @@
 #!/usr/bin/env python
 # vim: set fileencoding=utf-8 :
-
 """Test Units
 """
 #==============================================================================
@@ -40,12 +39,12 @@ from ..utils import face_detection_utils
 import random
 
 
-
 def test_detect_face_landmarks_in_image_mtcnn():
 
     img = load(datafile('testimage.jpg', 'bob.bio.face.test'))
     assert len(img) == 3
-    annotations = face_detection_utils.detect_face_landmarks_in_image(img, method = 'mtcnn')
+    annotations = face_detection_utils.detect_face_landmarks_in_image(
+        img, method='mtcnn')
     assert len(annotations['landmarks']) == 68
     assert len(annotations['left_eye']) == 2
     assert len(annotations['right_eye']) == 2
@@ -55,49 +54,48 @@ def test_detect_face_landmarks_in_image_mtcnn():
     #assert len(annotations['left_eye']) == (176, 220)
 
 
-
 def test_detect_face_landmarks_in_video_mtcnn():
 
     img = load(datafile('testimage.jpg', 'bob.bio.face.test'))
     assert len(img) == 3
-    frame_container= bob.bio.video.FrameContainer()
-    frame_container.add(1,img)
-    frame_container.add(2,img)
+    frame_container = bob.bio.video.FrameContainer()
+    frame_container.add(1, img)
+    frame_container.add(2, img)
 
-    annotations = face_detection_utils.detect_face_landmarks_in_video(frame_container, method = 'mtcnn')
+    annotations = face_detection_utils.detect_face_landmarks_in_video(
+        frame_container, method='mtcnn')
     assert len(annotations) == 2
     assert len(annotations['1']['landmarks']) == 68
-    
-
 
 
 def test_detect_face_landmarks_in_image_dlib():
 
-	img = load(datafile('testimage.jpg', 'bob.bio.face.test'))
-	assert len(img) == 3
-	annotations = face_detection_utils.detect_face_landmarks_in_image(img, method = 'dlib')
-	assert len(annotations['landmarks']) == 68
-	assert len(annotations['left_eye']) == 2
-	assert len(annotations['right_eye']) == 2
-	assert len(annotations['topleft']) == 2
-	assert len(annotations['bottomright']) == 2
-
-	#assert len(annotations['left_eye']) == (176, 220)
+    img = load(datafile('testimage.jpg', 'bob.bio.face.test'))
+    assert len(img) == 3
+    annotations = face_detection_utils.detect_face_landmarks_in_image(
+        img, method='dlib')
+    assert len(annotations['landmarks']) == 68
+    assert len(annotations['left_eye']) == 2
+    assert len(annotations['right_eye']) == 2
+    assert len(annotations['topleft']) == 2
+    assert len(annotations['bottomright']) == 2
 
+    #assert len(annotations['left_eye']) == (176, 220)
 
 
 def test_detect_face_landmarks_in_video_dlib():
 
-	img = load(datafile('testimage.jpg', 'bob.bio.face.test'))
-	assert len(img) == 3
-	frame_container= bob.bio.video.FrameContainer()
-	frame_container.add(1,img)
-	frame_container.add(2,img)
+    img = load(datafile('testimage.jpg', 'bob.bio.face.test'))
+    assert len(img) == 3
+    frame_container = bob.bio.video.FrameContainer()
+    frame_container.add(1, img)
+    frame_container.add(2, img)
+
+    annotations = face_detection_utils.detect_face_landmarks_in_video(
+        frame_container, method='dlib')
+    assert len(annotations) == 2
+    assert len(annotations['1']['landmarks']) == 68
 
-	annotations = face_detection_utils.detect_face_landmarks_in_video(frame_container, method = 'dlib')
-	assert len(annotations) == 2
-	assert len(annotations['1']['landmarks']) == 68
-	
 
 #==============================================================================
 def test_lbp_histogram():
@@ -118,13 +116,13 @@ def test_image_face_crop():
     image = load(datafile('test_image.png', 'bob.pad.face.test'))
     annotations = {'topleft': (95, 155), 'bottomright': (215, 265)}
 
-    preprocessor = ImageFaceCrop(face_size = 64, rgb_output_flag = False)
+    preprocessor = ImageFaceCrop(face_size=64, rgb_output_flag=False)
     face = preprocessor(image, annotations)
 
     assert face.shape == (64, 64)
     assert np.sum(face) == 429158
 
-    preprocessor = ImageFaceCrop(face_size = 64, rgb_output_flag = True)
+    preprocessor = ImageFaceCrop(face_size=64, rgb_output_flag=True)
     face = preprocessor(image, annotations)
 
     assert face.shape == (3, 64, 64)
@@ -161,13 +159,14 @@ def convert_image_to_video_data(image, annotations, n_frames):
         is the dictionary defining the coordinates of the face bounding box in frame N.
     """
 
-    frame_container = bob.bio.video.FrameContainer() # initialize the FrameContainer
+    frame_container = bob.bio.video.FrameContainer(
+    )  # initialize the FrameContainer
 
     video_annotations = {}
 
-    for idx, fn in enumerate( range(0, n_frames) ):
+    for idx, fn in enumerate(range(0, n_frames)):
 
-        frame_container.add(idx, image) # add current frame to FrameContainer
+        frame_container.add(idx, image)  # add current frame to FrameContainer
 
         video_annotations[str(idx)] = annotations
 
@@ -183,31 +182,32 @@ def test_video_face_crop():
     image = load(datafile('test_image.png', 'bob.pad.face.test'))
     annotations = {'topleft': (95, 155), 'bottomright': (215, 265)}
 
-    CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
-    CROPPED_POSITIONS = {'topleft' : (0,0) , 'bottomright' : CROPPED_IMAGE_SIZE}
+    CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
+    CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
     FIXED_POSITIONS = None
-    MASK_SIGMA = None             # The sigma for random values areas outside image
-    MASK_NEIGHBORS = 5            # The number of neighbors to consider while extrapolating
-    MASK_SEED = None              # The seed for generating random values during extrapolation
-    CHECK_FACE_SIZE_FLAG = True   # Check the size of the face
-    MIN_FACE_SIZE = 50            # Minimal possible size of the face
-    USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
-    COLOR_CHANNEL = 'gray'        # Convert image to gray-scale format
-
-    preprocessor = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
-                                 cropped_positions = CROPPED_POSITIONS,
-                                 fixed_positions = FIXED_POSITIONS,
-                                 mask_sigma = MASK_SIGMA,
-                                 mask_neighbors = MASK_NEIGHBORS,
-                                 mask_seed = MASK_SEED,
-                                 check_face_size_flag = CHECK_FACE_SIZE_FLAG,
-                                 min_face_size = MIN_FACE_SIZE,
-                                 use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
-                                 color_channel = COLOR_CHANNEL)
+    MASK_SIGMA = None  # sigma for random values in areas outside the image
+    MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
+    MASK_SEED = None  # The seed for generating random values during extrapolation
+    CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
+    MIN_FACE_SIZE = 50  # Minimal possible size of the face
+    USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
+    COLOR_CHANNEL = 'gray'  # Convert image to gray-scale format
+
+    preprocessor = VideoFaceCrop(
+        cropped_image_size=CROPPED_IMAGE_SIZE,
+        cropped_positions=CROPPED_POSITIONS,
+        fixed_positions=FIXED_POSITIONS,
+        mask_sigma=MASK_SIGMA,
+        mask_neighbors=MASK_NEIGHBORS,
+        mask_seed=MASK_SEED,
+        check_face_size_flag=CHECK_FACE_SIZE_FLAG,
+        min_face_size=MIN_FACE_SIZE,
+        use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
+        color_channel=COLOR_CHANNEL)
 
     video, annotations = convert_image_to_video_data(image, annotations, 20)
 
-    faces = preprocessor(frames = video, annotations = annotations)
+    faces = preprocessor(frames=video, annotations=annotations)
 
     assert len(faces) == 20
     assert faces[0][1].shape == (64, 64)
@@ -218,35 +218,36 @@ def test_video_face_crop():
     #==========================================================================
     # test another configuration of the  VideoFaceCrop preprocessor:
 
-    CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
-    CROPPED_POSITIONS = {'topleft' : (0,0) , 'bottomright' : CROPPED_IMAGE_SIZE}
+    CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
+    CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
     FIXED_POSITIONS = None
-    MASK_SIGMA = None             # The sigma for random values areas outside image
-    MASK_NEIGHBORS = 5            # The number of neighbors to consider while extrapolating
-    MASK_SEED = None              # The seed for generating random values during extrapolation
-    CHECK_FACE_SIZE_FLAG = True   # Check the size of the face
+    MASK_SIGMA = None  # sigma for random values in areas outside the image
+    MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
+    MASK_SEED = None  # The seed for generating random values during extrapolation
+    CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
     MIN_FACE_SIZE = 50
-    USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
-    RGB_OUTPUT_FLAG = True        # Return RGB cropped face using local cropper
-    DETECT_FACES_FLAG = True      # find annotations locally replacing the database annotations
+    USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
+    RGB_OUTPUT_FLAG = True  # Return RGB cropped face using local cropper
+    DETECT_FACES_FLAG = True  # find annotations locally replacing the database annotations
     FACE_DETECTION_METHOD = "dlib"
 
-    preprocessor = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
-                                 cropped_positions = CROPPED_POSITIONS,
-                                 fixed_positions = FIXED_POSITIONS,
-                                 mask_sigma = MASK_SIGMA,
-                                 mask_neighbors = MASK_NEIGHBORS,
-                                 mask_seed = None,
-                                 check_face_size_flag = CHECK_FACE_SIZE_FLAG,
-                                 min_face_size = MIN_FACE_SIZE,
-                                 use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
-                                 rgb_output_flag = RGB_OUTPUT_FLAG,
-                                 detect_faces_flag = DETECT_FACES_FLAG,
-                                 face_detection_method = FACE_DETECTION_METHOD)
+    preprocessor = VideoFaceCrop(
+        cropped_image_size=CROPPED_IMAGE_SIZE,
+        cropped_positions=CROPPED_POSITIONS,
+        fixed_positions=FIXED_POSITIONS,
+        mask_sigma=MASK_SIGMA,
+        mask_neighbors=MASK_NEIGHBORS,
+        mask_seed=None,
+        check_face_size_flag=CHECK_FACE_SIZE_FLAG,
+        min_face_size=MIN_FACE_SIZE,
+        use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
+        rgb_output_flag=RGB_OUTPUT_FLAG,
+        detect_faces_flag=DETECT_FACES_FLAG,
+        face_detection_method=FACE_DETECTION_METHOD)
 
     video, _ = convert_image_to_video_data(image, annotations, 3)
 
-    faces = preprocessor(frames = video, annotations = annotations)
+    faces = preprocessor(frames=video, annotations=annotations)
 
     assert len(faces) == 3
     assert faces[0][1].shape == (3, 64, 64)
@@ -267,20 +268,22 @@ def test_frame_difference():
 
     n_frames = 20
 
-    video, annotations = convert_image_to_video_data(image, annotations, n_frames)
+    video, annotations = convert_image_to_video_data(image, annotations,
+                                                     n_frames)
 
-    NUMBER_OF_FRAMES = None # process all frames
-    CHECK_FACE_SIZE_FLAG = True # Check size of the face
-    MIN_FACE_SIZE = 50 # Minimal size of the face to consider
+    NUMBER_OF_FRAMES = None  # process all frames
+    CHECK_FACE_SIZE_FLAG = True  # Check size of the face
+    MIN_FACE_SIZE = 50  # Minimal size of the face to consider
 
-    preprocessor = FrameDifference(number_of_frames = NUMBER_OF_FRAMES,
-                                   check_face_size_flag = CHECK_FACE_SIZE_FLAG,
-                                   min_face_size = MIN_FACE_SIZE)
+    preprocessor = FrameDifference(
+        number_of_frames=NUMBER_OF_FRAMES,
+        check_face_size_flag=CHECK_FACE_SIZE_FLAG,
+        min_face_size=MIN_FACE_SIZE)
 
-    diff = preprocessor(frames = video, annotations = annotations)
+    diff = preprocessor(frames=video, annotations=annotations)
 
-    assert diff.shape == (n_frames-1, 2)
-    assert (diff==0).all()
+    assert diff.shape == (n_frames - 1, 2)
+    assert (diff == 0).all()
 
 
 #==============================================================================
@@ -289,20 +292,19 @@ def test_frame_diff_features():
     Test FrameDiffFeatures extractor computing 10 features given frame differences.
     """
 
-    WINDOW_SIZE=20
-    OVERLAP=0
+    WINDOW_SIZE = 20
+    OVERLAP = 0
 
-    extractor = FrameDiffFeatures(window_size=WINDOW_SIZE,
-                                  overlap=OVERLAP)
+    extractor = FrameDiffFeatures(window_size=WINDOW_SIZE, overlap=OVERLAP)
 
-    data = np.transpose( np.vstack( [range(0,100), range(0,100)] ) )
+    data = np.transpose(np.vstack([range(0, 100), range(0, 100)]))
 
     features = extractor(data)
 
     assert len(features) == 5
     assert len(features[0][1]) == 10
     assert len(features[-1][1]) == 10
-    assert (features[0][1][0:5]==features[0][1][5:]).all()
+    assert (features[0][1][0:5] == features[0][1][5:]).all()
     assert (np.sum(features[0][1]) - 73.015116873109207) < 0.000001
 
 
@@ -315,53 +317,55 @@ def test_video_lbp_histogram():
     image = load(datafile('test_image.png', 'bob.pad.face.test'))
     annotations = {'topleft': (95, 155), 'bottomright': (215, 265)}
 
-    CROPPED_IMAGE_SIZE = (64, 64) # The size of the resulting face
-    CROPPED_POSITIONS = {'topleft' : (0,0) , 'bottomright' : CROPPED_IMAGE_SIZE}
+    CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
+    CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
     FIXED_POSITIONS = None
-    MASK_SIGMA = None             # The sigma for random values areas outside image
-    MASK_NEIGHBORS = 5            # The number of neighbors to consider while extrapolating
-    MASK_SEED = None              # The seed for generating random values during extrapolation
-    CHECK_FACE_SIZE_FLAG = True   # Check the size of the face
-    MIN_FACE_SIZE = 50            # Minimal possible size of the face
-    USE_LOCAL_CROPPER_FLAG = True # Use the local face cropping class (identical to Ivana's paper)
-    RGB_OUTPUT_FLAG = False       # The output is gray-scale
-    COLOR_CHANNEL = 'gray'        # Convert image to gray-scale format
-
-    preprocessor = VideoFaceCrop(cropped_image_size = CROPPED_IMAGE_SIZE,
-                                 cropped_positions = CROPPED_POSITIONS,
-                                 fixed_positions = FIXED_POSITIONS,
-                                 mask_sigma = MASK_SIGMA,
-                                 mask_neighbors = MASK_NEIGHBORS,
-                                 mask_seed = MASK_SEED,
-                                 check_face_size_flag = CHECK_FACE_SIZE_FLAG,
-                                 min_face_size = MIN_FACE_SIZE,
-                                 use_local_cropper_flag = USE_LOCAL_CROPPER_FLAG,
-                                 rgb_output_flag = RGB_OUTPUT_FLAG,
-                                 color_channel = COLOR_CHANNEL)
+    MASK_SIGMA = None  # The sigma of the random values filling areas outside the image
+    MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
+    MASK_SEED = None  # The seed for generating random values during extrapolation
+    CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
+    MIN_FACE_SIZE = 50  # Minimal possible size of the face
+    USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
+    RGB_OUTPUT_FLAG = False  # The output is gray-scale
+    COLOR_CHANNEL = 'gray'  # Convert image to gray-scale format
+
+    preprocessor = VideoFaceCrop(
+        cropped_image_size=CROPPED_IMAGE_SIZE,
+        cropped_positions=CROPPED_POSITIONS,
+        fixed_positions=FIXED_POSITIONS,
+        mask_sigma=MASK_SIGMA,
+        mask_neighbors=MASK_NEIGHBORS,
+        mask_seed=MASK_SEED,
+        check_face_size_flag=CHECK_FACE_SIZE_FLAG,
+        min_face_size=MIN_FACE_SIZE,
+        use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
+        rgb_output_flag=RGB_OUTPUT_FLAG,
+        color_channel=COLOR_CHANNEL)
 
     video, annotations = convert_image_to_video_data(image, annotations, 20)
 
-    faces = preprocessor(frames = video, annotations = annotations)
+    faces = preprocessor(frames=video, annotations=annotations)
 
-    LBPTYPE='uniform'
-    ELBPTYPE='regular'
-    RAD=1
-    NEIGHBORS=8
-    CIRC=False
-    DTYPE=None
+    LBPTYPE = 'uniform'
+    ELBPTYPE = 'regular'
+    RAD = 1
+    NEIGHBORS = 8
+    CIRC = False
+    DTYPE = None
 
-    extractor = VideoLBPHistogram(lbptype=LBPTYPE,
-                                  elbptype=ELBPTYPE,
-                                  rad=RAD,
-                                  neighbors=NEIGHBORS,
-                                  circ=CIRC,
-                                  dtype=DTYPE)
+    extractor = VideoLBPHistogram(
+        lbptype=LBPTYPE,
+        elbptype=ELBPTYPE,
+        rad=RAD,
+        neighbors=NEIGHBORS,
+        circ=CIRC,
+        dtype=DTYPE)
 
     lbp_histograms = extractor(faces)
 
     assert len(lbp_histograms) == 20
     assert len(lbp_histograms[0][1]) == 59
-    assert (lbp_histograms[0][1]==lbp_histograms[-1][1]).all()
+    assert (lbp_histograms[0][1] == lbp_histograms[-1][1]).all()
     assert (lbp_histograms[0][1][0] - 0.12695109261186263) < 0.000001
     assert (lbp_histograms[0][1][-1] - 0.031737773152965658) < 0.000001
 
@@ -377,19 +381,17 @@ def test_video_quality_measure():
 
     video, annotations = convert_image_to_video_data(image, annotations, 2)
 
-    GALBALLY=True
-    MSU=True
-    DTYPE=None
+    GALBALLY = True
+    MSU = True
+    DTYPE = None
 
-    extractor = VideoQualityMeasure(galbally=GALBALLY,
-                                    msu=MSU,
-                                    dtype=DTYPE)
+    extractor = VideoQualityMeasure(galbally=GALBALLY, msu=MSU, dtype=DTYPE)
 
     features = extractor(video)
 
     assert len(features) == 2
     assert len(features[0][1]) == 139
-    assert (features[0][1]==features[-1][1]).all()
+    assert (features[0][1] == features[-1][1]).all()
     assert (features[0][1][0] - 2.7748559659812599e-05) < 0.000001
     assert (features[0][1][-1] - 0.16410418866596271) < 0.000001
 
@@ -415,11 +417,13 @@ def convert_array_to_list_of_frame_cont(data):
 
     for idx, vec in enumerate(data):
 
-        frame_container = bob.bio.video.FrameContainer() # initialize the FrameContainer
+        frame_container = bob.bio.video.FrameContainer(
+        )  # initialize the FrameContainer
 
         frame_container.add(0, vec)
 
-        frame_container_list.append( frame_container ) # add current frame to FrameContainer
+        frame_container_list.append(
+            frame_container)  # add current frame to FrameContainer
 
     return frame_container_list
 
@@ -435,11 +439,15 @@ def test_video_svm_pad_algorithm():
     N = 20000
     mu = 1
     sigma = 1
-    real_array = np.transpose( np.vstack([[random.gauss(mu, sigma) for _ in range(N)], [random.gauss(mu, sigma) for _ in range(N)]]) )
+    real_array = np.transpose(
+        np.vstack([[random.gauss(mu, sigma) for _ in range(N)],
+                   [random.gauss(mu, sigma) for _ in range(N)]]))
 
     mu = 5
     sigma = 1
-    attack_array = np.transpose( np.vstack([[random.gauss(mu, sigma) for _ in range(N)], [random.gauss(mu, sigma) for _ in range(N)]]) )
+    attack_array = np.transpose(
+        np.vstack([[random.gauss(mu, sigma) for _ in range(N)],
+                   [random.gauss(mu, sigma) for _ in range(N)]]))
 
     real = convert_array_to_list_of_frame_cont(real_array)
     attack = convert_array_to_list_of_frame_cont(attack_array)
@@ -450,35 +458,38 @@ def test_video_svm_pad_algorithm():
     KERNEL_TYPE = 'RBF'
     N_SAMPLES = 1000
     TRAINER_GRID_SEARCH_PARAMS = {'cost': [1], 'gamma': [0.5, 1]}
-    MEAN_STD_NORM_FLAG = True      # enable mean-std normalization
-    FRAME_LEVEL_SCORES_FLAG = True # one score per frame(!) in this case
-
-    algorithm = VideoSvmPadAlgorithm(machine_type = MACHINE_TYPE,
-                                     kernel_type = KERNEL_TYPE,
-                                     n_samples = N_SAMPLES,
-                                     trainer_grid_search_params = TRAINER_GRID_SEARCH_PARAMS,
-                                     mean_std_norm_flag = MEAN_STD_NORM_FLAG,
-                                     frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
-
-    machine = algorithm.train_svm(training_features = training_features,
-                             n_samples = algorithm.n_samples,
-                             machine_type = algorithm.machine_type,
-                             kernel_type = algorithm.kernel_type,
-                             trainer_grid_search_params = algorithm.trainer_grid_search_params,
-                             mean_std_norm_flag = algorithm.mean_std_norm_flag,
-                             projector_file = "",
-                             save_debug_data_flag = False)
+    MEAN_STD_NORM_FLAG = True  # enable mean-std normalization
+    FRAME_LEVEL_SCORES_FLAG = True  # one score per frame(!) in this case
+
+    algorithm = VideoSvmPadAlgorithm(
+        machine_type=MACHINE_TYPE,
+        kernel_type=KERNEL_TYPE,
+        n_samples=N_SAMPLES,
+        trainer_grid_search_params=TRAINER_GRID_SEARCH_PARAMS,
+        mean_std_norm_flag=MEAN_STD_NORM_FLAG,
+        frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
+
+    machine = algorithm.train_svm(
+        training_features=training_features,
+        n_samples=algorithm.n_samples,
+        machine_type=algorithm.machine_type,
+        kernel_type=algorithm.kernel_type,
+        trainer_grid_search_params=algorithm.trainer_grid_search_params,
+        mean_std_norm_flag=algorithm.mean_std_norm_flag,
+        projector_file="",
+        save_debug_data_flag=False)
 
     assert machine.n_support_vectors == [148, 150]
     assert machine.gamma == 0.5
 
     real_sample = algorithm.convert_frame_cont_to_array(real[0])
 
-    prob = machine.predict_class_and_probabilities( real_sample )[1]
+    prob = machine.predict_class_and_probabilities(real_sample)[1]
 
-    assert prob[0,0] > prob[0,1]
+    assert prob[0, 0] > prob[0, 1]
 
-    precision = algorithm.comp_prediction_precision(machine, real_array, attack_array)
+    precision = algorithm.comp_prediction_precision(machine, real_array,
+                                                    attack_array)
 
     assert precision > 0.99
 
@@ -494,11 +505,15 @@ def test_video_gmm_pad_algorithm():
     N = 1000
     mu = 1
     sigma = 1
-    real_array = np.transpose( np.vstack([[random.gauss(mu, sigma) for _ in range(N)], [random.gauss(mu, sigma) for _ in range(N)]]) )
+    real_array = np.transpose(
+        np.vstack([[random.gauss(mu, sigma) for _ in range(N)],
+                   [random.gauss(mu, sigma) for _ in range(N)]]))
 
     mu = 5
     sigma = 1
-    attack_array = np.transpose( np.vstack([[random.gauss(mu, sigma) for _ in range(N)], [random.gauss(mu, sigma) for _ in range(N)]]) )
+    attack_array = np.transpose(
+        np.vstack([[random.gauss(mu, sigma) for _ in range(N)],
+                   [random.gauss(mu, sigma) for _ in range(N)]]))
 
     real = convert_array_to_list_of_frame_cont(real_array)
 
@@ -506,19 +521,22 @@ def test_video_gmm_pad_algorithm():
     RANDOM_STATE = 3
     FRAME_LEVEL_SCORES_FLAG = True
 
-    algorithm = VideoGmmPadAlgorithm(n_components = N_COMPONENTS,
-                                     random_state = RANDOM_STATE,
-                                     frame_level_scores_flag = FRAME_LEVEL_SCORES_FLAG)
+    algorithm = VideoGmmPadAlgorithm(
+        n_components=N_COMPONENTS,
+        random_state=RANDOM_STATE,
+        frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
 
     # training_features[0] - training features for the REAL class.
-    real_array_converted = algorithm.convert_list_of_frame_cont_to_array(real) # output is array
+    real_array_converted = algorithm.convert_list_of_frame_cont_to_array(
+        real)  # output is array
 
     assert (real_array == real_array_converted).all()
 
     # Train the GMM machine and get normalizers:
-    machine, features_mean, features_std = algorithm.train_gmm(real = real_array_converted,
-                                                               n_components = algorithm.n_components,
-                                                               random_state = algorithm.random_state)
+    machine, features_mean, features_std = algorithm.train_gmm(
+        real=real_array_converted,
+        n_components=algorithm.n_components,
+        random_state=algorithm.random_state)
 
     algorithm.machine = machine
 
@@ -534,7 +552,3 @@ def test_video_gmm_pad_algorithm():
     assert (np.max(scores_real) + 1.8380480068281055) < 0.000001
     assert (np.min(scores_attack) + 38.831260843070098) < 0.000001
     assert (np.max(scores_attack) + 5.3633030621521272) < 0.000001
-
-
-
-
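
For reference, the transformation applied throughout the hunks above can be reproduced with yapf's public Python API; the yapf -ri . run named in the subject line does the same thing file-by-file. The fragment below is a minimal sketch: the sample source and names are made up, and the exact wrapping depends on the active style settings.

    from yapf.yapflib.yapf_api import FormatCode

    # A hypothetical fragment in the pre-patch style: spaces around '=' in
    # keyword arguments and continuation lines aligned to the open paren.
    source = ("preprocessor = VideoFaceCrop(cropped_image_size = SIZE,\n"
              "                             cropped_positions = POSITIONS)\n")

    # Recent yapf versions return a (reformatted_source, changed) pair.
    formatted, changed = FormatCode(source, style_config='pep8')
    print(formatted)  # keyword '=' loses its spaces and long calls re-wrap
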
diff --git a/bob/pad/face/test/test_databases.py b/bob/pad/face/test/test_databases.py
index 22fc3ca0..e9a01cf7 100644
--- a/bob/pad/face/test/test_databases.py
+++ b/bob/pad/face/test/test_databases.py
@@ -8,82 +8,162 @@ import bob.bio.base
 from bob.bio.base.test.utils import db_available
 
 
-@db_available('replay') # the name of the package
+@db_available('replay')  # the name of the package
 def test_replay():
-    replay_database_instance = bob.bio.base.load_resource('replay-attack', 'database', preferred_package='bob.pad.face', package_prefix='bob.pad.') # replay-attack is the name of the configuration file
+    replay_database_instance = bob.bio.base.load_resource(
+        'replay-attack',
+        'database',
+        preferred_package='bob.pad.face',
+        package_prefix='bob.pad.'
+    )  # replay-attack is the name of the configuration file
     try:
 
-        assert len( replay_database_instance.objects(groups=['train', 'dev', 'eval']) )==  1200
-        assert len( replay_database_instance.objects(groups=['train', 'dev']) ) ==  720
-        assert len( replay_database_instance.objects(groups=['train']) ) ==  360
-        assert len( replay_database_instance.objects(groups=['train', 'dev', 'eval'], protocol = 'grandtest') )==  1200
-        assert len( replay_database_instance.objects(groups=['train', 'dev', 'eval'], protocol = 'grandtest', purposes='real') ) ==  200
-        assert len( replay_database_instance.objects(groups=['train', 'dev', 'eval'], protocol = 'grandtest', purposes='attack') ) == 1000
+        assert len(
+            replay_database_instance.objects(
+                groups=['train', 'dev', 'eval'])) == 1200
+        assert len(
+            replay_database_instance.objects(groups=['train', 'dev'])) == 720
+        assert len(replay_database_instance.objects(groups=['train'])) == 360
+        assert len(
+            replay_database_instance.objects(
+                groups=['train', 'dev', 'eval'], protocol='grandtest')) == 1200
+        assert len(
+            replay_database_instance.objects(
+                groups=['train', 'dev', 'eval'],
+                protocol='grandtest',
+                purposes='real')) == 200
+        assert len(
+            replay_database_instance.objects(
+                groups=['train', 'dev', 'eval'],
+                protocol='grandtest',
+                purposes='attack')) == 1000
 
     except IOError as e:
         raise SkipTest(
-            "The database could not be queried; probably the db.sql3 file is missing. Here is the error: '%s'" % e)
+            "The database could not be queried; probably the db.sql3 file is missing. Here is the error: '%s'"
+            % e)
 
 
 @db_available('replaymobile')
 def test_replaymobile():
-    replaymobile = bob.bio.base.load_resource('replay-mobile', 'database', preferred_package='bob.pad.face', package_prefix='bob.pad.')
+    replaymobile = bob.bio.base.load_resource(
+        'replay-mobile',
+        'database',
+        preferred_package='bob.pad.face',
+        package_prefix='bob.pad.')
     try:
 
-        assert len( replaymobile.objects(groups=['train', 'dev', 'eval']) )==  1030
-        assert len( replaymobile.objects(groups=['train', 'dev']) ) ==  728
-        assert len( replaymobile.objects(groups=['train']) ) ==  312
-        assert len( replaymobile.objects(groups=['train', 'dev', 'eval'], protocol = 'grandtest') )==  1030
-        assert len( replaymobile.objects(groups=['train', 'dev', 'eval'], protocol = 'grandtest', purposes='real') ) ==  390
-        assert len( replaymobile.objects(groups=['train', 'dev', 'eval'], protocol = 'grandtest', purposes='attack') ) == 640
+        assert len(
+            replaymobile.objects(groups=['train', 'dev', 'eval'])) == 1030
+        assert len(replaymobile.objects(groups=['train', 'dev'])) == 728
+        assert len(replaymobile.objects(groups=['train'])) == 312
+        assert len(
+            replaymobile.objects(
+                groups=['train', 'dev', 'eval'], protocol='grandtest')) == 1030
+        assert len(
+            replaymobile.objects(
+                groups=['train', 'dev', 'eval'],
+                protocol='grandtest',
+                purposes='real')) == 390
+        assert len(
+            replaymobile.objects(
+                groups=['train', 'dev', 'eval'],
+                protocol='grandtest',
+                purposes='attack')) == 640
 
     except IOError as e:
         raise SkipTest(
-            "The database could not be queried; probably the db.sql3 file is missing. Here is the error: '%s'" % e)
+            "The database could not be queried; probably the db.sql3 file is missing. Here is the error: '%s'"
+            % e)
 
 
 @db_available('msu_mfsd_mod')
 def test_msu_mfsd():
-    msu_mfsd = bob.bio.base.load_resource('msu-mfsd', 'database', preferred_package='bob.pad.face', package_prefix='bob.pad.')
+    msu_mfsd = bob.bio.base.load_resource(
+        'msu-mfsd',
+        'database',
+        preferred_package='bob.pad.face',
+        package_prefix='bob.pad.')
     try:
 
-        assert len( msu_mfsd.objects(groups=['train', 'dev', 'eval']) )==  280
-        assert len( msu_mfsd.objects(groups=['train', 'dev']) ) ==  160
-        assert len( msu_mfsd.objects(groups=['train']) ) ==  80
-        assert len( msu_mfsd.objects(groups=['train', 'dev', 'eval'], protocol = 'grandtest') )==  280
-        assert len( msu_mfsd.objects(groups=['train', 'dev', 'eval'], protocol = 'grandtest', purposes='real') ) ==  70
-        assert len( msu_mfsd.objects(groups=['train', 'dev', 'eval'], protocol = 'grandtest', purposes='attack') ) == 210
+        assert len(msu_mfsd.objects(groups=['train', 'dev', 'eval'])) == 280
+        assert len(msu_mfsd.objects(groups=['train', 'dev'])) == 160
+        assert len(msu_mfsd.objects(groups=['train'])) == 80
+        assert len(
+            msu_mfsd.objects(
+                groups=['train', 'dev', 'eval'], protocol='grandtest')) == 280
+        assert len(
+            msu_mfsd.objects(
+                groups=['train', 'dev', 'eval'],
+                protocol='grandtest',
+                purposes='real')) == 70
+        assert len(
+            msu_mfsd.objects(
+                groups=['train', 'dev', 'eval'],
+                protocol='grandtest',
+                purposes='attack')) == 210
 
     except IOError as e:
         raise SkipTest(
-            "The database could not be queried; probably the db.sql3 file is missing. Here is the error: '%s'" % e)
+            "The database could not be queried; probably the db.sql3 file is missing. Here is the error: '%s'"
+            % e)
 
 
 # Test the Aggregated database, which doesn't have a package
 def test_aggregated_db():
-    aggregated_db = bob.bio.base.load_resource('aggregated-db', 'database', preferred_package='bob.pad.face', package_prefix='bob.pad.')
+    aggregated_db = bob.bio.base.load_resource(
+        'aggregated-db',
+        'database',
+        preferred_package='bob.pad.face',
+        package_prefix='bob.pad.')
     try:
 
-        assert len( aggregated_db.objects(groups=['train', 'dev', 'eval']) )==  2510
-        assert len( aggregated_db.objects(groups=['train', 'dev']) ) ==  1608
-        assert len( aggregated_db.objects(groups=['train']) ) ==  752
-
-        assert len( aggregated_db.objects(groups='train') ) ==  752
-        assert len( aggregated_db.objects(groups='dev') ) ==  856
-        assert len( aggregated_db.objects(groups='eval') ) ==  902
-
-        assert len( aggregated_db.objects(groups=['train', 'dev', 'eval'], protocol = 'grandtest') ) ==  2510
-        assert len( aggregated_db.objects(groups=['train', 'dev', 'eval'], protocol = 'grandtest', purposes='real') ) ==  660
-        assert len( aggregated_db.objects(groups=['train', 'dev', 'eval'], protocol = 'grandtest', purposes='attack') ) == 1850
-
-        assert len( aggregated_db.objects(groups=['train', 'dev', 'eval'], protocol = 'photo-photo-video')) ==  1664
-        assert len( aggregated_db.objects(groups=['train', 'dev'], protocol = 'photo-photo-video')) ==  1176
-        assert len( aggregated_db.objects(groups='eval',  protocol = 'photo-photo-video')) ==  488
-
-        assert len( aggregated_db.objects(groups=['train', 'dev', 'eval'], protocol = 'video-video-photo')) ==  1506
-        assert len( aggregated_db.objects(groups=['train', 'dev'], protocol = 'video-video-photo')) ==  872
-        assert len( aggregated_db.objects(groups='eval',  protocol = 'video-video-photo')) ==  634
+        assert len(
+            aggregated_db.objects(groups=['train', 'dev', 'eval'])) == 2510
+        assert len(aggregated_db.objects(groups=['train', 'dev'])) == 1608
+        assert len(aggregated_db.objects(groups=['train'])) == 752
+
+        assert len(aggregated_db.objects(groups='train')) == 752
+        assert len(aggregated_db.objects(groups='dev')) == 856
+        assert len(aggregated_db.objects(groups='eval')) == 902
+
+        assert len(
+            aggregated_db.objects(
+                groups=['train', 'dev', 'eval'], protocol='grandtest')) == 2510
+        assert len(
+            aggregated_db.objects(
+                groups=['train', 'dev', 'eval'],
+                protocol='grandtest',
+                purposes='real')) == 660
+        assert len(
+            aggregated_db.objects(
+                groups=['train', 'dev', 'eval'],
+                protocol='grandtest',
+                purposes='attack')) == 1850
+
+        assert len(
+            aggregated_db.objects(
+                groups=['train', 'dev', 'eval'],
+                protocol='photo-photo-video')) == 1664
+        assert len(
+            aggregated_db.objects(
+                groups=['train', 'dev'], protocol='photo-photo-video')) == 1176
+        assert len(
+            aggregated_db.objects(groups='eval',
+                                  protocol='photo-photo-video')) == 488
+
+        assert len(
+            aggregated_db.objects(
+                groups=['train', 'dev', 'eval'],
+                protocol='video-video-photo')) == 1506
+        assert len(
+            aggregated_db.objects(
+                groups=['train', 'dev'], protocol='video-video-photo')) == 872
+        assert len(
+            aggregated_db.objects(groups='eval',
+                                  protocol='video-video-photo')) == 634
 
     except IOError as e:
         raise SkipTest(
-            "The database could not be queried; probably the db.sql3 file is missing. Here is the error: '%s'" % e)
+            "The database could not be queried; probably the db.sql3 file is missing. Here is the error: '%s'"
+            % e)
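
The assertions above double as documentation of the database query API. Below is a hedged usage sketch, assuming the replay-attack configuration and its db.sql3 file are installed (the SkipTest branch fires when they are not):

    import bob.bio.base

    database = bob.bio.base.load_resource(
        'replay-attack',
        'database',
        preferred_package='bob.pad.face',
        package_prefix='bob.pad.')

    # Count the real-access samples of the grandtest protocol across all
    # groups; per the test above, this is 200 for replay-attack.
    real = database.objects(
        groups=['train', 'dev', 'eval'],
        protocol='grandtest',
        purposes='real')
    print(len(real))
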
diff --git a/bob/pad/face/utils/face_detection_utils.py b/bob/pad/face/utils/face_detection_utils.py
index 6bb51b45..164b153b 100644
--- a/bob/pad/face/utils/face_detection_utils.py
+++ b/bob/pad/face/utils/face_detection_utils.py
@@ -1,6 +1,5 @@
 #!/usr/bin/env python
 # vim: set fileencoding=utf-8 :
-
 """
 This file contains face detection utils.
 """
@@ -13,7 +12,6 @@ import numpy as np
 
 #==============================================================================
 def get_eye_pos(lm):
-
     """
     This function returns the locations of the left and right eyes
 
@@ -34,17 +32,17 @@ def get_eye_pos(lm):
 
     # Mean position of eye corners as eye centers, cast to int()
 
-    left_eye_t = (lm[36,:] + lm[39,:])/2.0
-    right_eye_t = (lm[42,:] + lm[45,:])/2.0
+    left_eye_t = (lm[36, :] + lm[39, :]) / 2.0
+    right_eye_t = (lm[42, :] + lm[45, :]) / 2.0
 
-    right_eye = (int(left_eye_t[1]),int(left_eye_t[0]))
-    left_eye = (int(right_eye_t[1]),int(right_eye_t[0]))
+    right_eye = (int(left_eye_t[1]), int(left_eye_t[0]))
+    left_eye = (int(right_eye_t[1]), int(right_eye_t[0]))
 
-    return right_eye,left_eye
+    return right_eye, left_eye
 
 
 #==============================================================================
-def detect_face_in_image(image, method = "dlib"):
+def detect_face_in_image(image, method="dlib"):
     """
     This function detects a face in the input image.
 
@@ -73,13 +71,14 @@ def detect_face_in_image(image, method = "dlib"):
         raise ImportError("No module named bob.ip." + method)
 
     if not hasattr(face_detection_module, 'FaceDetector'):
-        raise AttributeError("bob.ip." + method + " module has no attribute FaceDetector")
+        raise AttributeError(
+            "bob.ip." + method + " module has no attribute FaceDetector")
 
     data = face_detection_module.FaceDetector().detect_single_face(image)
 
     annotations = {}
 
-    if ( data is not None ) and ( not all([x is None for x in data]) ):
+    if (data is not None) and (not all([x is None for x in data])):
 
         bounding_box = data[0]
 
@@ -91,7 +90,7 @@ def detect_face_in_image(image, method = "dlib"):
 
 
 #==============================================================================
-def detect_faces_in_video(frame_container, method = "dlib"):
+def detect_faces_in_video(frame_container, method="dlib"):
     """
     This function detects a face in each frame of the input video.
 
@@ -132,7 +131,7 @@ def detect_faces_in_video(frame_container, method = "dlib"):
 
 
 #==============================================================================
-def detect_face_landmarks_in_image(image, method = "dlib"):
+def detect_face_landmarks_in_image(image, method="dlib"):
     """
     This function detects a face in the input image. Two options are available for the face detector, but the landmark detector is always the same.
 
@@ -159,11 +158,12 @@ def detect_face_landmarks_in_image(image, method = "dlib"):
     ### Face detector
 
     try:
-        face_detection_module = importlib.import_module("bob.ip."+ method)
+        face_detection_module = importlib.import_module("bob.ip." + method)
 
     except ImportError:
 
-        print("No module named bob.ip." + method + " trying to use default method!")
+        print("No module named bob.ip." + method +
+              " trying to use default method!")
 
         try:
             face_detection_module = importlib.import_module("bob.ip.dlib")
@@ -172,17 +172,22 @@ def detect_face_landmarks_in_image(image, method = "dlib"):
             raise ImportError("No module named bob.ip.dlib")
 
     if not hasattr(face_detection_module, 'FaceDetector'):
-        raise AttributeError("bob.ip." + method + " module has no attribute FaceDetector!")
+        raise AttributeError(
+            "bob.ip." + method + " module has no attribute FaceDetector!")
 
     #### Landmark detector
 
     try:
-        landmark_detection_module = importlib.import_module("bob.ip.facelandmarks")
+        landmark_detection_module = importlib.import_module(
+            "bob.ip.facelandmarks")
     except ImportError:
         raise ImportError("No module named bob.ip.facelandmarks!!")
 
-    if not hasattr(landmark_detection_module, 'detect_landmarks_on_boundingbox'):
-        raise AttributeError("bob.ip.facelandmarksmodule has no attribute detect_landmarks_on_boundingbox!")
+    if not hasattr(landmark_detection_module,
+                   'detect_landmarks_on_boundingbox'):
+        raise AttributeError(
+            "bob.ip.facelandmarksmodule has no attribute detect_landmarks_on_boundingbox!"
+        )
 
     face_detector = face_detection_module.FaceDetector()
 
@@ -190,29 +195,30 @@ def detect_face_landmarks_in_image(image, method = "dlib"):
 
     annotations = {}
 
-    if ( data is not None ) and ( not all([x is None for x in data]) ):
+    if (data is not None) and (not all([x is None for x in data])):
 
         bounding_box = data[0]
 
-        bounding_box_scaled = bounding_box.scale(0.95, True) # is ok for dlib
+        bounding_box_scaled = bounding_box.scale(0.95, True)  # is ok for dlib
 
-        lm=landmark_detection_module.detect_landmarks_on_boundingbox(image, bounding_box_scaled)
+        lm = landmark_detection_module.detect_landmarks_on_boundingbox(
+            image, bounding_box_scaled)
 
         if lm is not None:
 
-            lm=np.array(lm)
+            lm = np.array(lm)
 
-            lm=np.vstack((lm[:,1],lm[:,0])).T
+            lm = np.vstack((lm[:, 1], lm[:, 0])).T
 
             #print("LM",lm)
 
-            right_eye,left_eye = get_eye_pos(lm)
+            right_eye, left_eye = get_eye_pos(lm)
 
             points = []
 
             for i in range(lm.shape[0]):
 
-                points.append((int(lm[i,0]),int(lm[i,1])))
+                points.append((int(lm[i, 0]), int(lm[i, 1])))
 
             annotations['topleft'] = bounding_box.topleft
 
@@ -228,7 +234,7 @@ def detect_face_landmarks_in_image(image, method = "dlib"):
 
 
 #==============================================================================
-def detect_face_landmarks_in_video(frame_container, method = "dlib"):
+def detect_face_landmarks_in_video(frame_container, method="dlib"):
     """
     This function detects a face and face landmarks in each frame of the input video.
 
@@ -259,18 +265,10 @@ def detect_face_landmarks_in_video(frame_container, method = "dlib"):
 
         image = frame[1]
 
-        frame_annotations = detect_face_landmarks_in_image(image, method);
+        frame_annotations = detect_face_landmarks_in_image(image, method)
 
         if frame_annotations:
 
             annotations[str(idx)] = frame_annotations
 
     return annotations
-
-
-
-
-
-
-
-
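
A minimal usage sketch for the detector reformatted above, assuming bob.ip.dlib is installed and reusing the test image shipped with bob.pad.face (the load/datafile helpers are the usual bob.io.base ones, as used in test.py):

    from bob.io.base import load
    from bob.io.base.test_utils import datafile

    from bob.pad.face.utils.face_detection_utils import detect_face_in_image

    image = load(datafile('test_image.png', 'bob.pad.face.test'))

    # Per the function body above, an empty dict comes back when no face is
    # found; otherwise 'topleft' and 'bottomright' coordinates are filled in.
    annotations = detect_face_in_image(image, method="dlib")
    print(annotations)
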
diff --git a/bootstrap-buildout.py b/bootstrap-buildout.py
index a4599211..39449d35 100644
--- a/bootstrap-buildout.py
+++ b/bootstrap-buildout.py
@@ -43,40 +43,51 @@ this script from going over the network.
 '''
 
 parser = OptionParser(usage=usage)
-parser.add_option("--version",
-                  action="store_true", default=False,
-                  help=("Return bootstrap.py version."))
-parser.add_option("-t", "--accept-buildout-test-releases",
-                  dest='accept_buildout_test_releases',
-                  action="store_true", default=False,
-                  help=("Normally, if you do not specify a --version, the "
-                        "bootstrap script and buildout gets the newest "
-                        "*final* versions of zc.buildout and its recipes and "
-                        "extensions for you.  If you use this flag, "
-                        "bootstrap and buildout will get the newest releases "
-                        "even if they are alphas or betas."))
-parser.add_option("-c", "--config-file",
-                  help=("Specify the path to the buildout configuration "
-                        "file to be used."))
-parser.add_option("-f", "--find-links",
-                  help=("Specify a URL to search for buildout releases"))
-parser.add_option("--allow-site-packages",
-                  action="store_true", default=False,
-                  help=("Let bootstrap.py use existing site packages"))
-parser.add_option("--buildout-version",
-                  help="Use a specific zc.buildout version")
-parser.add_option("--setuptools-version",
-                  help="Use a specific setuptools version")
-parser.add_option("--setuptools-to-dir",
-                  help=("Allow for re-use of existing directory of "
-                        "setuptools versions"))
+parser.add_option(
+    "--version",
+    action="store_true",
+    default=False,
+    help=("Return bootstrap.py version."))
+parser.add_option(
+    "-t",
+    "--accept-buildout-test-releases",
+    dest='accept_buildout_test_releases',
+    action="store_true",
+    default=False,
+    help=("Normally, if you do not specify a --version, the "
+          "bootstrap script and buildout gets the newest "
+          "*final* versions of zc.buildout and its recipes and "
+          "extensions for you.  If you use this flag, "
+          "bootstrap and buildout will get the newest releases "
+          "even if they are alphas or betas."))
+parser.add_option(
+    "-c",
+    "--config-file",
+    help=("Specify the path to the buildout configuration "
+          "file to be used."))
+parser.add_option(
+    "-f",
+    "--find-links",
+    help=("Specify a URL to search for buildout releases"))
+parser.add_option(
+    "--allow-site-packages",
+    action="store_true",
+    default=False,
+    help=("Let bootstrap.py use existing site packages"))
+parser.add_option(
+    "--buildout-version", help="Use a specific zc.buildout version")
+parser.add_option(
+    "--setuptools-version", help="Use a specific setuptools version")
+parser.add_option(
+    "--setuptools-to-dir",
+    help=("Allow for re-use of existing directory of "
+          "setuptools versions"))
 
 options, args = parser.parse_args()
 if options.version:
     print("bootstrap.py version %s" % __version__)
     sys.exit(0)
 
-
 ######################################################################
 # load/install setuptools
 
@@ -87,9 +98,9 @@ except ImportError:
 
 ez = {}
 if os.path.exists('ez_setup.py'):
-    exec(open('ez_setup.py').read(), ez)
+    exec(open('ez_setup.py').read(), ez)
 else:
-    exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)
+    exec(urlopen('https://bootstrap.pypa.io/ez_setup.py').read(), ez)
 
 if not options.allow_site_packages:
     # ez_setup imports site, which adds site packages
@@ -104,8 +115,9 @@ if not options.allow_site_packages:
             # are not sys.prefix; this is because on Windows
             # sys.prefix is a site-package directory.
             if sitepackage_path != sys.prefix:
-                sys.path[:] = [x for x in sys.path
-                               if sitepackage_path not in x]
+                sys.path[:] = [
+                    x for x in sys.path if sitepackage_path not in x
+                ]
 
 setup_args = dict(to_dir=tmpeggs, download_delay=0)
 
@@ -133,17 +145,17 @@ setuptools_path = ws.find(
     pkg_resources.Requirement.parse('setuptools')).location
 
 # Fix sys.path here as easy_install.pth added before PYTHONPATH
-cmd = [sys.executable, '-c',
-       'import sys; sys.path[0:0] = [%r]; ' % setuptools_path +
-       'from setuptools.command.easy_install import main; main()',
-       '-mZqNxd', tmpeggs]
+cmd = [
+    sys.executable, '-c',
+    'import sys; sys.path[0:0] = [%r]; ' % setuptools_path +
+    'from setuptools.command.easy_install import main; main()', '-mZqNxd',
+    tmpeggs
+]
 
 find_links = os.environ.get(
-    'bootstrap-testing-find-links',
-    options.find_links or
-    ('http://downloads.buildout.org/'
-     if options.accept_buildout_test_releases else None)
-    )
+    'bootstrap-testing-find-links', options.find_links
+    or ('http://downloads.buildout.org/'
+        if options.accept_buildout_test_releases else None))
 if find_links:
     cmd.extend(['-f', find_links])
 
@@ -167,7 +179,7 @@ if version is None and not options.accept_buildout_test_releases:
     index = setuptools.package_index.PackageIndex(
         search_path=[setuptools_path])
     if find_links:
-        index.add_find_links((find_links,))
+        index.add_find_links((find_links, ))
     req = pkg_resources.Requirement.parse(requirement)
     if index.obtain(req) is not None:
         best = []
@@ -189,8 +201,7 @@ cmd.append(requirement)
 
 import subprocess
 if subprocess.call(cmd) != 0:
-    raise Exception(
-        "Failed to execute command:\n%s" % repr(cmd)[1:-1])
+    raise Exception("Failed to execute command:\n%s" % repr(cmd)[1:-1])
 
 ######################################################################
 # Import and run buildout
diff --git a/doc/conf.py b/doc/conf.py
index 2dbdd540..ca122937 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -6,7 +6,6 @@ import sys
 import glob
 import pkg_resources
 
-
 # -- General configuration -----------------------------------------------------
 
 # If your documentation needs a minimal Sphinx version, state it here.
@@ -26,7 +25,7 @@ extensions = [
     'sphinx.ext.napoleon',
     'sphinx.ext.viewcode',
     #'matplotlib.sphinxext.plot_directive'
-    ]
+]
 
 import sphinx
 if sphinx.__version__ >= "1.4.1":
@@ -48,7 +47,7 @@ if os.path.exists('nitpick-exceptions.txt'):
             continue
         dtype, target = line.split(None, 1)
         target = target.strip()
-        try: # python 2.x
+        try:  # python 2.x
             target = unicode(target)
         except NameError:
             pass
@@ -135,7 +134,6 @@ project_variable = project.replace('.', '_')
 short_description = u'Presentation Attack Detection in Face Biometrics'
 owner = [u'Idiap Research Institute']
 
-
 # -- Options for HTML output ---------------------------------------------------
 
 # The theme to use for HTML and HTML Help pages.  See the documentation for
@@ -216,7 +214,6 @@ html_favicon = 'img/favicon.ico'
 # Output file base name for HTML help builder.
 htmlhelp_basename = project_variable + u'_doc'
 
-
 # -- Post configuration --------------------------------------------------------
 
 # Included after all input documents
@@ -224,45 +221,46 @@ rst_epilog = """
 .. |project| replace:: Bob
 .. |version| replace:: %s
 .. |current-year| date:: %%Y
-""" % (version,)
+""" % (version, )
 
 # Default processing flags for sphinx
 autoclass_content = 'class'
 autodoc_member_order = 'bysource'
 autodoc_default_flags = [
-  'members',
-  'undoc-members',
-  'show-inheritance',
-  ]
+    'members',
+    'undoc-members',
+    'show-inheritance',
+]
 
 # For inter-documentation mapping:
 from bob.extension.utils import link_documentation, load_requirements
 sphinx_requirements = "extra-intersphinx.txt"
 if os.path.exists(sphinx_requirements):
-  intersphinx_mapping = link_documentation(
-      additional_packages=['python','numpy'] + \
-          load_requirements(sphinx_requirements)
-          )
+    intersphinx_mapping = link_documentation(
+        additional_packages=['python','numpy'] + \
+            load_requirements(sphinx_requirements)
+            )
 else:
-  intersphinx_mapping = link_documentation()
-
+    intersphinx_mapping = link_documentation()
 
 # We want to remove all private (i.e. _. or __.__) members
 # that are not in the list of accepted functions
 accepted_private_functions = ['__array__']
 
+
 def member_function_test(app, what, name, obj, skip, options):
-  # test if we have a private function
-  if len(name) > 1 and name[0] == '_':
-    # test if this private function should be allowed
-    if name not in accepted_private_functions:
-      # omit privat functions that are not in the list of accepted private functions
-      return skip
-    else:
-      # test if the method is documented
-      if not hasattr(obj, '__doc__') or not obj.__doc__:
-        return skip
-  return False
+    # test if we have a private function
+    if len(name) > 1 and name[0] == '_':
+        # test if this private function should be allowed
+        if name not in accepted_private_functions:
+            # omit private functions that are not in the list of accepted private functions
+            return skip
+        else:
+            # test if the method is documented
+            if not hasattr(obj, '__doc__') or not obj.__doc__:
+                return skip
+    return False
+
 
 def setup(app):
-  app.connect('autodoc-skip-member', member_function_test)
+    app.connect('autodoc-skip-member', member_function_test)
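
A hypothetical illustration of what the handler above receives from Sphinx's autodoc-skip-member event; the Dummy class and the argument values are made up and are not part of conf.py:

    class Dummy(object):
        def __array__(self):
            """Documented, so the handler keeps it."""

    # '__array__' is in accepted_private_functions and documented, so the
    # handler falls through to 'return False', i.e. the member is kept.
    assert member_function_test(
        app=None, what='method', name='__array__',
        obj=Dummy.__array__, skip=True, options=None) is False
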
diff --git a/setup.py b/setup.py
index 5994338a..f3f1ad2e 100644
--- a/setup.py
+++ b/setup.py
@@ -2,7 +2,7 @@
 # vim: set fileencoding=utf-8 :
 
 from setuptools import setup, dist
-dist.Distribution(dict(setup_requires = ['bob.extension']))
+dist.Distribution(dict(setup_requires=['bob.extension']))
 
 # load the requirements.txt for additional requirements
 from bob.extension.utils import load_requirements, find_packages
@@ -14,31 +14,31 @@ setup(
 
     # This is the basic information about your project. Modify all this
     # information before releasing code publicly.
-    name = 'bob.pad.face',
-    version = open("version.txt").read().rstrip(),
-    description = 'Implements tools for spoofing or presentation attack detection in face biometrics',
-
-    url = 'https://gitlab.idiap.ch/bob/bob.pad.face',
-    license = 'GPLv3',
-    author = 'Olegs Nikisins',
-    author_email = 'olegs.nikisins@idiap.ch',
-    keywords = 'bob',
+    name='bob.pad.face',
+    version=open("version.txt").read().rstrip(),
+    description=
+    'Implements tools for spoofing or presentation attack detection in face biometrics',
+    url='https://gitlab.idiap.ch/bob/bob.pad.face',
+    license='GPLv3',
+    author='Olegs Nikisins',
+    author_email='olegs.nikisins@idiap.ch',
+    keywords='bob',
 
     # If you have a better, long description of your package, place it on the
     # 'doc' directory and then hook it here
-    long_description = open('README.rst').read(),
+    long_description=open('README.rst').read(),
 
     # This line is required for any distutils based packaging.
     # It will find all package-data inside the 'bob' directory.
-    packages = find_packages('bob'),
-    include_package_data = True,
+    packages=find_packages('bob'),
+    include_package_data=True,
 
     # This line defines which packages should be installed when you "install"
     # this package. All packages that are mentioned here, but are not installed
     # on the current system will be installed locally and only visible to the
     # scripts of this package. Don't worry - You won't need administrative
     # privileges when using buildout.
-    install_requires = install_requires,
+    install_requires=install_requires,
 
     # This entry defines which scripts you will have inside the 'bin' directory
     # once you install the package (or run 'bin/buildout'). The order of each
@@ -54,12 +54,12 @@ setup(
     #
     # In this simple example we will create a single program that will print
     # the version of bob.
-    entry_points = {
+    entry_points={
 
         # scripts should be declared using this entry:
-        'console_scripts' : [
+        'console_scripts': [
             'version.py = bob.pad.face.script.version:main',
-            ],
+        ],
 
         # registered databases:
         'bob.pad.database': [
@@ -68,7 +68,7 @@ setup(
             'msu-mfsd = bob.pad.face.config.database.msu_mfsd:database',
             'aggregated-db = bob.pad.face.config.database.aggregated_db:database',
             'mifs = bob.pad.face.config.database.mifs:database',
-            ],
+        ],
 
         # registered configurations:
         'bob.bio.config': [
@@ -82,10 +82,8 @@ setup(
             # baselines using SVM:
             'lbp-svm = bob.pad.face.config.lbp_svm',
             'lbp-svm-aggregated-db = bob.pad.face.config.lbp_svm_aggregated_db',
-
             'qm-svm = bob.pad.face.config.qm_svm',
             'qm-svm-aggregated-db = bob.pad.face.config.qm_svm_aggregated_db',
-
             'frame-diff-svm = bob.pad.face.config.frame_diff_svm',
             'frame-diff-svm-aggregated-db = bob.pad.face.config.frame_diff_svm_aggregated_db',
 
@@ -94,51 +92,50 @@ setup(
             'qm-one-class-svm-cascade-aggregated-db = bob.pad.face.config.qm_one_class_svm_cascade_aggregated_db',
 
             # baselines using LR:
-            'qm-lr = bob.pad.face.config.qm_lr', # this pipe-line can be used both for individual and Aggregated databases.
+            'qm-lr = bob.pad.face.config.qm_lr',  # this pipeline can be used for both individual and Aggregated databases.
 
             # baselines using GMM:
-            'qm-one-class-gmm = bob.pad.face.config.qm_one_class_gmm', # this pipe-line can be used both for individual and Aggregated databases.
-            ],
+            'qm-one-class-gmm = bob.pad.face.config.qm_one_class_gmm',  # this pipeline can be used for both individual and Aggregated databases.
+        ],
 
         # registered preprocessors:
         'bob.pad.preprocessor': [
-            'empty-preprocessor = bob.pad.face.config.preprocessor.filename:empty_preprocessor', # no preprocessing
-            'rgb-face-detect-dlib = bob.pad.face.config.preprocessor.video_face_crop:rgb_face_detector_dlib', # detect faces locally replacing database annotations
-            'rgb-face-detect-mtcnn = bob.pad.face.config.preprocessor.video_face_crop:rgb_face_detector_mtcnn', # detect faces locally replacing database annotations
-            ],
+            'empty-preprocessor = bob.pad.face.config.preprocessor.filename:empty_preprocessor',  # no preprocessing
+            'rgb-face-detect-dlib = bob.pad.face.config.preprocessor.video_face_crop:rgb_face_detector_dlib',  # detect faces locally replacing database annotations
+            'rgb-face-detect-mtcnn = bob.pad.face.config.preprocessor.video_face_crop:rgb_face_detector_mtcnn',  # detect faces locally replacing database annotations
+        ],
 
         # registered extractors:
         'bob.pad.extractor': [
             'video-lbp-histogram-extractor-n8r1-uniform = bob.pad.face.config.extractor.video_lbp_histogram:video_lbp_histogram_extractor_n8r1_uniform',
             'video-quality-measure-galbally-msu = bob.pad.face.config.extractor.video_quality_measure:video_quality_measure_galbally_msu',
             'frame-diff-feat-extr-w20-over0 = bob.pad.face.config.extractor.frame_diff_features:frame_diff_feat_extr_w20_over0',
-            ],
+        ],
 
         # registered algorithms:
         'bob.pad.algorithm': [
             'video-svm-pad-algorithm-10k-grid-mean-std = bob.pad.face.config.algorithm.video_svm_pad_algorithm:video_svm_pad_algorithm_10k_grid_mean_std',
             'video-svm-pad-algorithm-10k-grid-mean-std-frame-level = bob.pad.face.config.algorithm.video_svm_pad_algorithm:video_svm_pad_algorithm_10k_grid_mean_std_frame_level',
             'video-svm-pad-algorithm-default-svm-param-mean-std-frame-level = bob.pad.face.config.algorithm.video_svm_pad_algorithm:video_svm_pad_algorithm_default_svm_param_mean_std_frame_level',
-            ],
+        ],
 
         # registered grid configurations:
         'bob.pad.grid': [
             'idiap = bob.pad.face.config.grid:idiap',
             'idiap-user-machines = bob.pad.face.config.grid:idiap_user_machines',
-            ],
-
+        ],
     },
 
     # Classifiers are important if you plan to distribute this package through
     # PyPI. You can find the complete list of classifiers that are valid and
     # useful here (http://pypi.python.org/pypi?%3Aaction=list_classifiers).
-    classifiers = [
-      'Framework :: Bob',
-      'Development Status :: 3 - Alpha',
-      'Intended Audience :: Developers',
-      'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
-      'Natural Language :: English',
-      'Programming Language :: Python',
-      'Topic :: Scientific/Engineering :: Artificial Intelligence',
+    classifiers=[
+        'Framework :: Bob',
+        'Development Status :: 3 - Alpha',
+        'Intended Audience :: Developers',
+        'License :: OSI Approved :: GNU General Public License v3 (GPLv3)',
+        'Natural Language :: English',
+        'Programming Language :: Python',
+        'Topic :: Scientific/Engineering :: Artificial Intelligence',
     ],
 )
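
For context, a hedged sketch of how the entry points declared above are resolved at runtime, assuming the package has been installed (for example via the buildout bootstrapped earlier in this patch):

    import pkg_resources

    # List the PAD databases this package registers under 'bob.pad.database'.
    for entry_point in pkg_resources.iter_entry_points('bob.pad.database'):
        print(entry_point.name, '->', entry_point.module_name)
    # e.g.: replay-attack -> bob.pad.face.config.database.replay_attack
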
-- 
GitLab