diff --git a/bob/paper/mccnn/tifs2018/config/FASNet_config.py b/bob/paper/mccnn/tifs2018/config/FASNet_config.py
index 86978a92822c2744fb4d70bfef6f833dface8377..fe0fa620ae95977ef5c3506f33b9af16559ef3d1 100644
--- a/bob/paper/mccnn/tifs2018/config/FASNet_config.py
+++ b/bob/paper/mccnn/tifs2018/config/FASNet_config.py
@@ -17,6 +17,8 @@ from torchvision import transforms
 
 from bob.learn.pytorch.datasets import ChannelSelect
 
+import os
+
 # names of the channels to process:
 _channel_names = ['color', 'depth', 'infrared', 'thermal']
 
@@ -98,29 +100,27 @@ from bob.bio.video.extractor import Wrapper
 
 # If you want to use the pretrained model
 
-USE_PRETRAINED_MODEL=True
+USE_PRETRAINED_MODEL = True
 
 if USE_PRETRAINED_MODEL:
 
-  import pkg_resources
+    import pkg_resources
 
-  MODEL_FILE = pkg_resources.resource_filename('bob.paper.mccnn.tifs2018', 'models/fasnet.pth')
+    MODEL_FILE = pkg_resources.resource_filename('bob.paper.mccnn.tifs2018', 'models/fasnet.pth')
 
-  URL = 'https://www.idiap.ch/software/bob/data/bob/bob.paper.mccnn.tifs2018/master/fasnet.pth'
+    URL = 'https://www.idiap.ch/software/bob/data/bob/bob.paper.mccnn.tifs2018/master/fasnet.pth'
 
-  if not os.path.exists(MODEL_FILE):
+    if not os.path.exists(MODEL_FILE):
 
-      logger.info('Downloading the FASNet model')
+        logger.info('Downloading the FASNet model')
 
-      bob.extension.download.download_file(URL, MODEL_FILE)
+        bob.extension.download.download_file(URL, MODEL_FILE)
 
-      logger.info('Downloaded FASNet model to location: {}'.format(MODEL_FILE))
+        logger.info('Downloaded FASNet model to location: {}'.format(MODEL_FILE))
 
 else:
 
-  MODEL_FILE= None # Replace with '<PATH_TO_MODEL>'
-
-
+    MODEL_FILE = None  # Replace with '<PATH_TO_MODEL>'
 
 
 _img_transform = transforms.Compose([transforms.ToPILImage(), transforms.Resize(224, interpolation=2), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406],
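The hunk above implements a lazy-download pattern: the packaged model path is resolved first, and the weights are fetched only if the file is missing. A minimal standalone sketch of the same idea, using urllib in place of the project's bob.extension.download helper (the cache path below is hypothetical; the URL is the one from the config):

.. code-block:: python

    import os
    import urllib.request

    URL = 'https://www.idiap.ch/software/bob/data/bob/bob.paper.mccnn.tifs2018/master/fasnet.pth'
    MODEL_FILE = os.path.expanduser('~/.cache/fasnet.pth')  # hypothetical cache location

    if not os.path.exists(MODEL_FILE):
        os.makedirs(os.path.dirname(MODEL_FILE), exist_ok=True)
        # fetch the pretrained FASNet weights only once
        urllib.request.urlretrieve(URL, MODEL_FILE)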
diff --git a/bob/paper/mccnn/tifs2018/database/batl_db_color.py b/bob/paper/mccnn/tifs2018/database/batl_db_color.py
index 09060cf6b9f5fe9dd3e5728d12d45b256b814a6b..fc83b38a91cd52e868c9a4317910737458ef77e7 100644
--- a/bob/paper/mccnn/tifs2018/database/batl_db_color.py
+++ b/bob/paper/mccnn/tifs2018/database/batl_db_color.py
@@ -14,7 +14,7 @@ ORIGINAL_EXTENSION = ".h5"  # extension of the data files
 
 ANNOTATIONS_TEMP_DIR = "[YOUR_WMCA_ANNOTATIONS_DIRECTORY]"
 
-unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+unseen_protocols = ['', '-LOO_fakehead', '-LOO_flexiblemask', '-LOO_glasses', '-LOO_papermask', '-LOO_prints', '-LOO_replay', '-LOO_rigidmask']
 
 PROTOCOL = 'grandtest-color-50'+unseen_protocols[0]
 
@@ -22,10 +22,10 @@ database = BatlPadDatabase(
     protocol=PROTOCOL,
     original_directory=ORIGINAL_DIRECTORY,
     original_extension=ORIGINAL_EXTENSION,
-    landmark_detect_method="mtcnn",  
+    landmark_detect_method="mtcnn",
     exclude_attacks_list=['makeup'],
-    exclude_pai_all_sets=True, 
-    append_color_face_roi_annot=False) 
+    exclude_pai_all_sets=True,
+    append_color_face_roi_annot=False)
 
 """The :py:class:`bob.pad.base.database.BatlPadDatabase` derivative with BATL Db
 database settings.
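The ``unseen_protocols`` list is duplicated across all of these database configs, and the active protocol is just the grandtest base plus one suffix indexed from that list. A plain-Python sketch enumerating every protocol name the single-channel configs can select:

.. code-block:: python

    channels = ['color', 'depth', 'infrared', 'thermal']
    unseen_protocols = ['', '-LOO_fakehead', '-LOO_flexiblemask', '-LOO_glasses',
                        '-LOO_papermask', '-LOO_prints', '-LOO_replay', '-LOO_rigidmask']

    for channel in channels:
        for suffix in unseen_protocols:
            # e.g. 'grandtest-color-50' or 'grandtest-depth-50-LOO_prints'
            print('grandtest-{}-50{}'.format(channel, suffix))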
diff --git a/bob/paper/mccnn/tifs2018/database/batl_db_depth.py b/bob/paper/mccnn/tifs2018/database/batl_db_depth.py
index 5e982a6725850d237544a5ca700c90d435c66796..0803ad383730e987ee85267a0ce04307a70311df 100644
--- a/bob/paper/mccnn/tifs2018/database/batl_db_depth.py
+++ b/bob/paper/mccnn/tifs2018/database/batl_db_depth.py
@@ -14,7 +14,7 @@ ORIGINAL_EXTENSION = ".h5"  # extension of the data files
 
 ANNOTATIONS_TEMP_DIR = "[YOUR_WMCA_ANNOTATIONS_DIRECTORY]"
 
-unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+unseen_protocols = ['', '-LOO_fakehead', '-LOO_flexiblemask', '-LOO_glasses', '-LOO_papermask', '-LOO_prints', '-LOO_replay', '-LOO_rigidmask']
 
 PROTOCOL = 'grandtest-depth-50'+unseen_protocols[0]
 
@@ -22,10 +22,10 @@ database = BatlPadDatabase(
     protocol=PROTOCOL,
     original_directory=ORIGINAL_DIRECTORY,
     original_extension=ORIGINAL_EXTENSION,
-    landmark_detect_method="mtcnn",  
+    landmark_detect_method="mtcnn",
     exclude_attacks_list=['makeup'],
-    exclude_pai_all_sets=True, 
-    append_color_face_roi_annot=False) 
+    exclude_pai_all_sets=True,
+    append_color_face_roi_annot=False)
 
 """The :py:class:`bob.pad.base.database.BatlPadDatabase` derivative with BATL Db
 database settings.
diff --git a/bob/paper/mccnn/tifs2018/database/batl_db_infrared.py b/bob/paper/mccnn/tifs2018/database/batl_db_infrared.py
index e69e8c966f170db221f981fb19ffefe85d7351e6..5e14b962fbec6fc747f166f042274239356fa577 100644
--- a/bob/paper/mccnn/tifs2018/database/batl_db_infrared.py
+++ b/bob/paper/mccnn/tifs2018/database/batl_db_infrared.py
@@ -14,7 +14,7 @@ ORIGINAL_EXTENSION = ".h5"  # extension of the data files
 
 ANNOTATIONS_TEMP_DIR = "[YOUR_WMCA_ANNOTATIONS_DIRECTORY]"
 
-unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+unseen_protocols = ['', '-LOO_fakehead', '-LOO_flexiblemask', '-LOO_glasses', '-LOO_papermask', '-LOO_prints', '-LOO_replay', '-LOO_rigidmask']
 
 PROTOCOL = 'grandtest-infrared-50'+unseen_protocols[0]
 
@@ -22,10 +22,10 @@ database = BatlPadDatabase(
     protocol=PROTOCOL,
     original_directory=ORIGINAL_DIRECTORY,
     original_extension=ORIGINAL_EXTENSION,
-    landmark_detect_method="mtcnn",  
+    landmark_detect_method="mtcnn",
     exclude_attacks_list=['makeup'],
-    exclude_pai_all_sets=True, 
-    append_color_face_roi_annot=False) 
+    exclude_pai_all_sets=True,
+    append_color_face_roi_annot=False)
 
 """The :py:class:`bob.pad.base.database.BatlPadDatabase` derivative with BATL Db
 database settings.
diff --git a/bob/paper/mccnn/tifs2018/database/batl_db_rgb_ir_d_t_grandtest.py b/bob/paper/mccnn/tifs2018/database/batl_db_rgb_ir_d_t_grandtest.py
index aa05a0ec6dec25507e33e930affec234fa5791e7..6ed39f9ace3f09321f2238fbb69fe7ff0a54580c 100644
--- a/bob/paper/mccnn/tifs2018/database/batl_db_rgb_ir_d_t_grandtest.py
+++ b/bob/paper/mccnn/tifs2018/database/batl_db_rgb_ir_d_t_grandtest.py
@@ -14,7 +14,7 @@ ORIGINAL_EXTENSION = ".h5"  # extension of the data files
 
 ANNOTATIONS_TEMP_DIR = "[YOUR_WMCA_ANNOTATIONS_DIRECTORY]"
 
-unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+unseen_protocols = ['', '-LOO_fakehead', '-LOO_flexiblemask', '-LOO_glasses', '-LOO_papermask', '-LOO_prints', '-LOO_replay', '-LOO_rigidmask']
 
 PROTOCOL = 'grandtest-color*infrared*depth*thermal-50'+unseen_protocols[0]
 
@@ -22,10 +22,10 @@ database = BatlPadDatabase(
     protocol=PROTOCOL,
     original_directory=ORIGINAL_DIRECTORY,
     original_extension=ORIGINAL_EXTENSION,
-    landmark_detect_method="mtcnn",  
+    landmark_detect_method="mtcnn",
     exclude_attacks_list=['makeup'],
-    exclude_pai_all_sets=True, 
-    append_color_face_roi_annot=False) 
+    exclude_pai_all_sets=True,
+    append_color_face_roi_annot=False)
 
 """The :py:class:`bob.pad.base.database.BatlPadDatabase` derivative with BATL Db
 database settings.
diff --git a/bob/paper/mccnn/tifs2018/database/batl_db_thermal.py b/bob/paper/mccnn/tifs2018/database/batl_db_thermal.py
index c04b715d228fb0ffe639f3657efbe26f8d2f6df6..e1518f47b23397f58f38b92d5ddae699f23f964c 100644
--- a/bob/paper/mccnn/tifs2018/database/batl_db_thermal.py
+++ b/bob/paper/mccnn/tifs2018/database/batl_db_thermal.py
@@ -14,7 +14,7 @@ ORIGINAL_EXTENSION = ".h5"  # extension of the data files
 
 ANNOTATIONS_TEMP_DIR = "[YOUR_WMCA_ANNOTATIONS_DIRECTORY]"
 
-unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+unseen_protocols = ['', '-LOO_fakehead', '-LOO_flexiblemask', '-LOO_glasses', '-LOO_papermask', '-LOO_prints', '-LOO_replay', '-LOO_rigidmask']
 
 PROTOCOL = 'grandtest-thermal-50'+unseen_protocols[0]
 
@@ -22,10 +22,10 @@ database = BatlPadDatabase(
     protocol=PROTOCOL,
     original_directory=ORIGINAL_DIRECTORY,
     original_extension=ORIGINAL_EXTENSION,
-    landmark_detect_method="mtcnn",  
+    landmark_detect_method="mtcnn",
     exclude_attacks_list=['makeup'],
-    exclude_pai_all_sets=True, 
-    append_color_face_roi_annot=False) 
+    exclude_pai_all_sets=True,
+    append_color_face_roi_annot=False)
 
 """The :py:class:`bob.pad.base.database.BatlPadDatabase` derivative with BATL Db
 database settings.
diff --git a/bob/paper/mccnn/tifs2018/extractor/HaralickRDWT.py b/bob/paper/mccnn/tifs2018/extractor/HaralickRDWT.py
index 13bea64c5eb15aa8a6d9e0a0cd8929376bb46031..aede4aafb79279987e3888d431e47c1c3b2d3053 100644
--- a/bob/paper/mccnn/tifs2018/extractor/HaralickRDWT.py
+++ b/bob/paper/mccnn/tifs2018/extractor/HaralickRDWT.py
@@ -4,7 +4,7 @@ import bob.bio.video
 import bob.ip.base
 import numpy as np
 
-#Extra packages
+# Extra packages
 import pywt
 import importlib
 
@@ -42,14 +42,12 @@ class HaralickRDWT(Extractor):
             n_hor=n_hor,
             n_vert=n_vert)
 
-    
         self.dtype = dtype
-        self.wavelet=wavelet
+        self.wavelet = wavelet
         self.n_hor = n_hor
         self.n_vert = n_vert
 
-
-    def min_max_norm(self,img,do_norm):
+    def min_max_norm(self, img, do_norm):
         """
         Normalizes the image to 0-255 range based on min max range, and cast it to 'int8'
 
@@ -62,12 +60,10 @@ class HaralickRDWT(Extractor):
 
             data_n = ((img-t_min)/(t_max-t_min))*255.0
         else:
-            data_n=img.copy()
-
+            data_n = img.copy()
 
         return data_n.astype('uint8')
 
-
     def comp_block_rwdt_haralick(self, data):
         """
         Extracts RDWT decompositions and their Haralick descriptors from a gray-scale image/block.
@@ -88,35 +84,31 @@ class HaralickRDWT(Extractor):
         assert isinstance(data, np.ndarray)
 
         # 1 level SWT/ UDWT decomposition
-        
-        coeff=pywt.swt2(data, self.wavelet,1)
+
+        coeff = pywt.swt2(data, self.wavelet, 1)
         LL, (LH, HL, HH) = coeff[0]
 
-        decompositions=[LL,LH,HL,HH,data] # all four decompositions and the original data
+        decompositions = [LL, LH, HL, HH, data]  # all four decompositions and the original data
 
-        features=[]
+        features = []
 
         try:
-            mahotas=importlib.import_module('mahotas')
+            mahotas = importlib.import_module('mahotas')
         except:
-            pass # TODO: test
-
+            pass  # TODO: test
 
         for decomposition in decompositions:
 
-            ## the type should be decided; haralick needs it to be uint8
-            feat=mahotas.features.haralick(f=self.min_max_norm(decomposition,True),return_mean=True, return_mean_ptp=False,use_x_minus_y_variance=False) # this gives one mean
+            # the type should be decided; haralick needs it to be uint8
+            feat = mahotas.features.haralick(f=self.min_max_norm(decomposition, True), return_mean=True, return_mean_ptp=False, use_x_minus_y_variance=False)  # this gives one mean
 
             features.append(feat)
 
         # feature vector for the patch
-        comb_patch=np.array(features).reshape(1,-1)
+        comb_patch = np.array(features).reshape(1, -1)
 
         return comb_patch
 
-
-
-
     def __call__(self, mcdata):
         """
         Extracts RDWT+ Haralick features from multi-channel images, blockwise.
@@ -135,19 +127,19 @@ class HaralickRDWT(Extractor):
 
         assert isinstance(mcdata, np.ndarray)
 
-        if len(mcdata.shape)>2:
-            channels=mcdata.shape[0]
+        if len(mcdata.shape) > 2:
+            channels = mcdata.shape[0]
         else:
-            channels=1
-            mcdata=np.expand_dims(mcdata,0)
+            channels = 1
+            mcdata = np.expand_dims(mcdata, 0)
 
-        haralick_feat=[]
+        haralick_feat = []
 
         for channel in range(channels):
 
-            data=mcdata[channel,:]  # 2D image
+            data = mcdata[channel, :]  # 2D image
 
-            #print("data.shape",data.shape)
+            # print("data.shape",data.shape)
 
             # Make sure the data can be split into equal blocks:
             row_max = int(data.shape[0] / self.n_vert) * self.n_vert
@@ -160,8 +152,6 @@ class HaralickRDWT(Extractor):
 
             haralick_feat.append(np.array(patch_haralick_feat).flatten())
 
-        feat=np.array(haralick_feat).flatten() # flatten the features
+        feat = np.array(haralick_feat).flatten()  # flatten the features
 
         return feat
-
-       
\ No newline at end of file
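The extractor's ``comp_block_rwdt_haralick`` combines a one-level stationary wavelet transform with Haralick statistics: the four sub-bands plus the original block are each min-max normalized to uint8 and summarized by the 13 mean Haralick features. A self-contained sketch of that per-block computation ('haar' is an assumed wavelet here; the extractor's default is not visible in this hunk):

.. code-block:: python

    import numpy as np
    import pywt
    import mahotas

    def rdwt_haralick(gray, wavelet='haar'):
        # one-level undecimated (stationary) wavelet decomposition
        LL, (LH, HL, HH) = pywt.swt2(gray.astype('float64'), wavelet, 1)[0]
        feats = []
        for band in (LL, LH, HL, HH, gray):
            b = band.astype('float64')
            # Haralick needs an integer image; rescale each band to 0-255 uint8
            u8 = ((b - b.min()) / (b.max() - b.min() + 1e-12) * 255.0).astype('uint8')
            feats.append(mahotas.features.haralick(u8, return_mean=True))  # 13 mean features
        return np.concatenate(feats)  # 5 bands x 13 features = 65 values per block

    print(rdwt_haralick(np.random.randint(0, 256, (64, 64)).astype('uint8')).shape)  # (65,)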
diff --git a/bob/paper/mccnn/tifs2018/script/automate_v2.py b/bob/paper/mccnn/tifs2018/script/automate_v2.py
index 7866d3096af8a05b478604fec6188f1c93352eec..8456dc258e3708910e3fc3fd35edd508feb12269 100644
--- a/bob/paper/mccnn/tifs2018/script/automate_v2.py
+++ b/bob/paper/mccnn/tifs2018/script/automate_v2.py
@@ -1,7 +1,7 @@
 
 
 import os
-import numpy as np 
+import numpy as np
 
 import argparse
 
@@ -10,45 +10,42 @@ import datetime
 import subprocess
 
 
+# Modifiable parameters
 
-## Modififiable parameters
+PROTOCOL_INDEX = 0
 
-PROTOCOL_INDEX=0 
-
-SELECTED_CHANNELS = [0,1,2,3] 
+SELECTED_CHANNELS = [0, 1, 2, 3]
 
 ADAPTED_LAYERS = 'conv1-block1-group1-ffc'
 
 ADAPT_REF_CHANNEL = False
 
-################# Template paths
+# Template paths
 
-_template_trainer_config_path= '<PATH_TEMPLATE_TRAIN_CONFIG>'
-_template_trainer_script_path= '<PATH_TO_TEMPLATE_TRAINER_SCRIPT>'
-_template_pipeline_config_path= '<PATH_TO_PIPELINE>'
-_template_pipeline_script_path= '<PATH_TO_PIPELINE_SCRIPT_TEMPLATE>'
+_template_trainer_config_path = '<PATH_TEMPLATE_TRAIN_CONFIG>'
+_template_trainer_script_path = '<PATH_TO_TEMPLATE_TRAINER_SCRIPT>'
+_template_pipeline_config_path = '<PATH_TO_PIPELINE>'
+_template_pipeline_script_path = '<PATH_TO_PIPELINE_SCRIPT_TEMPLATE>'
 
 
-################################################  
+################################################
 
-unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+unseen_protocols = ['', '-LOO_fakehead', '-LOO_flexiblemask', '-LOO_glasses', '-LOO_papermask', '-LOO_prints', '-LOO_replay', '-LOO_rigidmask']
 
-protocols="grandtest-color-50"+unseen_protocols[PROTOCOL_INDEX] # makeup is excluded anyway here
+protocols = "grandtest-color-50"+unseen_protocols[PROTOCOL_INDEX]  # makeup is excluded anyway here
 
 UID = "_".join([str(i) for i in SELECTED_CHANNELS])+"_"+str(ADAPT_REF_CHANNEL)+"_"+ADAPTED_LAYERS+"_"+str(len(SELECTED_CHANNELS))+"_"+protocols
 
-MODEL_FILE='<MCCNN_OUTPUT_DIR>'+'/{}/model_25_0.pth'.format(UID)
-
-_trainer_saveloc='<MCCNN_OUTPUT_DIR>'+'/{}/'.format(UID)
-_score_loc= '<MCCNN_OUTPUT_DIR>'+'{}/{}/'.format(UID,protocols)
+MODEL_FILE = '<MCCNN_OUTPUT_DIR>'+'/{}/model_25_0.pth'.format(UID)
 
-_training_config_save_loc= '<MCCNN_OUTPUT_DIR>'+'/automate/'
-_training_config_save_name=_training_config_save_loc+UID+'.py'
-
-_extractor_config_saveloc='<MCCNN_OUTPUT_DIR>'+'extractor_v2_configs/automate/'
-_extractor_config_savename=_extractor_config_saveloc+UID+'.py'
+_trainer_saveloc = '<MCCNN_OUTPUT_DIR>'+'/{}/'.format(UID)
+_score_loc = '<MCCNN_OUTPUT_DIR>'+'{}/{}/'.format(UID, protocols)
 
+_training_config_save_loc = '<MCCNN_OUTPUT_DIR>'+'/automate/'
+_training_config_save_name = _training_config_save_loc+UID+'.py'
 
+_extractor_config_saveloc = '<MCCNN_OUTPUT_DIR>'+'extractor_v2_configs/automate/'
+_extractor_config_savename = _extractor_config_saveloc+UID+'.py'
 
 
 """
@@ -57,15 +54,15 @@ Generates a job script and launches it with qsub.
 """
 
 
-source_fname= parse_arguments( cmd_params=cmd_params)
+source_fname = parse_arguments(cmd_params=cmd_params)
 
 
-save_path,name=os.path.split(source_fname)
+save_path, name = os.path.split(source_fname)
 
 
 # get the index from the filename
 
-stripped_index=int(name.split('.py')[0].split('v')[1])
+stripped_index = int(name.split('.py')[0].split('v')[1])
 
 
 # sample showing contents of base
@@ -75,25 +72,18 @@ with open(source_fname, 'r') as file:
     # read a list of lines into data
     data = file.readlines()
 
-dt_line=data[31]
-
-print("DT_LINE",dt_line)
-
-print("Protocol",data[34])
-
-
+dt_line = data[31]
 
-all_log=[]
+print("DT_LINE", dt_line)
 
+print("Protocol", data[34])
 
 
-
-for  nidx, prot in zip(range(stripped_index,stripped_index+14),protocols_base):
-    print(nidx,prot)
-
-
+all_log = []
 
 
+for nidx, prot in zip(range(stripped_index, stripped_index+14), protocols_base):
+    print(nidx, prot)
 
     with open(source_fname, 'r') as file:
         # read a list of lines into data
@@ -101,57 +91,44 @@ for  nidx, prot in zip(range(stripped_index,stripped_index+14),protocols_base):
 
     #print (data)
 
+    protocol_line = "base_protocol='Protocol_{}' # 0 for other experiments\n".format(prot)
 
-    protocol_line="base_protocol='Protocol_{}' # 0 for other experiments\n".format(prot)
-
+    dt_line = data[31].replace(str(stripped_index), str(nidx))
 
-    dt_line=data[31].replace(str(stripped_index),str(nidx))
+    # print(protocol_line)
 
-
-    #print(protocol_line)
-
-    #print(dt_line)
+    # print(dt_line)
 
     ## replace in data
 
-    data[31]=dt_line
-
-    data[34]=protocol_line
+    data[31] = dt_line
 
+    data[34] = protocol_line
 
-    save_name=save_path+'/cnn_rgb_automate_v{}.py'.format(str(nidx))
-
-    #write file out
+    save_name = save_path+'/cnn_rgb_automate_v{}.py'.format(str(nidx))
 
+    # write file out
 
     with open(save_name, 'w') as file:
-        file.writelines( data )
-
+        file.writelines(data)
 
     print("isub {}".format(save_name)+"\n")
 
-
-    command="isub {}".format(save_name) + " && "
+    command = "isub {}".format(save_name) + " && "
 
     all_log.append(command)
 
-
-
     #subprocess.call(command, shell=True)
 
 
-
-
-
-
-info="generated scripte from {} to {}".format(stripped_index,stripped_index+13)
+info = "generated scripte from {} to {}".format(stripped_index, stripped_index+13)
 
 print(info)
 
 print("".join(all_log))
 
 
-log_file='<MCCNN_OUTPUT_DIR>'+'/logs.txt'
+log_file = '<MCCNN_OUTPUT_DIR>'+'/logs.txt'
 
 with open(log_file, "a") as myfile:
     myfile.write("------------------------------\n")
@@ -161,7 +138,3 @@ with open(log_file, "a") as myfile:
     myfile.write(dt_line+"\n")
     myfile.write("Generated at:"+str(datetime.datetime.now())+"\n")
     myfile.write("------------------------------\n")
-
-
-
-
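The generation loop above patches a template config at two fixed line offsets (``data[31]`` and ``data[34]``) and writes one copy per protocol. A self-contained sketch of that read-patch-write pattern (the template is synthesized in memory so the snippet runs standalone):

.. code-block:: python

    # build a 40-line dummy template so the sketch is standalone
    data = ['# line {}\n'.format(i) for i in range(40)]
    data[34] = "base_protocol='Protocol_0' # 0 for other experiments\n"

    protocols_base = ['LOO_fakehead', 'LOO_prints']  # two of the seven LOO protocols

    for nidx, prot in enumerate(protocols_base, start=1):
        patched = list(data)
        # same fixed-line substitution the script applies to data[34]
        patched[34] = "base_protocol='Protocol_{}' # 0 for other experiments\n".format(prot)
        save_name = 'cnn_rgb_automate_v{}.py'.format(nidx)
        with open(save_name, 'w') as f:
            f.writelines(patched)
        print('isub {}'.format(save_name))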
diff --git a/bob/paper/mccnn/tifs2018/script/mean_fusion.py b/bob/paper/mccnn/tifs2018/script/mean_fusion.py
index 730e672fb27f58b55aeba55ebb76bc0a9ba5e788..4de14d9b521c169fbe66204a5eb5536e0a889873 100644
--- a/bob/paper/mccnn/tifs2018/script/mean_fusion.py
+++ b/bob/paper/mccnn/tifs2018/script/mean_fusion.py
@@ -12,39 +12,38 @@ import argparse
 
 import os
 
-## Preprocessors
+# Preprocessors
 from sklearn.preprocessing import StandardScaler
 from sklearn.preprocessing import MinMaxScaler
 
-modalities=['color','thermal','infrared','depth']
-
-groups=['dev','eval']
+modalities = ['color', 'thermal', 'infrared', 'depth']
 
+groups = ['dev', 'eval']
 
 
 class Mean():
 
-    def __init__(self,weights=None):
+    def __init__(self, weights=None):
         if weights is not None:
-            self.weights=weights/sum(weights)
+            self.weights = weights/sum(weights)
         else:
-            self.weights=None
-        
-    def fit(self,X,y):
+            self.weights = None
+
+    def fit(self, X, y):
         pass
-    def predict_proba(self,X):
-        #2nd column is used
-        
+
+    def predict_proba(self, X):
+        # 2nd column is used
+
         if self.weights is not None:
-            X=X*self.weights
-        
-        prob=np.mean(X,axis=1)
-        res=np.vstack((1.0-prob,prob)).T
+            X = X*self.weights
+
+        prob = np.mean(X, axis=1)
+        res = np.vstack((1.0-prob, prob)).T
         return res
-        
 
-def parse_arguments(cmd_params=None):
 
+def parse_arguments(cmd_params=None):
     """
     Parse the command line arguments.
 
@@ -52,18 +51,17 @@ def parse_arguments(cmd_params=None):
 
     ``color_path``: py:class:`string`
         An absolute base path for extracted features from color channel
-        
+
     ``out_path``: py:class:`string`
         An absolute base path to save the concatenated features        
     """
 
     parser = argparse.ArgumentParser(description=__doc__)
-    
 
     parser.add_argument("-c", "--color-dev", type=str, help="An absolute path to the scores-dev file from color channel=.",
-                        default = "")
+                        default="")
     parser.add_argument("-s", "--save-path", type=str, help="An absolute base path to the folder to save the fused results =.",
-                        default = ".")                
+                        default=".")
 
     if cmd_params is not None:
         args = parser.parse_args(cmd_params)
@@ -76,80 +74,71 @@ def parse_arguments(cmd_params=None):
     return color_dev, save_path
 
 
-    
 def main(cmd_params=None):
-    
     """
     ./bin/python -m batl.utils.score_batl --h for help.
 
     Sample usage: ./bin/python -m -c -o 
     """
-    
-
-    color_dev, save_path = parse_arguments( cmd_params=cmd_params)
 
-    print("color_dev",color_dev)
+    color_dev, save_path = parse_arguments(cmd_params=cmd_params)
 
-    scaler=StandardScaler()
+    print("color_dev", color_dev)
 
-    train_on_dev=True
+    scaler = StandardScaler()
 
-    clf=Mean()
+    train_on_dev = True
 
+    clf = Mean()
 
-    protocol=color_dev.split('/')[-3]
+    protocol = color_dev.split('/')[-3]
 
     if save_path == ".":
 
-        save_path=protocol+'/fusion/'
-
+        save_path = protocol+'/fusion/'
 
     for group in groups:
 
-        save_name=save_path+'scores_mean_fused_'+group
+        save_name = save_path+'scores_mean_fused_'+group
 
-        eval_file_names=[]
+        eval_file_names = []
 
         for modality in modalities:
-            temp=color_dev.replace("color",modality)
-            temp=temp.replace("dev",group)
+            temp = color_dev.replace("color", modality)
+            temp = temp.replace("dev", group)
             eval_file_names.append(temp)
 
-
-        eval_df= {}
-        scores=[]
-
+        eval_df = {}
+        scores = []
 
         ## Order is important
 
         for eval_file in eval_file_names:
-            df=pd.read_csv(eval_file,sep=" ",names=['A','B','path','score'],dtype='str')
-            eval_df[eval_file]=df
+            df = pd.read_csv(eval_file, sep=" ", names=['A', 'B', 'path', 'score'], dtype='str')
+            eval_df[eval_file] = df
             scores.append(df['score'].values)
-          
-            
-        X_test=np.stack(scores).T
 
-        X_test=X_test.astype('float64')
+        X_test = np.stack(scores).T
+
+        X_test = X_test.astype('float64')
 
         # Handling NaN
-        X_test=np.nan_to_num(X_test)
+        X_test = np.nan_to_num(X_test)
 
         # Fitting and transforming scaler
 
-        #X_test=scaler.transform(X_test)
+        # X_test=scaler.transform(X_test)
 
         # Fitting classifier
-        score_predict=clf.predict_proba(X_test)[:,1]
+        score_predict = clf.predict_proba(X_test)[:, 1]
 
         #
-        df['score']=score_predict
+        df['score'] = score_predict
 
-        os.makedirs(save_path,exist_ok=True)
+        os.makedirs(save_path, exist_ok=True)
         #
         df.to_csv(save_name, sep=" ", na_rep='', float_format=None, columns=None, header=False, index=False)
 
 
 if __name__ == "__main__":
     main(cmd_params=None)
-
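The ``Mean`` fuser treats each column of ``X`` as one channel's score and returns probabilities in scikit-learn's two-column ``predict_proba`` layout, which is why the caller reads column 1. A stripped-down, unweighted restatement with toy scores, just to show the shape convention:

.. code-block:: python

    import numpy as np

    class Mean:
        """Unweighted variant of the fusion rule above: column-wise mean as P(bona fide)."""
        def predict_proba(self, X):
            prob = np.mean(X, axis=1)
            return np.vstack((1.0 - prob, prob)).T

    # rows: samples; columns: per-channel scores (color, thermal, infrared, depth)
    X = np.array([[0.9, 0.8, 0.7, 0.95],
                  [0.1, 0.2, 0.05, 0.3]])

    print(Mean().predict_proba(X)[:, 1])  # -> [0.8375 0.1625]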
diff --git a/bob/paper/mccnn/tifs2018/script/scoring.py b/bob/paper/mccnn/tifs2018/script/scoring.py
index a61b03032ca4be3543f0262bb8e6ae3e9b5bfee3..80c97d65bfa032fe229115bdad86c9e4b9b9012d 100644
--- a/bob/paper/mccnn/tifs2018/script/scoring.py
+++ b/bob/paper/mccnn/tifs2018/script/scoring.py
@@ -29,101 +29,90 @@ The measure type of the development set can be changed to compute "HTER" or
 """
 
 
-import sys, os,  glob
+import sys
+import os
+import glob
 import argparse
 import numpy
 
-import bob.bio.base.score  
+import bob.bio.base.score
 import bob.measure
 
 import pandas as pd
 
 from bob.measure import (
-    far_threshold, eer_threshold, min_hter_threshold,frr_threshold)
+    far_threshold, eer_threshold, min_hter_threshold, frr_threshold)
 
 
 class custom_df():
     """
     Custom class mimicking PrettyTable type functionality with pandas Dataframe
     """
-    def __init__(self,columns):
-        self.columns=columns
-        self.frames=[]
-        
-    def add_row(self,ccolumn):
-
-        t_df=pd.DataFrame(ccolumn, self.columns).T
-        self.frames.append(t_df)    
-        
+
+    def __init__(self, columns):
+        self.columns = columns
+        self.frames = []
+
+    def add_row(self, ccolumn):
+
+        t_df = pd.DataFrame(ccolumn, self.columns).T
+        self.frames.append(t_df)
+
     def get_df(self):
         return pd.concat(self.frames)
 
 
+def get_metrics(dev_fname, eval_fname, legend):
+
+    dev_neg, dev_pos = bob.bio.base.score.split(dev_fname, ncolumns=None, sort=True)
+
+    eval_neg, eval_pos = bob.bio.base.score.split(eval_fname, ncolumns=None, sort=True)
+
+    # for BPCER20
+
+    # compute all thresholds
+
+    bpcer_1_percent_threshold = frr_threshold(dev_neg, dev_pos, 0.01, True)  # threshold for BPCER 1%
 
+    eer_thresh = eer_threshold(dev_neg, dev_pos, True)
 
+    # Old standard
 
-def get_metrics(dev_fname,eval_fname,legend):
+    dev_far, dev_frr = bob.measure.farfrr(dev_neg, dev_pos, eer_thresh)
+    dev_hter = (dev_far + dev_frr)/2.0
 
-  dev_neg, dev_pos=bob.bio.base.score.split(dev_fname, ncolumns=None, sort=True)
+    EER = float("{0:.2f}".format(dev_hter*100))
 
-  eval_neg, eval_pos=bob.bio.base.score.split(eval_fname, ncolumns=None, sort=True)
+    eval_far, eval_frr = bob.measure.farfrr(eval_neg, eval_pos, eer_thresh)
+    eval_hter = (eval_far + eval_frr)/2.0
 
+    APCER_dev, BPCER_dev = bob.measure.farfrr(dev_neg, dev_pos, bpcer_1_percent_threshold)
 
+    ACER_dev = (APCER_dev+BPCER_dev)/2.0
 
-  # for BPCER20
+    # BPCER_dev should be 1%
 
-  ## compute all thresholds
-  
-  bpcer_1_percent_threshold=frr_threshold(dev_neg, dev_pos, 0.01, True)# threshold for BPCER 1%
-  
+    APCER_eval, BPCER_eval = bob.measure.farfrr(eval_neg, eval_pos, bpcer_1_percent_threshold)
 
-  eer_thresh = eer_threshold(dev_neg,dev_pos, True)
-  
+    ACER_eval = (APCER_eval+BPCER_eval)/2.0
 
-  # Old standard
+    APCER_dev = float("{0:.2f}".format(APCER_dev*100))
 
-  dev_far, dev_frr = bob.measure.farfrr(dev_neg,dev_pos, eer_thresh)
-  dev_hter = (dev_far + dev_frr)/2.0
+    BPCER_dev = float("{0:.2f}".format(BPCER_dev*100))
 
-  EER=float("{0:.2f}".format(dev_hter*100)) 
-  
+    ACER_dev = float("{0:.2f}".format(ACER_dev*100))
 
-  eval_far, eval_frr = bob.measure.farfrr(eval_neg, eval_pos, eer_thresh)
-  eval_hter = (eval_far + eval_frr)/2.0
-  
-  
-  APCER_dev, BPCER_dev = bob.measure.farfrr(dev_neg,dev_pos, bpcer_1_percent_threshold)
-  
-  ACER_dev= (APCER_dev+BPCER_dev)/2.0
-  
-  # BPCER_dev should be 1%
-  
-  
-  APCER_eval, BPCER_eval = bob.measure.farfrr(eval_neg,eval_pos, bpcer_1_percent_threshold)
-  
-  ACER_eval= (APCER_eval+BPCER_eval)/2.0
-  
+    APCER_eval = float("{0:.2f}".format(APCER_eval*100))
 
-  APCER_dev=float("{0:.2f}".format( APCER_dev*100))
-  
-  BPCER_dev=float("{0:.2f}".format( BPCER_dev*100))
-  
-  ACER_dev=float("{0:.2f}".format( ACER_dev*100))
-  
-  APCER_eval=float("{0:.2f}".format( APCER_eval*100))
-  
-  BPCER_eval=float("{0:.2f}".format( BPCER_eval*100))
-  
-  ACER_eval=float("{0:.2f}".format( ACER_eval*100))
-  
-  print("bpcer_1_percent_threshold : ",bpcer_1_percent_threshold,"BPCER_dev : ",BPCER_dev)
-  
+    BPCER_eval = float("{0:.2f}".format(BPCER_eval*100))
 
+    ACER_eval = float("{0:.2f}".format(ACER_eval*100))
 
-  metrics=[legend,APCER_dev,ACER_dev,APCER_eval,BPCER_eval,ACER_eval]
+    print("bpcer_1_percent_threshold : ", bpcer_1_percent_threshold, "BPCER_dev : ", BPCER_dev)
 
-  return metrics
+    metrics = [legend, APCER_dev, ACER_dev, APCER_eval, BPCER_eval, ACER_eval]
 
+    return metrics
 
 
 def parse_arguments(cmd_params=None):
@@ -136,10 +125,10 @@ def parse_arguments(cmd_params=None):
         An absolute path of the score file.
     ``eval_name``: py:class:`string`
         An absolute path of the score file.
-        
+
     ``legends``: py:class:`string`
         Legends of the score files.
-        
+
     ``save_path``: py:class:`string`
         An absolute base path for saving the performance table and ROCs.
 
@@ -147,7 +136,7 @@ def parse_arguments(cmd_params=None):
     """
 
     parser = argparse.ArgumentParser(description=__doc__)
-    
+
     parser.add_argument("-df", "--dev-score-files", nargs="+", help="A list of dev score files in Bob 4-column format",
                         default=[""])
     parser.add_argument("-ef", "--eval-score-files", nargs="+", help="A list of eval score files in Bob 4-column format",
@@ -156,7 +145,7 @@ def parse_arguments(cmd_params=None):
     parser.add_argument("-l", "--legends", nargs="+", help="Legends", default=[])
 
     parser.add_argument("-s", "--save-path", type=str, help="An absolute base path for saving the performance table and ROCs, default=.",
-                        default = "dump")        
+                        default="dump")
 
     if cmd_params is not None:
         args = parser.parse_args(cmd_params)
@@ -168,54 +157,46 @@ def parse_arguments(cmd_params=None):
     legends = args.legends
     save_path = args.save_path
 
-    return dev_file_names,eval_file_names, legends, save_path
-
+    return dev_file_names, eval_file_names, legends, save_path
 
 
 def main(cmd_params=None):
-    
     """
 
     """
-    
-
-    dev_file_names,eval_file_names, legends, save_path = parse_arguments( cmd_params=cmd_params)
 
-    assert(len(dev_file_names)==len(eval_file_names))
+    dev_file_names, eval_file_names, legends, save_path = parse_arguments(cmd_params=cmd_params)
 
-    if len(legends)!=len(eval_file_names):
-      legends=[]
-      for i in range(len(eval_file_names)):
-        tname='System_'+str(i)
+    assert(len(dev_file_names) == len(eval_file_names))
 
-        legends.append(tname)
+    if len(legends) != len(eval_file_names):
+        legends = []
+        for i in range(len(eval_file_names)):
+            tname = 'System_'+str(i)
 
+            legends.append(tname)
 
+    column_names = ["Method", "APCER", "ACER", "APCER", "BPCER", "ACER"]  # BPCER to be removed later
 
-    column_names=["Method","APCER","ACER","APCER","BPCER","ACER"] # BPCER to de removed later
-
-    df=custom_df(column_names)
-    
-    #df.add_row([EER,APCER,BPCER,ACER,BPCER10,BPCER20,BPCER100])
+    df = custom_df(column_names)
 
+    # df.add_row([EER,APCER,BPCER,ACER,BPCER10,BPCER20,BPCER100])
 
     for i in range(len(dev_file_names)):
-   
-        tmetrics=get_metrics(dev_file_names[i],eval_file_names[i],legends[i])
 
-        df.add_row(tmetrics)
+        tmetrics = get_metrics(dev_file_names[i], eval_file_names[i], legends[i])
 
-    performance_table=df.get_df()
+        df.add_row(tmetrics)
 
+    performance_table = df.get_df()
 
     print(performance_table)
-    os.makedirs(save_path,exist_ok=True)
+    os.makedirs(save_path, exist_ok=True)
 
-    savename_csv=save_path+"/performance_table.csv"
+    savename_csv = save_path+"/performance_table.csv"
 
-    performance_table.to_csv(savename_csv,index=False)
+    performance_table.to_csv(savename_csv, index=False)
 
 
 if __name__ == "__main__":
     main(cmd_params=None)
-
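``get_metrics`` fixes its operating points on the development set (the EER threshold and the threshold giving BPCER = 1%) and then reports APCER/BPCER/ACER on both sets at those thresholds. A dependency-free sketch of the rate computation at a fixed threshold (a simplification of ``bob.measure.farfrr``; tie handling may differ):

.. code-block:: python

    import numpy as np

    def apcer_bpcer_acer(neg, pos, threshold):
        """neg: attack scores, pos: bona fide scores; higher score = more bona fide."""
        apcer = float(np.mean(np.asarray(neg) >= threshold))  # attacks accepted
        bpcer = float(np.mean(np.asarray(pos) < threshold))   # bona fide rejected
        return apcer, bpcer, (apcer + bpcer) / 2.0

    rng = np.random.default_rng(0)
    neg = rng.normal(0.2, 0.1, 1000)  # toy attack scores
    pos = rng.normal(0.8, 0.1, 1000)  # toy bona fide scores

    print(apcer_bpcer_acer(neg, pos, threshold=0.5))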
diff --git a/bob/paper/mccnn/tifs2018/script/string_replacer.py b/bob/paper/mccnn/tifs2018/script/string_replacer.py
index 71daee037a7e6357eca9a48112fd32141e660b87..83c7ac012638f9dcf397ff90308da0d1db2d4f41 100644
--- a/bob/paper/mccnn/tifs2018/script/string_replacer.py
+++ b/bob/paper/mccnn/tifs2018/script/string_replacer.py
@@ -1,41 +1,38 @@
 import numpy as np
 
-string='python bin/scoring.py -df \
+string = 'python bin/scoring.py -df \
 <PATH_TO_RESULTS>/grandtest-color-50-LOO_fakehead/scores_mean_fused_dev -ef \
 <PATH_TO_RESULTS>/grandtest-color-50-LOO_fakehead/scores_mean_fused_eval'
-channels=['color','depth','infrared','thermal']
+channels = ['color', 'depth', 'infrared', 'thermal']
 
-unseen_protocols=['-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+unseen_protocols = ['-LOO_fakehead', '-LOO_flexiblemask', '-LOO_glasses', '-LOO_papermask', '-LOO_prints', '-LOO_replay', '-LOO_rigidmask']
 
-commands=[]
+commands = []
 
-for idx,protocol in enumerate(unseen_protocols):
+for idx, protocol in enumerate(unseen_protocols):
 
+    nstring = string.replace('-LOO_fakehead', protocol) + " "
 
-	nstring=string.replace('-LOO_fakehead',protocol)+ " "
+    print(nstring)
 
-	print(nstring)
-
-	commands.append(nstring)
+    commands.append(nstring)
 
 print("ALL")
 
 print("&&".join(commands))
 
 
-
-commands=[]
+commands = []
 
 for channel in channels:
 
-	nstring=string.replace('color',channel)+ " "
+    nstring = string.replace('color', channel) + " "
 
-	print(nstring)
+    print(nstring)
 
-	commands.append(nstring)
+    commands.append(nstring)
 
 
 print("ALL")
 
 print("&&".join(commands))
-
diff --git a/bob/paper/mccnn/tifs2018/script/version.py b/bob/paper/mccnn/tifs2018/script/version.py
index 6d1f941d192c658823fcd9bb6f9c6c164d0795f1..e675c943d089c48d93945765c55cb7acc5b8e25d 100644
--- a/bob/paper/mccnn/tifs2018/script/version.py
+++ b/bob/paper/mccnn/tifs2018/script/version.py
@@ -5,9 +5,9 @@
 Print the text
 """
 
-def main():
-  """Print the text"""
 
-  print ("Print test text")
-  return 0
+def main():
+    """Print the text"""
 
+    print("Print test text")
+    return 0
diff --git a/bob/paper/mccnn/tifs2018/trainer_configs/wmca_fasnet.py b/bob/paper/mccnn/tifs2018/trainer_configs/wmca_fasnet.py
index e16c8191c53ad62c8cead2a89cc8b0b66da61bc1..76a9662ee557a83fada7c458b4f690a9ab502ade 100644
--- a/bob/paper/mccnn/tifs2018/trainer_configs/wmca_fasnet.py
+++ b/bob/paper/mccnn/tifs2018/trainer_configs/wmca_fasnet.py
@@ -32,39 +32,39 @@ from bob.learn.pytorch.datasets import ChannelSelect, RandomHorizontalFlipImage
 """
 
 #==============================================================================
-# Initialize the bob database instance 
+# Initialize the bob database instance
 
-data_folder_train='<FASNET_PREPROCESSED_FOLDER>'
+data_folder_train = '<FASNET_PREPROCESSED_FOLDER>'
 
-output_base_path='<FASNET_CNN_OUTPUT_PATH>' 
+output_base_path = '<FASNET_CNN_OUTPUT_PATH>'
 
-unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+unseen_protocols = ['', '-LOO_fakehead', '-LOO_flexiblemask', '-LOO_glasses', '-LOO_papermask', '-LOO_prints', '-LOO_replay', '-LOO_rigidmask']
 
-PROTOCOL_INDEX=0 
+PROTOCOL_INDEX = 0
 ####################################################################
 
-frames=50
+frames = 50
 
-extension='.h5'
+extension = '.h5'
 
-train_groups=['train'] # only 'train' group is used for training the network
+train_groups = ['train']  # only 'train' group is used for training the network
 
-val_groups=['dev']
+val_groups = ['dev']
 
-do_crossvalidation=True
+do_crossvalidation = True
 ####################################################################
 
 if do_crossvalidation:
-	phases=['train','val']
+    phases = ['train', 'val']
 else:
-	phases=['train']
+    phases = ['train']
 
-groups={"train":['train'],"val":['dev']}
+groups = {"train": ['train'], "val": ['dev']}
 
 
-protocols="grandtest-color-50"+unseen_protocols[PROTOCOL_INDEX] # makeup is excluded anyway here
+protocols = "grandtest-color-50"+unseen_protocols[PROTOCOL_INDEX]  # makeup is excluded anyway here
 
-exlude_attacks_list=["makeup"]
+exlude_attacks_list = ["makeup"]
 
 bob_hldi_instance = BatlPadDatabase(
     protocol=protocols,
@@ -73,36 +73,35 @@ bob_hldi_instance = BatlPadDatabase(
     landmark_detect_method="mtcnn",  # detect annotations using mtcnn
     exclude_attacks_list=exlude_attacks_list,
     exclude_pai_all_sets=True,  # exclude makeup from all the sets, which is the default behavior for grandtest protocol
-    append_color_face_roi_annot=False) 
+    append_color_face_roi_annot=False)
 
 #==============================================================================
 # Initialize the torch dataset, subselect channels from the pretrained files if needed.
 
-SELECTED_CHANNELS = [0,1,2] 
+SELECTED_CHANNELS = [0, 1, 2]
 ####################################################################
 
 
-img_transform={}
+img_transform = {}
 
-img_transform['train'] = transforms.Compose([transforms.ToPILImage(),transforms.RandomHorizontalFlip(),transforms.Resize(224, interpolation=2),transforms.ToTensor(),transforms.Normalize(mean=[0.485, 0.456, 0.406],
-                                 std=[0.229, 0.224, 0.225])])
+img_transform['train'] = transforms.Compose([transforms.ToPILImage(), transforms.RandomHorizontalFlip(), transforms.Resize(224, interpolation=2), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406],
+                                                                                                                                                                                              std=[0.229, 0.224, 0.225])])
 
-img_transform['val'] = transforms.Compose([transforms.ToPILImage(),transforms.Resize(224, interpolation=2),transforms.ToTensor(),transforms.Normalize(mean=[0.485, 0.456, 0.406],
-                                 std=[0.229, 0.224, 0.225])])
+img_transform['val'] = transforms.Compose([transforms.ToPILImage(), transforms.Resize(224, interpolation=2), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406],
+                                                                                                                                                         std=[0.229, 0.224, 0.225])])
 
-dataset={}
+dataset = {}
 
 for phase in phases:
 
-	dataset[phase] = DataFolder(data_folder=data_folder_train,
-						 transform=img_transform[phase],
-						 extension='.hdf5',
-						 bob_hldi_instance=bob_hldi_instance,
-						 groups=groups[phase],
-						 protocol=protocols,
-						 purposes=['real', 'attack'],
-						 allow_missing_files=True)
-
+    dataset[phase] = DataFolder(data_folder=data_folder_train,
+                                transform=img_transform[phase],
+                                extension='.hdf5',
+                                bob_hldi_instance=bob_hldi_instance,
+                                groups=groups[phase],
+                                protocol=protocols,
+                                purposes=['real', 'attack'],
+                                allow_missing_files=True)
 
 
 #==============================================================================
@@ -116,18 +115,17 @@ ADAPT_REF_CHANNEL = False
 ####################################################################
 
 
-
 batch_size = 32
 num_workers = 0
-epochs=25
-learning_rate=0.0001
+epochs = 25
+learning_rate = 0.0001
 seed = 3
 use_gpu = False
 adapted_layers = ADAPTED_LAYERS
 adapt_reference_channel = ADAPT_REF_CHANNEL
 verbose = 2
 UID = "_".join([str(i) for i in SELECTED_CHANNELS])+"_"+str(ADAPT_REF_CHANNEL)+"_"+ADAPTED_LAYERS+"_"+str(NUM_CHANNELS)+"_"+protocols
-training_logs= output_base_path+UID+'/train_log_dir/'
+training_logs = output_base_path+UID+'/train_log_dir/'
 output_dir = output_base_path+UID
 
 
@@ -135,6 +133,6 @@ output_dir = output_base_path+UID
 # Load the architecture
 
 
-assert(len(SELECTED_CHANNELS)==NUM_CHANNELS)
+assert(len(SELECTED_CHANNELS) == NUM_CHANNELS)
 
-network=FASNet(pretrained=True)
+network = FASNet(pretrained=True)
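Both transform pipelines in this config resize to 224x224 and normalize with the ImageNet channel statistics used by the pretrained backbone; only the training pipeline adds the random horizontal flip. A quick sketch of the eval-time pipeline on a dummy frame, assuming a torchvision version that still accepts integer interpolation codes as this config does:

.. code-block:: python

    import numpy as np
    from torchvision import transforms

    tf = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize(224, interpolation=2),  # 2 == PIL.Image.BILINEAR
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],  # ImageNet statistics
                             std=[0.229, 0.224, 0.225]),
    ])

    frame = np.random.randint(0, 256, (256, 256, 3), dtype=np.uint8)  # dummy HxWxC frame
    print(tf(frame).shape)  # torch.Size([3, 224, 224])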
diff --git a/bob/paper/mccnn/tifs2018/trainer_configs/wmca_mccnn.py b/bob/paper/mccnn/tifs2018/trainer_configs/wmca_mccnn.py
index 62c473b6b226e8ce556f8963b1407cebf4a0d04e..1ac899f3b06c2639066bf151d6a1ed9e606bf597 100644
--- a/bob/paper/mccnn/tifs2018/trainer_configs/wmca_mccnn.py
+++ b/bob/paper/mccnn/tifs2018/trainer_configs/wmca_mccnn.py
@@ -32,26 +32,26 @@ from bob.learn.pytorch.datasets import ChannelSelect, RandomHorizontalFlipImage
 """
 
 #==============================================================================
-# Initialize the bob database instance 
+# Initialize the bob database instance
 
-data_folder_train='<MCCNN_PREPROCESSED_PATH>'
+data_folder_train = '<MCCNN_PREPROCESSED_PATH>'
 
-output_base_path='<MCCNN_CNN_OUTPUT_PATH>' 
+output_base_path = '<MCCNN_CNN_OUTPUT_PATH>'
 
-unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+unseen_protocols = ['', '-LOO_fakehead', '-LOO_flexiblemask', '-LOO_glasses', '-LOO_papermask', '-LOO_prints', '-LOO_replay', '-LOO_rigidmask']
 
-PROTOCOL_INDEX=0 
+PROTOCOL_INDEX = 0
 ####################################################################
 
-frames=50
+frames = 50
 
-extension='.h5'
+extension = '.h5'
 
-train_groups=['train'] # only 'train' group is used for training the network
+train_groups = ['train']  # only 'train' group is used for training the network
 
-protocols="grandtest-color-50"+unseen_protocols[PROTOCOL_INDEX] # makeup is excluded anyway here
+protocols = "grandtest-color-50"+unseen_protocols[PROTOCOL_INDEX]  # makeup is excluded anyway here
 
-exlude_attacks_list=["makeup"]
+exlude_attacks_list = ["makeup"]
 
 bob_hldi_instance_train = BatlPadDatabase(
     protocol=protocols,
@@ -60,25 +60,24 @@ bob_hldi_instance_train = BatlPadDatabase(
     landmark_detect_method="mtcnn",  # detect annotations using mtcnn
     exclude_attacks_list=exlude_attacks_list,
     exclude_pai_all_sets=True,  # exclude makeup from all the sets, which is the default behavior for grandtest protocol
-    append_color_face_roi_annot=False) 
+    append_color_face_roi_annot=False)
 
 #==============================================================================
 # Initialize the torch dataset, subselect channels from the pretrained files if needed.
 
-SELECTED_CHANNELS = [0,1,2,3] 
+SELECTED_CHANNELS = [0, 1, 2, 3]
 ####################################################################
 
-img_transform_train = transforms.Compose([ChannelSelect(selected_channels = SELECTED_CHANNELS),RandomHorizontalFlipImage(p=0.5),transforms.ToTensor()])
+img_transform_train = transforms.Compose([ChannelSelect(selected_channels=SELECTED_CHANNELS), RandomHorizontalFlipImage(p=0.5), transforms.ToTensor()])
 
 dataset = DataFolder(data_folder=data_folder_train,
-					 transform=img_transform_train,
-					 extension='.hdf5',
-					 bob_hldi_instance=bob_hldi_instance_train,
-					 groups=train_groups,
-					 protocol=protocols,
-					 purposes=['real', 'attack'],
-					 allow_missing_files=True)
-
+                     transform=img_transform_train,
+                     extension='.hdf5',
+                     bob_hldi_instance=bob_hldi_instance_train,
+                     groups=train_groups,
+                     protocol=protocols,
+                     purposes=['real', 'attack'],
+                     allow_missing_files=True)
 
 
 #==============================================================================
@@ -92,18 +91,17 @@ ADAPT_REF_CHANNEL = False
 ####################################################################
 
 
-
 batch_size = 32
 num_workers = 0
-epochs=25
-learning_rate=0.0001
+epochs = 25
+learning_rate = 0.0001
 seed = 3
 use_gpu = False
 adapted_layers = ADAPTED_LAYERS
 adapt_reference_channel = ADAPT_REF_CHANNEL
 verbose = 2
 UID = "_".join([str(i) for i in SELECTED_CHANNELS])+"_"+str(ADAPT_REF_CHANNEL)+"_"+ADAPTED_LAYERS+"_"+str(NUM_CHANNELS)+"_"+protocols
-training_logs= output_base_path+UID+'/train_log_dir/'
+training_logs = output_base_path+UID+'/train_log_dir/'
 output_dir = output_base_path+UID
 
 
@@ -111,6 +109,6 @@ output_dir = output_base_path+UID
 # Load the architecture
 
 
-assert(len(SELECTED_CHANNELS)==NUM_CHANNELS)
+assert(len(SELECTED_CHANNELS) == NUM_CHANNELS)
 
-network=MCCNN(num_channels = NUM_CHANNELS)
+network = MCCNN(num_channels=NUM_CHANNELS)
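Every run is keyed by the ``UID`` string, which concatenates the channel indices, the reference-channel flag, the adapted layers, the channel count, and the protocol; the output and log directories above are derived from it. A worked example using the default values that appear in automate_v2.py:

.. code-block:: python

    SELECTED_CHANNELS = [0, 1, 2, 3]
    ADAPT_REF_CHANNEL = False
    ADAPTED_LAYERS = 'conv1-block1-group1-ffc'
    NUM_CHANNELS = len(SELECTED_CHANNELS)
    protocols = 'grandtest-color-50'

    UID = "_".join([str(i) for i in SELECTED_CHANNELS]) + "_" + str(ADAPT_REF_CHANNEL) \
        + "_" + ADAPTED_LAYERS + "_" + str(NUM_CHANNELS) + "_" + protocols

    print(UID)  # 0_1_2_3_False_conv1-block1-group1-ffc_4_grandtest-color-50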
diff --git a/doc/index.rst b/doc/index.rst
index d2752acd1789535ea78a383d4bca7cdd354c6b57..6569c150b560ce84b6c10cd9b4a04ef5cb3441ad 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -25,9 +25,9 @@ User guide
 .. toctree::
    :maxdepth: 2
 
-   running_baslines
+   running_baselines
    running_fasnet
-   running_mccn
+   running_mccnn
    references
 
 
diff --git a/doc/running_baslines.md b/doc/running_baselines.rst
similarity index 95%
rename from doc/running_baslines.md
rename to doc/running_baselines.rst
index a1070229e00e70396a4fee858911bf5ce1e0115d..fea3a3ab97a911ebe793be4d82773b2a83f612b9 100644
--- a/doc/running_baslines.md
+++ b/doc/running_baselines.rst
@@ -14,8 +14,8 @@ can be installed as
 
 The steps to reproduce the results for the grandtest protocol are listed below.
 
-Color channel
--------------
+A. Color channel
+----------------
 
 1.A.1. IQM - LR  
 
@@ -140,7 +140,7 @@ D. Thermal channel
      <PATH_TO_BASELINE_RESULTS>/thermal/haralicksvm/grandtest-thermal-50/scores/scores-eval 
 
 
-E. Score fusion (haralick-svm)
+E. Score fusion (Haralick-SVM)
 ------------------------------
 
 .. code-block:: sh
@@ -169,12 +169,12 @@ F. Score fusion (IQM-LBP-LR)
      <PATH_TO_BASELINE_RESULTS>/mean_fusion/grandtest/scores_mean_fused_eval 
 
 
-BASELINES in LOO protocols (for scorefusion)
-============================================
+BASELINES in LOO protocols (for score fusion)
+=============================================
 
 For these experiments, first run the individual experiments for all channels, then perform the fusion. This needs to be repeated for all the protocols.
 
-A. color
+A. Color
 --------
 
 unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
@@ -184,8 +184,6 @@ The protocols can be easily indexed as
 PROTOCOL = 'grandtest-color-50'+unseen_protocols[0]
 
 
-1.
-
 .. code-block:: sh
 
      ./bin/spoof.py \
@@ -199,11 +197,9 @@ PROTOCOL = 'grandtest-color-50'+unseen_protocols[0]
 
 
 
-B. depth
+B. Depth
 --------
 
-1.
-
 .. code-block:: sh
 
      ./bin/spoof.py \
@@ -220,8 +216,6 @@ B. depth
 C. Infrared
 -----------
 
-1.
-
 .. code-block:: sh
 
      ./bin/spoof.py \
@@ -237,8 +231,6 @@ C. Infrared
 D. Thermal
 ----------
 
-1.
-
 .. code-block:: sh
 
      ./bin/spoof.py \
@@ -255,10 +247,8 @@ D. Thermal
 E. Score fusion
 ---------------
 
-The protocola are, '-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask'
-
+The protocols are: '-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask'
 
-1.
 .. code-block:: sh
 
      ./bin/python bin/mean_fusion.py -c \
@@ -274,11 +264,11 @@ The protocola are, '-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_pape
 
 Similarly, repeat the experiment for all protocols.
 
-BASELINES in LOO (haralick svm)
-===============================
+BASELINES in LOO protocols (Haralick-SVM)
+=========================================
 
 
-color
+Color
 -----
 .. code-block:: sh
 
@@ -286,8 +276,8 @@ color
 
 Repeat the same procedure for all four channels. 
 
-Scorefusion Haralick-SVM
-------------------------
+Score fusion Haralick-SVM
+-------------------------
 
 Once scores from all channels are available, run the following command.
 
@@ -295,7 +285,7 @@ Once scores from all channels are available. Run the following command.
 
      ./bin/python bin/mean_fusion.py -c <PATH_TO_BASELINE_RESULTS>/LOO/color/haralicksvm/grandtest-color-50-LOO_fakehead/grandtest-color-50-LOO_fakehead/scores/scores-dev -s <PATH_TO_BASELINE_RESULTS>/mean_fusion/haralick/grandtest-color-50-LOO_fakehead/ 
 
-scoring for Haralick Mean fusion
+Scoring for Haralick Mean fusion
 --------------------------------
 .. code-block:: sh
 
diff --git a/doc/running_fasnet.md b/doc/running_fasnet.rst
similarity index 98%
rename from doc/running_fasnet.md
rename to doc/running_fasnet.rst
index 5e139b6b4c233eb529cf38f08497904cc410c962..8a57b02edb7683cf128dfb8cee98a392e98c5894 100644
--- a/doc/running_fasnet.md
+++ b/doc/running_fasnet.rst
@@ -1,6 +1,6 @@
 
 Training FASNet for face PAD
-===========================
+============================
 
 This section describes running our implementation of FASNet on the WMCA dataset. It is **strongly recommended** to check the publication for a better understanding
 of the described work-flow.
@@ -11,6 +11,7 @@ FASNet accepts RGB images only, hence the preprocesing is done first. This can b
 
 
 .. code-block:: sh
+
 	./bin/spoof.py \
 	wmca-color \
 	fasnet \
@@ -23,8 +24,8 @@ which is notated from here onwards as  ``<FASNET_PREPROCESSED_FOLDER>``.
 
 
 
-Training FASTNET
---------------
+Training FASNet
+---------------
 Once the preprocessing is done, the next step is to train the FASNet architecture. A config file
 defines the transforms, image resolution, and training parameters such as the number of epochs, learning rate, and so on.
 
diff --git a/doc/running_mccn.md b/doc/running_mccnn.rst
similarity index 100%
rename from doc/running_mccn.md
rename to doc/running_mccnn.rst
diff --git a/dump/performance_table.csv b/dump/performance_table.csv
new file mode 100644
index 0000000000000000000000000000000000000000..82424a03b0d86b3549faa3ab8bc8ad231afcb391
--- /dev/null
+++ b/dump/performance_table.csv
@@ -0,0 +1,2 @@
+Method,APCER,ACER,APCER,BPCER,ACER
+System_0,0.68,0.84,0.6,0.0,0.3
diff --git a/submitted.sql3 b/submitted.sql3
new file mode 100644
index 0000000000000000000000000000000000000000..46418b0e76730cd3f0a48125ab2b30f96b46f7ad
Binary files /dev/null and b/submitted.sql3 differ