From 22509af93b872ad2172a65f1127abdf23447d1f3 Mon Sep 17 00:00:00 2001
From: ageorge <anjith.george@idiap.ch>
Date: Tue, 7 May 2019 14:46:58 +0200
Subject: [PATCH] Clean up and doc fixes

---
 .../mccnn/tifs2018/config/FASNet_config.py    |  22 +--
 .../mccnn/tifs2018/database/batl_db_color.py  |   8 +-
 .../mccnn/tifs2018/database/batl_db_depth.py  |   8 +-
 .../tifs2018/database/batl_db_infrared.py     |   8 +-
 .../database/batl_db_rgb_ir_d_t_grandtest.py  |   8 +-
 .../tifs2018/database/batl_db_thermal.py      |   8 +-
 .../mccnn/tifs2018/extractor/HaralickRDWT.py  |  52 +++---
 .../mccnn/tifs2018/script/automate_v2.py      | 107 +++++-------
 .../mccnn/tifs2018/script/mean_fusion.py      |  97 +++++------
 bob/paper/mccnn/tifs2018/script/scoring.py    | 155 ++++++++----------
 .../mccnn/tifs2018/script/string_replacer.py  |  27 ++-
 bob/paper/mccnn/tifs2018/script/version.py    |   8 +-
 .../tifs2018/trainer_configs/wmca_fasnet.py   |  74 ++++-----
 .../tifs2018/trainer_configs/wmca_mccnn.py    |  52 +++---
 doc/index.rst                                 |   4 +-
 ...ning_baslines.md => running_baselines.rst} |  38 ++---
 doc/{running_fasnet.md => running_fasnet.rst} |   7 +-
 doc/{running_mccn.md => running_mccnn.rst}    |   0
 dump/performance_table.csv                    |   2 +
 submitted.sql3                                | Bin 0 -> 90112 bytes
 20 files changed, 302 insertions(+), 383 deletions(-)
 rename doc/{running_baslines.md => running_baselines.rst} (95%)
 rename doc/{running_fasnet.md => running_fasnet.rst} (98%)
 rename doc/{running_mccn.md => running_mccnn.rst} (100%)
 create mode 100644 dump/performance_table.csv
 create mode 100644 submitted.sql3

diff --git a/bob/paper/mccnn/tifs2018/config/FASNet_config.py b/bob/paper/mccnn/tifs2018/config/FASNet_config.py
index 86978a9..fe0fa62 100644
--- a/bob/paper/mccnn/tifs2018/config/FASNet_config.py
+++ b/bob/paper/mccnn/tifs2018/config/FASNet_config.py
@@ -17,6 +17,8 @@ from torchvision import transforms
 
 from bob.learn.pytorch.datasets import ChannelSelect
 
+import os
+
 # names of the channels to process:
 _channel_names = ['color', 'depth', 'infrared', 'thermal']
 
@@ -98,29 +100,27 @@ from bob.bio.video.extractor import Wrapper
 
 # If you want to use the pretrained model
 
-USE_PRETRAINED_MODEL=True
+USE_PRETRAINED_MODEL = True
 
 if USE_PRETRAINED_MODEL:
 
-  import pkg_resources
+    import pkg_resources
 
-  MODEL_FILE = pkg_resources.resource_filename('bob.paper.mccnn.tifs2018', 'models/fasnet.pth')
+    MODEL_FILE = pkg_resources.resource_filename('bob.paper.mccnn.tifs2018', 'models/fasnet.pth')
 
-  URL = 'https://www.idiap.ch/software/bob/data/bob/bob.paper.mccnn.tifs2018/master/fasnet.pth'
+    URL = 'https://www.idiap.ch/software/bob/data/bob/bob.paper.mccnn.tifs2018/master/fasnet.pth'
 
-  if not os.path.exists(MODEL_FILE):
+    if not os.path.exists(MODEL_FILE):
 
-      logger.info('Downloading the FASNet model')
+        logger.info('Downloading the FASNet model')
 
-      bob.extension.download.download_file(URL, MODEL_FILE)
+        bob.extension.download.download_file(URL, MODEL_FILE)
 
-      logger.info('Downloaded FASNet model to location: {}'.format(MODEL_FILE))
+        logger.info('Downloaded FASNet model to location: {}'.format(MODEL_FILE))
 
 else:
 
-  MODEL_FILE= None # Replace with '<PATH_TO_MODEL>'
-
-
+    MODEL_FILE = None  # Replace with '<PATH_TO_MODEL>'
 
 
 _img_transform = transforms.Compose([transforms.ToPILImage(), transforms.Resize(224, interpolation=2), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406],
diff --git a/bob/paper/mccnn/tifs2018/database/batl_db_color.py b/bob/paper/mccnn/tifs2018/database/batl_db_color.py
index 09060cf..fc83b38 100644
--- a/bob/paper/mccnn/tifs2018/database/batl_db_color.py
+++ b/bob/paper/mccnn/tifs2018/database/batl_db_color.py
@@ -14,7 +14,7 @@ ORIGINAL_EXTENSION = ".h5"  # extension of the data files
 
 ANNOTATIONS_TEMP_DIR = "[YOUR_WMCA_ANNOTATIONS_DIRECTORY]"
 
-unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+unseen_protocols = ['', '-LOO_fakehead', '-LOO_flexiblemask', '-LOO_glasses', '-LOO_papermask', '-LOO_prints', '-LOO_replay', '-LOO_rigidmask']
 
 PROTOCOL = 'grandtest-color-50'+unseen_protocols[0]
 
@@ -22,10 +22,10 @@ database = BatlPadDatabase(
     protocol=PROTOCOL,
     original_directory=ORIGINAL_DIRECTORY,
     original_extension=ORIGINAL_EXTENSION,
-    landmark_detect_method="mtcnn",  
+    landmark_detect_method="mtcnn",
     exclude_attacks_list=['makeup'],
-    exclude_pai_all_sets=True, 
-    append_color_face_roi_annot=False) 
+    exclude_pai_all_sets=True,
+    append_color_face_roi_annot=False)
 
 """The :py:class:`bob.pad.base.database.BatlPadDatabase` derivative with BATL Db
 database settings.
diff --git a/bob/paper/mccnn/tifs2018/database/batl_db_depth.py b/bob/paper/mccnn/tifs2018/database/batl_db_depth.py
index 5e982a6..0803ad3 100644
--- a/bob/paper/mccnn/tifs2018/database/batl_db_depth.py
+++ b/bob/paper/mccnn/tifs2018/database/batl_db_depth.py
@@ -14,7 +14,7 @@ ORIGINAL_EXTENSION = ".h5"  # extension of the data files
 
 ANNOTATIONS_TEMP_DIR = "[YOUR_WMCA_ANNOTATIONS_DIRECTORY]"
 
-unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+unseen_protocols = ['', '-LOO_fakehead', '-LOO_flexiblemask', '-LOO_glasses', '-LOO_papermask', '-LOO_prints', '-LOO_replay', '-LOO_rigidmask']
 
 PROTOCOL = 'grandtest-depth-50'+unseen_protocols[0]
 
@@ -22,10 +22,10 @@ database = BatlPadDatabase(
     protocol=PROTOCOL,
     original_directory=ORIGINAL_DIRECTORY,
     original_extension=ORIGINAL_EXTENSION,
-    landmark_detect_method="mtcnn",  
+    landmark_detect_method="mtcnn",
     exclude_attacks_list=['makeup'],
-    exclude_pai_all_sets=True, 
-    append_color_face_roi_annot=False) 
+    exclude_pai_all_sets=True,
+    append_color_face_roi_annot=False)
 
 """The :py:class:`bob.pad.base.database.BatlPadDatabase` derivative with BATL Db
 database settings.
diff --git a/bob/paper/mccnn/tifs2018/database/batl_db_infrared.py b/bob/paper/mccnn/tifs2018/database/batl_db_infrared.py
index e69e8c9..5e14b96 100644
--- a/bob/paper/mccnn/tifs2018/database/batl_db_infrared.py
+++ b/bob/paper/mccnn/tifs2018/database/batl_db_infrared.py
@@ -14,7 +14,7 @@ ORIGINAL_EXTENSION = ".h5"  # extension of the data files
 
 ANNOTATIONS_TEMP_DIR = "[YOUR_WMCA_ANNOTATIONS_DIRECTORY]"
 
-unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+unseen_protocols = ['', '-LOO_fakehead', '-LOO_flexiblemask', '-LOO_glasses', '-LOO_papermask', '-LOO_prints', '-LOO_replay', '-LOO_rigidmask']
 
 PROTOCOL = 'grandtest-infrared-50'+unseen_protocols[0]
 
@@ -22,10 +22,10 @@ database = BatlPadDatabase(
     protocol=PROTOCOL,
     original_directory=ORIGINAL_DIRECTORY,
     original_extension=ORIGINAL_EXTENSION,
-    landmark_detect_method="mtcnn",  
+    landmark_detect_method="mtcnn",
     exclude_attacks_list=['makeup'],
-    exclude_pai_all_sets=True, 
-    append_color_face_roi_annot=False) 
+    exclude_pai_all_sets=True,
+    append_color_face_roi_annot=False)
 
 """The :py:class:`bob.pad.base.database.BatlPadDatabase` derivative with BATL Db
 database settings.
diff --git a/bob/paper/mccnn/tifs2018/database/batl_db_rgb_ir_d_t_grandtest.py b/bob/paper/mccnn/tifs2018/database/batl_db_rgb_ir_d_t_grandtest.py
index aa05a0e..6ed39f9 100644
--- a/bob/paper/mccnn/tifs2018/database/batl_db_rgb_ir_d_t_grandtest.py
+++ b/bob/paper/mccnn/tifs2018/database/batl_db_rgb_ir_d_t_grandtest.py
@@ -14,7 +14,7 @@ ORIGINAL_EXTENSION = ".h5"  # extension of the data files
 
 ANNOTATIONS_TEMP_DIR = "[YOUR_WMCA_ANNOTATIONS_DIRECTORY]"
 
-unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+unseen_protocols = ['', '-LOO_fakehead', '-LOO_flexiblemask', '-LOO_glasses', '-LOO_papermask', '-LOO_prints', '-LOO_replay', '-LOO_rigidmask']
 
 PROTOCOL = 'grandtest-color*infrared*depth*thermal-50'+unseen_protocols[0]
 
@@ -22,10 +22,10 @@ database = BatlPadDatabase(
     protocol=PROTOCOL,
     original_directory=ORIGINAL_DIRECTORY,
     original_extension=ORIGINAL_EXTENSION,
-    landmark_detect_method="mtcnn",  
+    landmark_detect_method="mtcnn",
     exclude_attacks_list=['makeup'],
-    exclude_pai_all_sets=True, 
-    append_color_face_roi_annot=False) 
+    exclude_pai_all_sets=True,
+    append_color_face_roi_annot=False)
 
 """The :py:class:`bob.pad.base.database.BatlPadDatabase` derivative with BATL Db
 database settings.
diff --git a/bob/paper/mccnn/tifs2018/database/batl_db_thermal.py b/bob/paper/mccnn/tifs2018/database/batl_db_thermal.py
index c04b715..e1518f4 100644
--- a/bob/paper/mccnn/tifs2018/database/batl_db_thermal.py
+++ b/bob/paper/mccnn/tifs2018/database/batl_db_thermal.py
@@ -14,7 +14,7 @@ ORIGINAL_EXTENSION = ".h5"  # extension of the data files
 
 ANNOTATIONS_TEMP_DIR = "[YOUR_WMCA_ANNOTATIONS_DIRECTORY]"
 
-unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+unseen_protocols = ['', '-LOO_fakehead', '-LOO_flexiblemask', '-LOO_glasses', '-LOO_papermask', '-LOO_prints', '-LOO_replay', '-LOO_rigidmask']
 
 PROTOCOL = 'grandtest-thermal-50'+unseen_protocols[0]
 
@@ -22,10 +22,10 @@ database = BatlPadDatabase(
     protocol=PROTOCOL,
     original_directory=ORIGINAL_DIRECTORY,
     original_extension=ORIGINAL_EXTENSION,
-    landmark_detect_method="mtcnn",  
+    landmark_detect_method="mtcnn",
     exclude_attacks_list=['makeup'],
-    exclude_pai_all_sets=True, 
-    append_color_face_roi_annot=False) 
+    exclude_pai_all_sets=True,
+    append_color_face_roi_annot=False)
 
 """The :py:class:`bob.pad.base.database.BatlPadDatabase` derivative with BATL Db
 database settings.
diff --git a/bob/paper/mccnn/tifs2018/extractor/HaralickRDWT.py b/bob/paper/mccnn/tifs2018/extractor/HaralickRDWT.py
index 13bea64..aede4aa 100644
--- a/bob/paper/mccnn/tifs2018/extractor/HaralickRDWT.py
+++ b/bob/paper/mccnn/tifs2018/extractor/HaralickRDWT.py
@@ -4,7 +4,7 @@ import bob.bio.video
 import bob.ip.base
 import numpy as np
 
-#Extra packages
+# Extra packages
 import pywt
 import importlib
 
@@ -42,14 +42,12 @@ class HaralickRDWT(Extractor):
             n_hor=n_hor,
             n_vert=n_vert)
 
-    
         self.dtype = dtype
-        self.wavelet=wavelet
+        self.wavelet = wavelet
         self.n_hor = n_hor
         self.n_vert = n_vert
 
-
-    def min_max_norm(self,img,do_norm):
+    def min_max_norm(self, img, do_norm):
         """
         Normalizes the image to 0-255 range based on min max range, and cast it to 'int8'
 
@@ -62,12 +60,10 @@ class HaralickRDWT(Extractor):
 
             data_n = ((img-t_min)/(t_max-t_min))*255.0
         else:
-            data_n=img.copy()
-
+            data_n = img.copy()
 
         return data_n.astype('uint8')
 
-
     def comp_block_rwdt_haralick(self, data):
         """
         Extracts RDWT decompositiond and therir haralick descriptors from a gray-scale image/block.
@@ -88,35 +84,31 @@ class HaralickRDWT(Extractor):
         assert isinstance(data, np.ndarray)
 
         # 1 level SWT/ UDWT decomposition
-        
-        coeff=pywt.swt2(data, self.wavelet,1)
+
+        coeff = pywt.swt2(data, self.wavelet, 1)
         LL, (LH, HL, HH) = coeff[0]
 
-        decompositions=[LL,LH,HL,HH,data] # all four decompositions and the original data
+        decompositions = [LL, LH, HL, HH, data]  # all four decompositions and the original data
 
-        features=[]
+        features = []
 
         try:
-            mahotas=importlib.import_module('mahotas')
+            mahotas = importlib.import_module('mahotas')
         except:
-            pass # TODO: test
-
+            pass  # TODO: test
 
         for decomposition in decompositions:
 
-            ## the type should be decided; haralick needs it to be uint8
-            feat=mahotas.features.haralick(f=self.min_max_norm(decomposition,True),return_mean=True, return_mean_ptp=False,use_x_minus_y_variance=False) # this gives one mean
+            # the type should be decided; haralick needs it to be uint8
+            feat = mahotas.features.haralick(f=self.min_max_norm(decomposition, True), return_mean=True, return_mean_ptp=False, use_x_minus_y_variance=False)  # this gives one mean
 
             features.append(feat)
 
         # feature vector for the patch
-        comb_patch=np.array(features).reshape(1,-1)
+        comb_patch = np.array(features).reshape(1, -1)
 
         return comb_patch
 
-
-
-
     def __call__(self, mcdata):
         """
         Extracts RDWT+ Haralick features from multi-channel images, blockwise.
@@ -135,19 +127,19 @@ class HaralickRDWT(Extractor):
 
         assert isinstance(mcdata, np.ndarray)
 
-        if len(mcdata.shape)>2:
-            channels=mcdata.shape[0]
+        if len(mcdata.shape) > 2:
+            channels = mcdata.shape[0]
         else:
-            channels=1
-            mcdata=np.expand_dims(mcdata,0)
+            channels = 1
+            mcdata = np.expand_dims(mcdata, 0)
 
-        haralick_feat=[]
+        haralick_feat = []
 
         for channel in range(channels):
 
-            data=mcdata[channel,:]  # 2D image
+            data = mcdata[channel, :]  # 2D image
 
-            #print("data.shape",data.shape)
+            # print("data.shape",data.shape)
 
             # Make sure the data can be split into equal blocks:
             row_max = int(data.shape[0] / self.n_vert) * self.n_vert
@@ -160,8 +152,6 @@ class HaralickRDWT(Extractor):
 
             haralick_feat.append(np.array(patch_haralick_feat).flatten())
 
-        feat=np.array(haralick_feat).flatten() # flatten the features
+        feat = np.array(haralick_feat).flatten()  # flatten the features
 
         return feat
-
-       
\ No newline at end of file
diff --git a/bob/paper/mccnn/tifs2018/script/automate_v2.py b/bob/paper/mccnn/tifs2018/script/automate_v2.py
index 7866d30..8456dc2 100644
--- a/bob/paper/mccnn/tifs2018/script/automate_v2.py
+++ b/bob/paper/mccnn/tifs2018/script/automate_v2.py
@@ -1,7 +1,7 @@
 
 
 import os
-import numpy as np 
+import numpy as np
 
 import argparse
 
@@ -10,45 +10,42 @@ import datetime
 import subprocess
 
 
+# Modifiable parameters
 
-## Modififiable parameters
+PROTOCOL_INDEX = 0
 
-PROTOCOL_INDEX=0 
-
-SELECTED_CHANNELS = [0,1,2,3] 
+SELECTED_CHANNELS = [0, 1, 2, 3]
 
 ADAPTED_LAYERS = 'conv1-block1-group1-ffc'
 
 ADAPT_REF_CHANNEL = False
 
-################# Template paths
+# Template paths
 
-_template_trainer_config_path= '<PATH_TEMPLATE_TRAIN_CONFIG>'
-_template_trainer_script_path= '<PATH_TO_TEMPLATE_TRAINER_SCRIPT>'
-_template_pipeline_config_path= '<PATH_TO_PIPELINE>'
-_template_pipeline_script_path= '<PATH_TO_PIPELINE_SCRIPT_TEMPLATE>'
+_template_trainer_config_path = '<PATH_TEMPLATE_TRAIN_CONFIG>'
+_template_trainer_script_path = '<PATH_TO_TEMPLATE_TRAINER_SCRIPT>'
+_template_pipeline_config_path = '<PATH_TO_PIPELINE>'
+_template_pipeline_script_path = '<PATH_TO_PIPELINE_SCRIPT_TEMPLATE>'
 
 
-################################################  
+################################################
 
-unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+unseen_protocols = ['', '-LOO_fakehead', '-LOO_flexiblemask', '-LOO_glasses', '-LOO_papermask', '-LOO_prints', '-LOO_replay', '-LOO_rigidmask']
 
-protocols="grandtest-color-50"+unseen_protocols[PROTOCOL_INDEX] # makeup is excluded anyway here
+protocols = "grandtest-color-50"+unseen_protocols[PROTOCOL_INDEX]  # makeup is excluded anyway here
 
 UID = "_".join([str(i) for i in SELECTED_CHANNELS])+"_"+str(ADAPT_REF_CHANNEL)+"_"+ADAPTED_LAYERS+"_"+str(len(SELECTED_CHANNELS))+"_"+protocols
 
-MODEL_FILE='<MCCNN_OUTPUT_DIR>'+'/{}/model_25_0.pth'.format(UID)
-
-_trainer_saveloc='<MCCNN_OUTPUT_DIR>'+'/{}/'.format(UID)
-_score_loc= '<MCCNN_OUTPUT_DIR>'+'{}/{}/'.format(UID,protocols)
+MODEL_FILE = '<MCCNN_OUTPUT_DIR>'+'/{}/model_25_0.pth'.format(UID)
 
-_training_config_save_loc= '<MCCNN_OUTPUT_DIR>'+'/automate/'
-_training_config_save_name=_training_config_save_loc+UID+'.py'
-
-_extractor_config_saveloc='<MCCNN_OUTPUT_DIR>'+'extractor_v2_configs/automate/'
-_extractor_config_savename=_extractor_config_saveloc+UID+'.py'
+_trainer_saveloc = '<MCCNN_OUTPUT_DIR>'+'/{}/'.format(UID)
+_score_loc = '<MCCNN_OUTPUT_DIR>'+'{}/{}/'.format(UID, protocols)
 
+_training_config_save_loc = '<MCCNN_OUTPUT_DIR>'+'/automate/'
+_training_config_save_name = _training_config_save_loc+UID+'.py'
 
+_extractor_config_saveloc = '<MCCNN_OUTPUT_DIR>'+'extractor_v2_configs/automate/'
+_extractor_config_savename = _extractor_config_saveloc+UID+'.py'
 
 
 """
@@ -57,15 +54,15 @@ Generates a job script and launches it with qsub.
 """
 
 
-source_fname= parse_arguments( cmd_params=cmd_params)
+source_fname = parse_arguments(cmd_params=cmd_params)
 
 
-save_path,name=os.path.split(source_fname)
+save_path, name = os.path.split(source_fname)
 
 
 # get the index from the filename
 
-stripped_index=int(name.split('.py')[0].split('v')[1])
+stripped_index = int(name.split('.py')[0].split('v')[1])
 
 
 # sample showing contents of base
@@ -75,25 +72,18 @@ with open(source_fname, 'r') as file:
     # read a list of lines into data
     data = file.readlines()
 
-dt_line=data[31]
-
-print("DT_LINE",dt_line)
-
-print("Protocol",data[34])
-
-
+dt_line = data[31]
 
-all_log=[]
+print("DT_LINE", dt_line)
 
+print("Protocol", data[34])
 
 
-
-for  nidx, prot in zip(range(stripped_index,stripped_index+14),protocols_base):
-    print(nidx,prot)
-
-
+all_log = []
 
 
+for nidx, prot in zip(range(stripped_index, stripped_index+14), protocols_base):
+    print(nidx, prot)
 
     with open(source_fname, 'r') as file:
         # read a list of lines into data
@@ -101,57 +91,44 @@ for  nidx, prot in zip(range(stripped_index,stripped_index+14),protocols_base):
 
     #print (data)
 
+    protocol_line = "base_protocol='Protocol_{}' # 0 for other experiments\n".format(prot)
 
-    protocol_line="base_protocol='Protocol_{}' # 0 for other experiments\n".format(prot)
-
+    dt_line = data[31].replace(str(stripped_index), str(nidx))
 
-    dt_line=data[31].replace(str(stripped_index),str(nidx))
+    # print(protocol_line)
 
-
-    #print(protocol_line)
-
-    #print(dt_line)
+    # print(dt_line)
 
     ## replace in data
 
-    data[31]=dt_line
-
-    data[34]=protocol_line
+    data[31] = dt_line
 
+    data[34] = protocol_line
 
-    save_name=save_path+'/cnn_rgb_automate_v{}.py'.format(str(nidx))
-
-    #write file out
+    save_name = save_path+'/cnn_rgb_automate_v{}.py'.format(str(nidx))
 
+    # write file out
 
     with open(save_name, 'w') as file:
-        file.writelines( data )
-
+        file.writelines(data)
 
     print("isub {}".format(save_name)+"\n")
 
-
-    command="isub {}".format(save_name) + " && "
+    command = "isub {}".format(save_name) + " && "
 
     all_log.append(command)
 
-
-
     #subprocess.call(command, shell=True)
 
 
-
-
-
-
-info="generated scripte from {} to {}".format(stripped_index,stripped_index+13)
+info = "generated scripte from {} to {}".format(stripped_index, stripped_index+13)
 
 print(info)
 
 print("".join(all_log))
 
 
-log_file='<MCCNN_OUTPUT_DIR>'+'/logs.txt'
+log_file = '<MCCNN_OUTPUT_DIR>'+'/logs.txt'
 
 with open(log_file, "a") as myfile:
     myfile.write("------------------------------\n")
@@ -161,7 +138,3 @@ with open(log_file, "a") as myfile:
     myfile.write(dt_line+"\n")
     myfile.write("Generated at:"+str(datetime.datetime.now())+"\n")
     myfile.write("------------------------------\n")
-
-
-
-
diff --git a/bob/paper/mccnn/tifs2018/script/mean_fusion.py b/bob/paper/mccnn/tifs2018/script/mean_fusion.py
index 730e672..4de14d9 100644
--- a/bob/paper/mccnn/tifs2018/script/mean_fusion.py
+++ b/bob/paper/mccnn/tifs2018/script/mean_fusion.py
@@ -12,39 +12,38 @@ import argparse
 
 import os
 
-## Preprocessors
+# Preprocessors
 from sklearn.preprocessing import StandardScaler
 from sklearn.preprocessing import MinMaxScaler
 
-modalities=['color','thermal','infrared','depth']
-
-groups=['dev','eval']
+modalities = ['color', 'thermal', 'infrared', 'depth']
 
+groups = ['dev', 'eval']
 
 
 class Mean():
 
-    def __init__(self,weights=None):
+    def __init__(self, weights=None):
         if weights is not None:
-            self.weights=weights/sum(weights)
+            self.weights = weights/sum(weights)
         else:
-            self.weights=None
-        
-    def fit(self,X,y):
+            self.weights = None
+
+    def fit(self, X, y):
         pass
-    def predict_proba(self,X):
-        #2nd column is used
-        
+
+    def predict_proba(self, X):
+        # 2nd column is used
+
         if self.weights is not None:
-            X=X*self.weights
-        
-        prob=np.mean(X,axis=1)
-        res=np.vstack((1.0-prob,prob)).T
+            X = X*self.weights
+
+        prob = np.mean(X, axis=1)
+        res = np.vstack((1.0-prob, prob)).T
         return res
-        
 
-def parse_arguments(cmd_params=None):
 
+def parse_arguments(cmd_params=None):
     """
     Parse the command line arguments.
 
@@ -52,18 +51,17 @@ def parse_arguments(cmd_params=None):
 
     ``color_path``: py:class:`string`
         An absolute base path for extracted features from color channel
-        
+
     ``out_path``: py:class:`string`
         An absolute base path to save the concatenated features        
     """
 
     parser = argparse.ArgumentParser(description=__doc__)
-    
 
     parser.add_argument("-c", "--color-dev", type=str, help="An absolute path to the scores-dev file from color channel=.",
-                        default = "")
+                        default="")
     parser.add_argument("-s", "--save-path", type=str, help="An absolute base path to the folder to save the fused results =.",
-                        default = ".")                
+                        default=".")
 
     if cmd_params is not None:
         args = parser.parse_args(cmd_params)
@@ -76,80 +74,71 @@ def parse_arguments(cmd_params=None):
     return color_dev, save_path
 
 
-    
 def main(cmd_params=None):
-    
     """
     ./bin/python -m batl.utils.score_batl --h for help.
 
     Sample usage: ./bin/python -m -c -o 
     """
-    
-
-    color_dev, save_path = parse_arguments( cmd_params=cmd_params)
 
-    print("color_dev",color_dev)
+    color_dev, save_path = parse_arguments(cmd_params=cmd_params)
 
-    scaler=StandardScaler()
+    print("color_dev", color_dev)
 
-    train_on_dev=True
+    scaler = StandardScaler()
 
-    clf=Mean()
+    train_on_dev = True
 
+    clf = Mean()
 
-    protocol=color_dev.split('/')[-3]
+    protocol = color_dev.split('/')[-3]
 
     if save_path == ".":
 
-        save_path=protocol+'/fusion/'
-
+        save_path = protocol+'/fusion/'
 
     for group in groups:
 
-        save_name=save_path+'scores_mean_fused_'+group
+        save_name = save_path+'scores_mean_fused_'+group
 
-        eval_file_names=[]
+        eval_file_names = []
 
         for modality in modalities:
-            temp=color_dev.replace("color",modality)
-            temp=temp.replace("dev",group)
+            temp = color_dev.replace("color", modality)
+            temp = temp.replace("dev", group)
             eval_file_names.append(temp)
 
-
-        eval_df= {}
-        scores=[]
-
+        eval_df = {}
+        scores = []
 
         ## Order is important
 
         for eval_file in eval_file_names:
-            df=pd.read_csv(eval_file,sep=" ",names=['A','B','path','score'],dtype='str')
-            eval_df[eval_file]=df
+            df = pd.read_csv(eval_file, sep=" ", names=['A', 'B', 'path', 'score'], dtype='str')
+            eval_df[eval_file] = df
             scores.append(df['score'].values)
-          
-            
-        X_test=np.stack(scores).T
 
-        X_test=X_test.astype('float64')
+        X_test = np.stack(scores).T
+
+        X_test = X_test.astype('float64')
 
         # Handling NaN
-        X_test=np.nan_to_num(X_test)
+        X_test = np.nan_to_num(X_test)
 
         # Fitting and transforming scaler
 
-        #X_test=scaler.transform(X_test)
+        # X_test=scaler.transform(X_test)
 
         # Fitting classifier
-        score_predict=clf.predict_proba(X_test)[:,1]
+        score_predict = clf.predict_proba(X_test)[:, 1]
 
         #
-        df['score']=score_predict
+        df['score'] = score_predict
 
-        os.makedirs(save_path,exist_ok=True)
+        os.makedirs(save_path, exist_ok=True)
         #
         df.to_csv(save_name, sep=" ", na_rep='', float_format=None, columns=None, header=False, index=False)
 
 
 if __name__ == "__main__":
     main(cmd_params=None)
-
diff --git a/bob/paper/mccnn/tifs2018/script/scoring.py b/bob/paper/mccnn/tifs2018/script/scoring.py
index a61b030..80c97d6 100644
--- a/bob/paper/mccnn/tifs2018/script/scoring.py
+++ b/bob/paper/mccnn/tifs2018/script/scoring.py
@@ -29,101 +29,90 @@ The measure type of the development set can be changed to compute "HTER" or
 """
 
 
-import sys, os,  glob
+import sys
+import os
+import glob
 import argparse
 import numpy
 
-import bob.bio.base.score  
+import bob.bio.base.score
 import bob.measure
 
 import pandas as pd
 
 from bob.measure import (
-    far_threshold, eer_threshold, min_hter_threshold,frr_threshold)
+    far_threshold, eer_threshold, min_hter_threshold, frr_threshold)
 
 
 class custom_df():
     """
     Custom class mimicking PrettyTable type functionality with pandas Dataframe
     """
-    def __init__(self,columns):
-        self.columns=columns
-        self.frames=[]
-        
-    def add_row(self,ccolumn):
-
-        t_df=pd.DataFrame(ccolumn, self.columns).T
-        self.frames.append(t_df)    
-        
+
+    def __init__(self, columns):
+        self.columns = columns
+        self.frames = []
+
+    def add_row(self, ccolumn):
+
+        t_df = pd.DataFrame(ccolumn, self.columns).T
+        self.frames.append(t_df)
+
     def get_df(self):
         return pd.concat(self.frames)
 
 
+def get_metrics(dev_fname, eval_fname, legend):
+
+    dev_neg, dev_pos = bob.bio.base.score.split(dev_fname, ncolumns=None, sort=True)
+
+    eval_neg, eval_pos = bob.bio.base.score.split(eval_fname, ncolumns=None, sort=True)
+
+    # for BPCER20
+
+    # compute all thresholds
+
+    bpcer_1_percent_threshold = frr_threshold(dev_neg, dev_pos, 0.01, True)  # threshold for BPCER 1%
 
+    eer_thresh = eer_threshold(dev_neg, dev_pos, True)
 
+    # Old standard
 
-def get_metrics(dev_fname,eval_fname,legend):
+    dev_far, dev_frr = bob.measure.farfrr(dev_neg, dev_pos, eer_thresh)
+    dev_hter = (dev_far + dev_frr)/2.0
 
-  dev_neg, dev_pos=bob.bio.base.score.split(dev_fname, ncolumns=None, sort=True)
+    EER = float("{0:.2f}".format(dev_hter*100))
 
-  eval_neg, eval_pos=bob.bio.base.score.split(eval_fname, ncolumns=None, sort=True)
+    eval_far, eval_frr = bob.measure.farfrr(eval_neg, eval_pos, eer_thresh)
+    eval_hter = (eval_far + eval_frr)/2.0
 
+    APCER_dev, BPCER_dev = bob.measure.farfrr(dev_neg, dev_pos, bpcer_1_percent_threshold)
 
+    ACER_dev = (APCER_dev+BPCER_dev)/2.0
 
-  # for BPCER20
+    # BPCER_dev should be 1%
 
-  ## compute all thresholds
-  
-  bpcer_1_percent_threshold=frr_threshold(dev_neg, dev_pos, 0.01, True)# threshold for BPCER 1%
-  
+    APCER_eval, BPCER_eval = bob.measure.farfrr(eval_neg, eval_pos, bpcer_1_percent_threshold)
 
-  eer_thresh = eer_threshold(dev_neg,dev_pos, True)
-  
+    ACER_eval = (APCER_eval+BPCER_eval)/2.0
 
-  # Old standard
+    APCER_dev = float("{0:.2f}".format(APCER_dev*100))
 
-  dev_far, dev_frr = bob.measure.farfrr(dev_neg,dev_pos, eer_thresh)
-  dev_hter = (dev_far + dev_frr)/2.0
+    BPCER_dev = float("{0:.2f}".format(BPCER_dev*100))
 
-  EER=float("{0:.2f}".format(dev_hter*100)) 
-  
+    ACER_dev = float("{0:.2f}".format(ACER_dev*100))
 
-  eval_far, eval_frr = bob.measure.farfrr(eval_neg, eval_pos, eer_thresh)
-  eval_hter = (eval_far + eval_frr)/2.0
-  
-  
-  APCER_dev, BPCER_dev = bob.measure.farfrr(dev_neg,dev_pos, bpcer_1_percent_threshold)
-  
-  ACER_dev= (APCER_dev+BPCER_dev)/2.0
-  
-  # BPCER_dev should be 1%
-  
-  
-  APCER_eval, BPCER_eval = bob.measure.farfrr(eval_neg,eval_pos, bpcer_1_percent_threshold)
-  
-  ACER_eval= (APCER_eval+BPCER_eval)/2.0
-  
+    APCER_eval = float("{0:.2f}".format(APCER_eval*100))
 
-  APCER_dev=float("{0:.2f}".format( APCER_dev*100))
-  
-  BPCER_dev=float("{0:.2f}".format( BPCER_dev*100))
-  
-  ACER_dev=float("{0:.2f}".format( ACER_dev*100))
-  
-  APCER_eval=float("{0:.2f}".format( APCER_eval*100))
-  
-  BPCER_eval=float("{0:.2f}".format( BPCER_eval*100))
-  
-  ACER_eval=float("{0:.2f}".format( ACER_eval*100))
-  
-  print("bpcer_1_percent_threshold : ",bpcer_1_percent_threshold,"BPCER_dev : ",BPCER_dev)
-  
+    BPCER_eval = float("{0:.2f}".format(BPCER_eval*100))
 
+    ACER_eval = float("{0:.2f}".format(ACER_eval*100))
 
-  metrics=[legend,APCER_dev,ACER_dev,APCER_eval,BPCER_eval,ACER_eval]
+    print("bpcer_1_percent_threshold : ", bpcer_1_percent_threshold, "BPCER_dev : ", BPCER_dev)
 
-  return metrics
+    metrics = [legend, APCER_dev, ACER_dev, APCER_eval, BPCER_eval, ACER_eval]
 
+    return metrics
 
 
 def parse_arguments(cmd_params=None):
@@ -136,10 +125,10 @@ def parse_arguments(cmd_params=None):
         An absolute path of the score file.
     ``eval_name``: py:class:`string`
         An absolute path of the score file.
-        
+
     ``legends``: py:class:`string`
         Legends of the score files.
-        
+
     ``save_path``: py:class:`string`
         An absolute base path for saving the performance table and ROCs.
 
@@ -147,7 +136,7 @@ def parse_arguments(cmd_params=None):
     """
 
     parser = argparse.ArgumentParser(description=__doc__)
-    
+
     parser.add_argument("-df", "--dev-score-files", nargs="+", help="A list of dev score files in Bob 4-column format",
                         default=[""])
     parser.add_argument("-ef", "--eval-score-files", nargs="+", help="A list of eval score files in Bob 4-column format",
@@ -156,7 +145,7 @@ def parse_arguments(cmd_params=None):
     parser.add_argument("-l", "--legends", nargs="+", help="Legends", default=[])
 
     parser.add_argument("-s", "--save-path", type=str, help="An absolute base path for saving the performance table and ROCs, default=.",
-                        default = "dump")        
+                        default="dump")
 
     if cmd_params is not None:
         args = parser.parse_args(cmd_params)
@@ -168,54 +157,46 @@ def parse_arguments(cmd_params=None):
     legends = args.legends
     save_path = args.save_path
 
-    return dev_file_names,eval_file_names, legends, save_path
-
+    return dev_file_names, eval_file_names, legends, save_path
 
 
 def main(cmd_params=None):
-    
     """
 
     """
-    
-
-    dev_file_names,eval_file_names, legends, save_path = parse_arguments( cmd_params=cmd_params)
 
-    assert(len(dev_file_names)==len(eval_file_names))
+    dev_file_names, eval_file_names, legends, save_path = parse_arguments(cmd_params=cmd_params)
 
-    if len(legends)!=len(eval_file_names):
-      legends=[]
-      for i in range(len(eval_file_names)):
-        tname='System_'+str(i)
+    assert(len(dev_file_names) == len(eval_file_names))
 
-        legends.append(tname)
+    if len(legends) != len(eval_file_names):
+        legends = []
+        for i in range(len(eval_file_names)):
+            tname = 'System_'+str(i)
 
+            legends.append(tname)
 
+    column_names = ["Method", "APCER", "ACER", "APCER", "BPCER", "ACER"]  # BPCER to de removed later
 
-    column_names=["Method","APCER","ACER","APCER","BPCER","ACER"] # BPCER to de removed later
-
-    df=custom_df(column_names)
-    
-    #df.add_row([EER,APCER,BPCER,ACER,BPCER10,BPCER20,BPCER100])
+    df = custom_df(column_names)
 
+    # df.add_row([EER,APCER,BPCER,ACER,BPCER10,BPCER20,BPCER100])
 
     for i in range(len(dev_file_names)):
-   
-        tmetrics=get_metrics(dev_file_names[i],eval_file_names[i],legends[i])
 
-        df.add_row(tmetrics)
+        tmetrics = get_metrics(dev_file_names[i], eval_file_names[i], legends[i])
 
-    performance_table=df.get_df()
+        df.add_row(tmetrics)
 
+    performance_table = df.get_df()
 
     print(performance_table)
-    os.makedirs(save_path,exist_ok=True)
+    os.makedirs(save_path, exist_ok=True)
 
-    savename_csv=save_path+"/performance_table.csv"
+    savename_csv = save_path+"/performance_table.csv"
 
-    performance_table.to_csv(savename_csv,index=False)
+    performance_table.to_csv(savename_csv, index=False)
 
 
 if __name__ == "__main__":
     main(cmd_params=None)
-
diff --git a/bob/paper/mccnn/tifs2018/script/string_replacer.py b/bob/paper/mccnn/tifs2018/script/string_replacer.py
index 71daee0..83c7ac0 100644
--- a/bob/paper/mccnn/tifs2018/script/string_replacer.py
+++ b/bob/paper/mccnn/tifs2018/script/string_replacer.py
@@ -1,41 +1,38 @@
 import numpy as np
 
-string='python bin/scoring.py -df \
+string = 'python bin/scoring.py -df \
 <PATH_TO_RESULTS>/grandtest-color-50-LOO_fakehead/scores_mean_fused_dev -ef \
 <PATH_TO_RESULTS>/grandtest-color-50-LOO_fakehead/scores_mean_fused_eval'
-channels=['color','depth','infrared','thermal']
+channels = ['color', 'depth', 'infrared', 'thermal']
 
-unseen_protocols=['-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+unseen_protocols = ['-LOO_fakehead', '-LOO_flexiblemask', '-LOO_glasses', '-LOO_papermask', '-LOO_prints', '-LOO_replay', '-LOO_rigidmask']
 
-commands=[]
+commands = []
 
-for idx,protocol in enumerate(unseen_protocols):
+for idx, protocol in enumerate(unseen_protocols):
 
+    nstring = string.replace('-LOO_fakehead', protocol) + " "
 
-	nstring=string.replace('-LOO_fakehead',protocol)+ " "
+    print(nstring)
 
-	print(nstring)
-
-	commands.append(nstring)
+    commands.append(nstring)
 
 print("ALL")
 
 print("&&".join(commands))
 
 
-
-commands=[]
+commands = []
 
 for channel in channels:
 
-	nstring=string.replace('color',channel)+ " "
+    nstring = string.replace('color', channel) + " "
 
-	print(nstring)
+    print(nstring)
 
-	commands.append(nstring)
+    commands.append(nstring)
 
 
 print("ALL")
 
 print("&&".join(commands))
-
diff --git a/bob/paper/mccnn/tifs2018/script/version.py b/bob/paper/mccnn/tifs2018/script/version.py
index 6d1f941..e675c94 100644
--- a/bob/paper/mccnn/tifs2018/script/version.py
+++ b/bob/paper/mccnn/tifs2018/script/version.py
@@ -5,9 +5,9 @@
 Print the text
 """
 
-def main():
-  """Print the text"""
 
-  print ("Print test text")
-  return 0
+def main():
+    """Print the text"""
 
+    print("Print test text")
+    return 0
diff --git a/bob/paper/mccnn/tifs2018/trainer_configs/wmca_fasnet.py b/bob/paper/mccnn/tifs2018/trainer_configs/wmca_fasnet.py
index e16c819..76a9662 100644
--- a/bob/paper/mccnn/tifs2018/trainer_configs/wmca_fasnet.py
+++ b/bob/paper/mccnn/tifs2018/trainer_configs/wmca_fasnet.py
@@ -32,39 +32,39 @@ from bob.learn.pytorch.datasets import ChannelSelect, RandomHorizontalFlipImage
 """
 
 #==============================================================================
-# Initialize the bob database instance 
+# Initialize the bob database instance
 
-data_folder_train='<FASNET_PREPROCESSED_FOLDER>'
+data_folder_train = '<FASNET_PREPROCESSED_FOLDER>'
 
-output_base_path='<FASNET_CNN_OUTPUT_PATH>' 
+output_base_path = '<FASNET_CNN_OUTPUT_PATH>'
 
-unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+unseen_protocols = ['', '-LOO_fakehead', '-LOO_flexiblemask', '-LOO_glasses', '-LOO_papermask', '-LOO_prints', '-LOO_replay', '-LOO_rigidmask']
 
-PROTOCOL_INDEX=0 
+PROTOCOL_INDEX = 0
 ####################################################################
 
-frames=50
+frames = 50
 
-extension='.h5'
+extension = '.h5'
 
-train_groups=['train'] # only 'train' group is used for training the network
+train_groups = ['train']  # only 'train' group is used for training the network
 
-val_groups=['dev']
+val_groups = ['dev']
 
-do_crossvalidation=True
+do_crossvalidation = True
 ####################################################################
 
 if do_crossvalidation:
-	phases=['train','val']
+    phases = ['train', 'val']
 else:
-	phases=['train']
+    phases = ['train']
 
-groups={"train":['train'],"val":['dev']}
+groups = {"train": ['train'], "val": ['dev']}
 
 
-protocols="grandtest-color-50"+unseen_protocols[PROTOCOL_INDEX] # makeup is excluded anyway here
+protocols = "grandtest-color-50"+unseen_protocols[PROTOCOL_INDEX]  # makeup is excluded anyway here
 
-exlude_attacks_list=["makeup"]
+exlude_attacks_list = ["makeup"]
 
 bob_hldi_instance = BatlPadDatabase(
     protocol=protocols,
@@ -73,36 +73,35 @@ bob_hldi_instance = BatlPadDatabase(
     landmark_detect_method="mtcnn",  # detect annotations using mtcnn
     exclude_attacks_list=exlude_attacks_list,
     exclude_pai_all_sets=True,  # exclude makeup from all the sets, which is the default behavior for grandtest protocol
-    append_color_face_roi_annot=False) 
+    append_color_face_roi_annot=False)
 
 #==============================================================================
 # Initialize the torch dataset, subselect channels from the pretrained files if needed.
 
-SELECTED_CHANNELS = [0,1,2] 
+SELECTED_CHANNELS = [0, 1, 2]
 ####################################################################
 
 
-img_transform={}
+img_transform = {}
 
-img_transform['train'] = transforms.Compose([transforms.ToPILImage(),transforms.RandomHorizontalFlip(),transforms.Resize(224, interpolation=2),transforms.ToTensor(),transforms.Normalize(mean=[0.485, 0.456, 0.406],
-                                 std=[0.229, 0.224, 0.225])])
+img_transform['train'] = transforms.Compose([transforms.ToPILImage(), transforms.RandomHorizontalFlip(), transforms.Resize(224, interpolation=2), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406],
+                                                                                                                                                                                              std=[0.229, 0.224, 0.225])])
 
-img_transform['val'] = transforms.Compose([transforms.ToPILImage(),transforms.Resize(224, interpolation=2),transforms.ToTensor(),transforms.Normalize(mean=[0.485, 0.456, 0.406],
-                                 std=[0.229, 0.224, 0.225])])
+img_transform['val'] = transforms.Compose([transforms.ToPILImage(), transforms.Resize(224, interpolation=2), transforms.ToTensor(), transforms.Normalize(mean=[0.485, 0.456, 0.406],
+                                                                                                                                                         std=[0.229, 0.224, 0.225])])
 
-dataset={}
+dataset = {}
 
 for phase in phases:
 
-	dataset[phase] = DataFolder(data_folder=data_folder_train,
-						 transform=img_transform[phase],
-						 extension='.hdf5',
-						 bob_hldi_instance=bob_hldi_instance,
-						 groups=groups[phase],
-						 protocol=protocols,
-						 purposes=['real', 'attack'],
-						 allow_missing_files=True)
-
+    dataset[phase] = DataFolder(data_folder=data_folder_train,
+                                transform=img_transform[phase],
+                                extension='.hdf5',
+                                bob_hldi_instance=bob_hldi_instance,
+                                groups=groups[phase],
+                                protocol=protocols,
+                                purposes=['real', 'attack'],
+                                allow_missing_files=True)
 
 
 #==============================================================================
@@ -116,18 +115,17 @@ ADAPT_REF_CHANNEL = False
 ####################################################################
 
 
-
 batch_size = 32
 num_workers = 0
-epochs=25
-learning_rate=0.0001
+epochs = 25
+learning_rate = 0.0001
 seed = 3
 use_gpu = False
 adapted_layers = ADAPTED_LAYERS
 adapt_reference_channel = ADAPT_REF_CHANNEL
 verbose = 2
 UID = "_".join([str(i) for i in SELECTED_CHANNELS])+"_"+str(ADAPT_REF_CHANNEL)+"_"+ADAPTED_LAYERS+"_"+str(NUM_CHANNELS)+"_"+protocols
-training_logs= output_base_path+UID+'/train_log_dir/'
+training_logs = output_base_path+UID+'/train_log_dir/'
 output_dir = output_base_path+UID
 
 
@@ -135,6 +133,6 @@ output_dir = output_base_path+UID
 # Load the architecture
 
 
-assert(len(SELECTED_CHANNELS)==NUM_CHANNELS)
+assert(len(SELECTED_CHANNELS) == NUM_CHANNELS)
 
-network=FASNet(pretrained=True)
+network = FASNet(pretrained=True)
diff --git a/bob/paper/mccnn/tifs2018/trainer_configs/wmca_mccnn.py b/bob/paper/mccnn/tifs2018/trainer_configs/wmca_mccnn.py
index 62c473b..1ac899f 100644
--- a/bob/paper/mccnn/tifs2018/trainer_configs/wmca_mccnn.py
+++ b/bob/paper/mccnn/tifs2018/trainer_configs/wmca_mccnn.py
@@ -32,26 +32,26 @@ from bob.learn.pytorch.datasets import ChannelSelect, RandomHorizontalFlipImage
 """
 
 #==============================================================================
-# Initialize the bob database instance 
+# Initialize the bob database instance
 
-data_folder_train='<MCCNN_PREPROCESSED_PATH>'
+data_folder_train = '<MCCNN_PREPROCESSED_PATH>'
 
-output_base_path='<MCCNN_CNN_OUTPUT_PATH>' 
+output_base_path = '<MCCNN_CNN_OUTPUT_PATH>'
 
-unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
+unseen_protocols = ['', '-LOO_fakehead', '-LOO_flexiblemask', '-LOO_glasses', '-LOO_papermask', '-LOO_prints', '-LOO_replay', '-LOO_rigidmask']
 
-PROTOCOL_INDEX=0 
+PROTOCOL_INDEX = 0
 ####################################################################
 
-frames=50
+frames = 50
 
-extension='.h5'
+extension = '.h5'
 
-train_groups=['train'] # only 'train' group is used for training the network
+train_groups = ['train']  # only 'train' group is used for training the network
 
-protocols="grandtest-color-50"+unseen_protocols[PROTOCOL_INDEX] # makeup is excluded anyway here
+protocols = "grandtest-color-50"+unseen_protocols[PROTOCOL_INDEX]  # makeup is excluded anyway here
 
-exlude_attacks_list=["makeup"]
+exlude_attacks_list = ["makeup"]
 
 bob_hldi_instance_train = BatlPadDatabase(
     protocol=protocols,
@@ -60,25 +60,24 @@ bob_hldi_instance_train = BatlPadDatabase(
     landmark_detect_method="mtcnn",  # detect annotations using mtcnn
     exclude_attacks_list=exlude_attacks_list,
     exclude_pai_all_sets=True,  # exclude makeup from all the sets, which is the default behavior for grandtest protocol
-    append_color_face_roi_annot=False) 
+    append_color_face_roi_annot=False)
 
 #==============================================================================
 # Initialize the torch dataset, subselect channels from the pretrained files if needed.
 
-SELECTED_CHANNELS = [0,1,2,3] 
+SELECTED_CHANNELS = [0, 1, 2, 3]
 ####################################################################
 
-img_transform_train = transforms.Compose([ChannelSelect(selected_channels = SELECTED_CHANNELS),RandomHorizontalFlipImage(p=0.5),transforms.ToTensor()])
+img_transform_train = transforms.Compose([ChannelSelect(selected_channels=SELECTED_CHANNELS), RandomHorizontalFlipImage(p=0.5), transforms.ToTensor()])
 
 dataset = DataFolder(data_folder=data_folder_train,
-					 transform=img_transform_train,
-					 extension='.hdf5',
-					 bob_hldi_instance=bob_hldi_instance_train,
-					 groups=train_groups,
-					 protocol=protocols,
-					 purposes=['real', 'attack'],
-					 allow_missing_files=True)
-
+                     transform=img_transform_train,
+                     extension='.hdf5',
+                     bob_hldi_instance=bob_hldi_instance_train,
+                     groups=train_groups,
+                     protocol=protocols,
+                     purposes=['real', 'attack'],
+                     allow_missing_files=True)
 
 
 #==============================================================================
@@ -92,18 +91,17 @@ ADAPT_REF_CHANNEL = False
 ####################################################################
 
 
-
 batch_size = 32
 num_workers = 0
-epochs=25
-learning_rate=0.0001
+epochs = 25
+learning_rate = 0.0001
 seed = 3
 use_gpu = False
 adapted_layers = ADAPTED_LAYERS
 adapt_reference_channel = ADAPT_REF_CHANNEL
 verbose = 2
 UID = "_".join([str(i) for i in SELECTED_CHANNELS])+"_"+str(ADAPT_REF_CHANNEL)+"_"+ADAPTED_LAYERS+"_"+str(NUM_CHANNELS)+"_"+protocols
-training_logs= output_base_path+UID+'/train_log_dir/'
+training_logs = output_base_path+UID+'/train_log_dir/'
 output_dir = output_base_path+UID
 
 
@@ -111,6 +109,6 @@ output_dir = output_base_path+UID
 # Load the architecture
 
 
-assert(len(SELECTED_CHANNELS)==NUM_CHANNELS)
+assert(len(SELECTED_CHANNELS) == NUM_CHANNELS)
 
-network=MCCNN(num_channels = NUM_CHANNELS)
+network = MCCNN(num_channels=NUM_CHANNELS)
diff --git a/doc/index.rst b/doc/index.rst
index d2752ac..6569c15 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -25,9 +25,9 @@ User guide
 .. toctree::
    :maxdepth: 2
 
-   running_baslines
+   running_baselines
    running_fasnet
-   running_mccn
+   running_mccnn
    references
 
 
diff --git a/doc/running_baslines.md b/doc/running_baselines.rst
similarity index 95%
rename from doc/running_baslines.md
rename to doc/running_baselines.rst
index a107022..fea3a3a 100644
--- a/doc/running_baslines.md
+++ b/doc/running_baselines.rst
@@ -14,8 +14,8 @@ can be installed as
 
 The steps to reproduce the results for grandtest protocol are listed below. 
 
-Color channel
--------------
+A. Color channel
+----------------
 
 1.A.1. IQM - LR  
 
@@ -140,7 +140,7 @@ D. Thermal channel
      <PATH_TO_BASELINE_RESULTS>/thermal/haralicksvm/grandtest-thermal-50/scores/scores-eval 
 
 
-E. Score fusion (haralick-svm)
+E. Score fusion (Haralick-SVM)
 ------------------------------
 
 .. code-block:: sh
@@ -169,12 +169,12 @@ F. Score fusion (IQM-LBP-LR)
      <PATH_TO_BASELINE_RESULTS>/mean_fusion/grandtest/scores_mean_fused_eval 
 
 
-BASELINES in LOO protocols (for scorefusion)
-============================================
+BASELINES in LOO protocols (for score fusion)
+=============================================
 
 For the experiments first do individual experiments for all channels, after that perform fusion. This needs to be repeated for all the protocols
 
-A. color
+A. Color
 --------
 
 unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
@@ -184,8 +184,6 @@ The protocols can be easily indexed as
 PROTOCOL = 'grandtest-color-50'+unseen_protocols[0]
 
 
-1.
-
 .. code-block:: sh
 
      ./bin/spoof.py \
@@ -199,11 +197,9 @@ PROTOCOL = 'grandtest-color-50'+unseen_protocols[0]
 
 
 
-B. depth
+B. Depth
 --------
 
-1.
-
 .. code-block:: sh
 
      ./bin/spoof.py \
@@ -220,8 +216,6 @@ B. depth
 C. Infrared
 -----------
 
-1.
-
 .. code-block:: sh
 
      ./bin/spoof.py \
@@ -237,8 +231,6 @@ C. Infrared
 D. Thermal
 ----------
 
-1.
-
 .. code-block:: sh
 
      ./bin/spoof.py \
@@ -255,10 +247,8 @@ D. Thermal
 E. Score fusion
 ---------------
 
-The protocola are, '-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask'
-
+The protocols are, '-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask'
 
-1.
 .. code-block:: sh
 
      ./bin/python bin/mean_fusion.py -c \
@@ -274,11 +264,11 @@ The protocola are, '-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_pape
 
 Similarly, repeat the experiment for all protocols.
 
-BASELINES in LOO (haralick svm)
-===============================
+BASELINES in LOO protocols (Haralick-SVM)
+=========================================
 
 
-color
+Color
 -----
 .. code-block:: sh
 
@@ -286,8 +276,8 @@ color
 
 Repeat the same procedure for all four channels. 
 
-Scorefusion Haralick-SVM
-------------------------
+Score fusion Haralick-SVM
+-------------------------
 
 Once scores from all channels are available. Run the following command. 
 
@@ -295,7 +285,7 @@ Once scores from all channels are available. Run the following command.
 
      ./bin/python bin/mean_fusion.py -c <PATH_TO_BASELINE_RESULTS>/LOO/color/haralicksvm/grandtest-color-50-LOO_fakehead/grandtest-color-50-LOO_fakehead/scores/scores-dev -s <PATH_TO_BASELINE_RESULTS>/mean_fusion/haralick/grandtest-color-50-LOO_fakehead/ 
 
-scoring for Haralick Mean fusion
+Scoring for Haralick Mean fusion
 --------------------------------
 .. code-block:: sh
 
diff --git a/doc/running_fasnet.md b/doc/running_fasnet.rst
similarity index 98%
rename from doc/running_fasnet.md
rename to doc/running_fasnet.rst
index 5e139b6..8a57b02 100644
--- a/doc/running_fasnet.md
+++ b/doc/running_fasnet.rst
@@ -1,6 +1,6 @@
 
 Training FASNet for face PAD
-===========================
+============================
 
 This section describes running our implementation of FASNet on WMCA dataset. It is **strongly recommended** to check the publication for better understanding
 of the described work-flow.
@@ -11,6 +11,7 @@ FASNet accepts RGB images only, hence the preprocesing is done first. This can b
 
 
 .. code-block:: sh
+
 	./bin/spoof.py \
 	wmca-color \
 	fasnet \
@@ -23,8 +24,8 @@ which is notated from here onwards as  ``<FASNET_PREPROCESSED_FOLDER>``.
 
 
 
-Training FASTNET
---------------
+Training FASNET
+---------------
 Once the preprocessing is done, the next step is to train the FASNET architecture. A config file is defined which 
 can define the transforms, image resolution, training parameters such as number of epochs, learning rate and so on.  
 
diff --git a/doc/running_mccn.md b/doc/running_mccnn.rst
similarity index 100%
rename from doc/running_mccn.md
rename to doc/running_mccnn.rst
diff --git a/dump/performance_table.csv b/dump/performance_table.csv
new file mode 100644
index 0000000..82424a0
--- /dev/null
+++ b/dump/performance_table.csv
@@ -0,0 +1,2 @@
+Method,APCER,ACER,APCER,BPCER,ACER
+System_0,0.68,0.84,0.6,0.0,0.3
diff --git a/submitted.sql3 b/submitted.sql3
new file mode 100644
index 0000000000000000000000000000000000000000..46418b0e76730cd3f0a48125ab2b30f96b46f7ad
GIT binary patch
literal 90112
zcmeHw3yd4dnI302?-%vfY9*~^SKeJ|Et36yjJ@_!Lu!UA4rhksNIkaKG>6kO?A4H*
z;$tMOz4p%fdG^^kJ7?cHz#%yA9WFs~0dlw?3GOaIoB%;^hf8n<5+FeW#0MYX;DR6q
za>2m~oWH8O*<IZnQ97+Q&ZY>B$nLJLuj;SA{(Ak7;(E^NnDaZ$R=v`h7bC|aLqm~Q
z=I0}k$j{*K4E!B~zajXG!rut|xgUIQ(C257#I?0qNEMy>ab)&)XJ+7weHaB81sDYw
z1sDYw1sDYw1sDaMJO$pKkB*;x@x`Iff38#6+B08mZl%qAv$1V9s`SOkVj+_%W#&t%
z>$%MQb5z3T=C4dmTHEv4d?~Y(Da_|rOY`}ST<+TZ)a2cY)v+48<=2~AWiN^Q#M~~!
z9J|lMwL*3!RoI-rnc1AbVr^f=FWguyWU@<neD4aK?bZ20=0>KF$uDM#q-;>4=ei9G
zb<OAczDnUf@a?IqpMQRM{OrYxLtnZ`NR(=|D)*uK?st?XjC-5V#4ST#%FxsZ)2H33
zbh_>NTdBh0a;k7eClzU#?e1R3%@0$hUa8ishFNY@>Six7&ym#a?pEFElsgtYN<-_F
zvMU+<4rJZxb9={XSnXOkIn@|$okDcAYw$o~V(i7`%;L@YD^BgQ`S~j^+Qsac+b>?5
ze-YZOYyWcE7~eGSnbj`lBYe^BR;y;ajeqV`ti5i_eDUhlsjFYe4vn8Zd2;A0_gz+o
zRvM;T2ks1Rm};|LuQayHdqCD6MFm+VHKxcc*kmR}rW?R{@}S1qZCTspN^7@UHya(V
zon0WBJum^v+g2-7p1tO7-}@EJrsa001w;$I)b2F*%gsjFA>aJ<)zw@kl_#w6?y$^5
zGVjrx)ngO7q?lj0YxDe7(ipu&q^G=#yfg}ucPRE#EPqehUr$~XGXzEfMgc|vMgc|v
zMgc|vMgc|vMgc|vMgc|vMuGQ|0w*T$`ac@`?-+D8_S4v3*gxOPGB7Mg0Y(8v0Y(8v
z0Y(8v0Y(8v0Y(8v0Y(8v0Y-r*L4nCC<U0Ne{5N(H>*k+~)g!ULjQu3`Ut)h6`wy`n
z#QrGuFJiwR`v<Y#j{Ror*J9s}eLeQY*qgB~Bx4^&0Y(8v0Y(8v0Y(8v0Y(8v0Y(8v
z0Y(8v0Y-sA6c{@>0_OG86JsaH_xRXx{5>@>c8q+FkHzr!<iyw<`5qse#orU-V>9@B
zEIKw#zK2Jr;CpgxY!Y9NM#m<|_weXAzCAiNhA&5=W25AIcyt8c9vO?`%jodvFushA
z4&kq1T;y@%CHOztGV9HbnY6a8%6_7KP*2vHbu&?^cB}(4S=lw4tzG!;G&*R)7=ssb
zi{(@<hbE0_xG)>#rR#}}(v7%=ri@9rv$&d1r^*>4gQkruIDe9HvfDOWz1-?xhf{4f
zwku+C%W5RaPc&nkgyi{Rv7FA_NNwaw<@EJ(X>%=uW)19vppEv>oPj+NV1pqPBZaxA
zDP=`M$BY=<H_}Vxl}vu4yav`%Spyw6u$sfYovIl}C&-(*RDKDaG^XITUGi>cM?<F!
z?C+p_D_baSq;i@3t=omv+FGViPOW9pY2%OKVg1g5)#`LBduHQczE$^K?6rLt?OOA$
z@A7W5b*I^wuWWA%zPr`!HuzrHuT<{<b=&jRZQrxKHubF2+}+(XclVlGl|AY~b>H_=
z+w6E(`>p2dX0<b4zvKR@Zo9wgcRF=2emg*CjB%(`I$K=Jr8d!7LYoCH!J(%JRabNA
zwcBZQ&UhJ8g&Cw{*7p-z7O)9e-&8rF*RCszi}`$czp`((oa&*c9j3^w7D}1I3OY}&
zJFTu+ZSFN&=z=i=7&g;5&34iFFou0F-#{(e$*tyAf}kbp)oP=W=vX^#f#Wswj4=UO
zV1Bq&$`{Zj13Rry(LAYkAp^ER#SD7Zz`I~lLQimUEncZ_10y|0?iGv6<<v%Lxm;Xb
zyooLw&*Jg`>5}bsEq=e0;ul|iV{3EudJ)&W(Mj;KuJh;vgx$)EneqyH-hfvk=mp~h
zd@bct=}dVsRZ8Vnm&$9Y(lUCHlxP86G3FovE@&}REM`~p<!l;VHE{nFc=VES%6Ym}
z$Yg+iWiWC}mDAY*`k-+U9wiSdt>m7y1%JD>&UQVyUFlTZ=0Mj-=8GGJ9DtXvZ{*Xt
z42m1i;TO$L6>77WteUM(J6Wy7@egaqs#ZE?J5g<QP=av9Mln-BNkTVA79h?z4msGB
zT3JnR<T7@h_}UdhfwdIaNFsr}z6W+}NF=>8ohjZdt**HhMG{GXdnb}f2gfX@7`Jfg
zLNe(QSng=9PzmDl>PiM_MA$eyh6P@aXu5$NpEwM;Sz3lB!NP38xQYw*V5Y>vZ2x|z
z)@+~;d5pA_$(PD&(5@dQ<iX4$pqB}Zzu}3f`v{@LJ)TD&B~t2U;kpc+Dg{M;#kc?^
zxRK3e76>oiX>G~XYMEDLv0TWj_v&w;6amGVx_nYZ*NyYI;N3PbO0!zoOSWs3mbpM~
zke_IgbV<y&q}C{1dx11qDhC#^>CIxMR9w!0#LpNXgq%Rwv}@+x-U4vG*{bxuv7Kwx
zT&V0>mA2Vlz{Y>*2BEUQk`5D~C9m<93D|Y$k-*nIu&nWvM{h6+ZFV8|WV^Z3xeLWd
zuljg!Eo-c#)@&ZU2|RcjUUUV@>dIQF3{0B6kp-U28BfErD@Tcqt7wHtBexKwr{;}o
z4=6~|B}cN^?ZngOmQ`t>RZ{lwa8g_>WY<b)&ERp4xJ5_<Bt7AMw^i8(;TgV@XxGp>
zp&5~)1=6$cb<9>hF35^4N{fWO?6xYAHYKC$Rt4VOAfxrIQl@+>Q^0Zt-6FY33df_{
zJ+>)=3A9Z}!+0JS5(fX|PRlg6&GsFzsZ6G=_WoYw{z|0*(x8<rnw1WRHhawsvIdNl
zVdS>^I{Fv^F>Wko(8q}^Tik%j!L4jD3r5zE>Q)vEn$af+)9bp9UURCDyOl$qBqCBk
z<(@3Zr9+=0N!kRsLLT8keFJ0;+9H=^U|%pBs7fwxXY=XR+tB;A$)%{toF-{1GD-hh
zO0PJA7wvc~xl58_N@w#oR#D9s7G8s4x?7E%CbEcN1gTDX=<Af2yhFGkeSM?oiaZd(
z#d0d0E`X><d!##q4fnl;{Yt0ya<bKJ*s~<uqmx_RHmdh#HHD2lHf<)7zd_obPL7(S
zHU=KFRtwqECfYYHd2H(!aLI&G>4J!AS)Ka`g`^PS`IbF%8`$_TS!p&n)b5oNy4{T|
zNFV6Ps6&WI2AX@@yQoXZOR_&OreT0{WND~xC)+UJ)Dk?5%WaEr@ZLRpbY2)Bejhj|
zz&Q=dfJ{Vv>H3!qOn<?Xz$2B+m*)c}!h?$E)~|dZJxpzS$@lNe50jo0(i<mwm*hw9
z${s{^!5F`74<gP_dt~fGv1-fO??75R0mX`tX|e^z@#roQ=eE#xXa6t{NB2k{I!a!n
z`-Jh_nbS%tPi7u(7)!wVb~&vM3`qBQap4s*W+p)1#BrZX!YHgN*T~Oantn;pr-|e&
zZWc?K6?-}I88Sz9Wc`~&N|n)P$;d&3I<6Ja9(|5<fZ`I2t9bZ)i*x`oE*=trvT`$5
zE-m9RHk)56!$cfLsJDr9!+s`m=<}qb<M&|#vY0Nf!4w*Ofyhc(RM8iE*%ww<OXy3s
zz&^4We4Pq?nFuPN7wL{)A?rZ5f%>$d@6{wY4nV(PoC5-v%Qsin%7x5Q7D!Xbq|3SN
z^|HdLI{K<{8J=_Z)E!kecSK#Sa9X@t*)g}wZ7B}ZRaiRg%ZYXqeXUmqvZnh*a_M$=
z^mQVgFag$AQW(T0!8aTdDCn<s3Au=S0`yDdHsZI@H%ayCW*v`Q=v!nqEiIwHPTI|C
zmba{3^vho%$#FY<`!)2PZaWeAr5~(0Op0fl?u0NIUeqK@nPMpki{K<~&CZ=N_$X*6
z+tp^vjGG6Qy|<$I1S~`JI48$BWuBK81Ytqq6PzH)g64msEeP;WO~|s$Yh37wq$UJG
z=5)#N2mw@mmp*vqb20j}NbFZ)@wtCKw>kUYX1_Rle&+AbEKUE%>Gt&G)Zd(vC;xD=
zGVwnrzCLkz{2!06j{Rut(__a+e{1yRkspj$(Vs=X9sS_&KO4S1^yfo|LuVp?Cqh;{
zU{&y2?@|$2HjDy{0#AklwTn}*3wVCwVk(uo=m?jCY6D~qull7BkzAsl&{a|9W&aap
z0gE*$p~<=|P*2naQCZ;BgedB=s@E=fNEc`%!JnTGNfMO66L~=v6DrT?D)o-CAczY*
zmyk7u7p2;H59vIG#7Tktgax}!qD18fWgtkB#`BWsf5%3W6TB{Qx=?%CLwcG<qTce=
zNsx7aelC)Xmm-wh2f8W?BA-y;zNFU9c}V9dB%XT9hXkvKen_e!i99v-kQU_i1kcN|
zB-ftuke;HDIO;7Q5~uMYBmvq<RX90JSD-k|4Mah%o%N8;(n!JloOTj)Jy-@PhL})v
zO&5Y~D=&yJp6a5cDYY{m(isX#ID&eD&IijtkT5<Md<T&9gber!S3B(?ou-hEYA2Wn
z^sSSws-hAu1Ljv=5f!a=%0oIuBL(wwsi)Gs3>A`af0BW})NoJZ^n}7Iip1AWdPpZ}
zq#(^*Bt;HN4}t`&ulK1FrzLb*;Uuwk!b3ViAwg#g&Z9`rg*}g883>ZbiO|sk@5l>)
zB#R*0^xAO`={SuPe9Ntq6zWd|NmViX_eIiRHpkVDc}T}-q#(^*BvA=U4+(QQz$jvn
z_oM|;ODLiQLZTM)kYY4aP!hOE0zKCx909T%dW9a6y#z@W6xcPX&3Q<36cQh74Ht<I
z^;}XXMb&gE$gh9|(g+$><Z81X(kzV>e5;2PDg!~1bzTVdL4pLt=6Int;~~vZNZe8F
z#0kN65*K8ko+c=v(NkK0{v->k#4ELF4{4f0qDNL}4W~}LE(U80NWc-mdU}{6fXg*r
zQdF@v<snT`NFbQ|_9q^AHt@u*lPt=icSxOdNtY$5Ht8WvQb_y})Z<lp49DUXQxBLt
zEMW-}2ndenY7-vP1dSBzg)a4!5RF9KpHyB~<Y4JUygn0nl~<+OxQ8@OAssc3;PHS*
z(-n}U1m0+rf;|I}V8+6WFznaHJftxS=}3-%fi;YT$6OAkETJb*C+L9CwzW|YX_P`b
zYIKJQQ2#oCgq4E4XV(d4O)xha@sLI+q|nIfwi681fszZD%N5w0R>DZUkboj^FuF%Q
zq$rIP?4K@@AooXt-V_>7MBH;VT@j(@4tq$$6cTq760k(SI>B^BSHippNH9MG>AO2L
z6`6GA<up)mV0Ej+3E?NCJ3-Nc(+HR>3kgxzB$dbO|I0(wNbEbYYjfY9GiLwm?Ax<v
zXa4R?YWm+!-<gg~eS7Mq$$vUon)va=XD3dM|E=+t$Npq&cl3XaeslDNk>49xi~cD3
zMl?438^ik09}iVW)1#+H{%qtQj(lZgbL6GyPosYn{k3Q#`qAjI;r~4RJHwwJE)73F
z^cO?_a_CowtRd@&C5@#5m51y#$ypznIWat)gzXd#hASD?7=ALV3la?WTtd_oO{AJe
zgQ_pcT0&F>O;xQAdQj6eRG*n4fdU~!W#~e2cq5VGDjP~pSg&{>^E61Pt2*^jz&B>U
z`f!4v!&v1LF#sYih(dw~nWPKWM?I)ZG?W;W7cLa9P>kc&hu1+uQ$)oZR9Kop*(6c5
zKH@<=Lqmm@I4%^Y^(CDEYe$VL9)aRvqfEqW66<9T>LLx*XKc0^BCt|$YXvk4v>$=u
z_=Kj4f~Z*^_Mk4%P@##d%X>Q4mw=)UQ`$hSV5tmHD%4jHtq*xn=V>T<B}nE0cC9pe
zG<9nQJFBWrNqt}o3I1U==Vi`X@SvWip+t(Cd{Bx^2`(2(PysB!7Qlh9%vW^|Mi@)?
zpw7`y^zMKUN~T-Ug@W4}JwB5jB`Sn~s%~i>)KfH+M3vA71#?P&?{T3xIEoLo703x0
zFHB@fdoM+j;??R5T`Ps+CSR>!71Xa*63=T|P$WTH39twfHBqu;59%}x6<WW!%+Axi
z=@JlxtVRz&q^$&)#|sjta+c&louZ*=y8CJcLwny^sZiS>v%`p{z!F^IWW^FasFO5Q
zXl!?D#r2ozqN?aJ)j>$DU^gCi=_OsY1P|&24MlIUc(uZu*{@cj3iGyLtpt1|Ai$0r
zuUNbXb)1IkGjp>?Dh+zSZ>VtFij#QgS^g(bD;^JuFmcmBFmWE#F&avuh~cZ1D);SI
z0<2Hy=^lZCK}6DFJuO*D4=P4Og?1#Iwt{sHRS36Myb7hK3PIXR24yxy6cx@&cu;dR
zRA?&gLcxBbpG+<kY@l(Wwt|VimH=K>VOA0Mpk`?(dRX@n5LQwBpafW<)59sL6|C??
z1@>li>zW5OLqpL+g%3*9`m=??1vVZ43Z#Vy%OOtE_4j11q{}j_V62zCT20Zl3N1O@
zwh~~?(TCY#$p{_Q|Af>EhDMm)h$3%Y^`ItcD0)iiBOp(&h+MWHGLY)rq^)4>CCMr@
zqjklDnxLWRIh_wGFyr;0KvV>o9XL?HTS2&Az34%W(@=fZSVUUDbcK=$E|j2&Fgync
zNT$#_EUN{{dclJlqoMjNlWizXqa+<b!B!e<Y)UGO6#*!a?D%kjgRL~{c@Juoh6=5v
z9JUZu(6OW0!iM5-C-6V9`<0#$6dCp+tq*umBQ#WKW2^^7&*ohM!rE1(=OYA4P!ghm
zXHwQ>4=PGS(Gx9?EkuPLYh5T_;x&550H8p!1GCF2Y&}`ec~HYNRA>|0t(6R5Gy%!V
z3gjG~#rmGO%nqwfUg2xc!UEP=#L-CfpyaC*tgQU`xs`$`yC4O*0!Bud2I3P7tu_yp
za+b>T)LNMyc?c5OG=^cFVs&z;17ng5yQsn1!WJCNmozwW68YLC59tz(L{Y+rq|iqr
zE|LO!TnhCL=>xEpt3qcJ_1ZHY(lay?)$4soSeew<8szv0PgF!%qvantmcvIQux92&
zyas1V{Y<H!s0$IRk4Mn3W=j3gdzex`Ho0L+{Y<I9r|r#@`k7KcQ|gaIm{R`$C1R{0
z?P~eDddElU#k*SUeGUB{My;JG^%JRN>%;fd_{opn6;tYGO8xIwrT&%2QR;Urr{9X+
z9J)DwbLh(`(nqtOOGsGphgzbO^F@(M;O$y^PaB@#Jrme=&?NcY*X+kD`@x^t*&jzP
zN4^@F{oK%}W>3xh?U|2E|6sZ{^}naSHT7cTtCPP!xjyk{6Q7<qHvXIA3uFI!_}9m_
zNB?r@4@bW-dU@m@jjRkSk^d3>Vf1cvCieH{{(US?@Luzy+PD07j{0rg^fsraeR>>l
z>`x!sxm!kX+N06iHH6l%4X?u<a;OztTEmv0^_mBDk%pr8^*t!iN($^j!C|=5tn{9=
z+pLlZC(W{T-GdsYp;T(e--F`dK*zt8W7i6_W?>_i-neq1MA)9^HS4wqb(V$#a}(d*
zkOw8e$&&v`4@we1Gb!+d5KxfdG)1sJ;Xz%Xq3DAk9~2y!1u_I}0zDx?m!Y@6-CBWK
z9_%t&B@gOU0E*f#^q_dKiV8ykTY%auy?g6Iad6DS^VX+4sCgQS-c|LWKy{%H0pTP~
z!0L;ELTFf%2s9}1u|wH|x<o^TRQ<?KEbt}hb_Fsd1O$nrhw2M$#X)^R4OX{4=|Me1
zLxnW(Tqq@YY>YKONWTIK1_}YS5)*J7DoLvKaS!S|4Mm^M_y{P`^|d(=ps=Dmb<k(G
z71o}B?OWdZm<RPV4HeSUb!!D%p@Fj7wbCV7qdJHU#ep^*C@k~VrU!K{07dbcuU0}(
z704kVFM=2e6aok+B~%z3pkIOYuB-a=6b(gZ=z|Jq*4hLFX#wXs^p>zoKpu{C1l_vj
zL7kzYXpJc!6sY3_%1$_t<1lLmo&X1eQVl4=!ePpW2X&f;qEFX7DA0KA)32abaB@u_
zEA?uHDX3aS59(w9iaK-hK}o@L45zI?H<?z0aG_MJ7B5-_59$OBMH9mZCDO+Mc3WY=
zq=Bpp5s-r;B9QD7XRUis#{*Cl<NBdO1EAejP<F6(@r?#<tt1`{?^SEfgE~e-!9k4g
zH6N4^5~?;73>EZgBY}cifleteD%Pq86{Dd->eEga1|12i$MpyZsvop*gI6n1R+Fr}
z2Q^1S(T7gHS_KYhdQe~i6{;_EVI`r%DUmK&D<0G=4MiW?c~J0LpMC{w0mnpiUvz7Q
z6>MP|lk=cvXsAB+5^(eiGbWurq_k@V{R#&B(C|VA5tw8_X4b17)D#Ux>*DxorP9g{
zJpw|G^gE<hKtMReg<fxEJ*Y_<ik1mJD21L&^`JnkhlUr@R-lj#wocZv2Q@)MVbLEn
z+$99WW^TSGJtz?D!BbwC-b1b60999cYsrHer=jSHrLR`uDVRN8NieDh8OG*7u-ec$
z>xKt4MnkDo&-X!rN>I2~z!sn+K^vpEZ3Uw}C<s~^4{DT#q6cpu6wH3YP<U=3hg5Yj
z0p)}Wg9Hesv<EdpL(%fg2L)z~VJN7T1}q(%3J_)ojZHWNwiZ38C=J!;NZIaJ0WI1d
zTL^gB7bpZ78f6g<>$zIWJ0_=*`sj^gBoP!_`&0@G3$foc5T+KO&;W8c_Ev;Mc{^(P
z^}f5mzJ*)yQUpv}Bg5n7J+s>FSdCq<ZiR)EPmxWX2US_H_k>w2XxhPquh&}c69KFz
zu>Fg;UiGq!hO+dU#eLDv5>NSDiQzsGU~&ma3I}9QJUr>wDwtjDItB549nM9u@j2hN
zgZl*VVTKLLQ5c)K_hat;Knvt?e62A}0O&_d6M$&~F!z4Md8O<3jcEcfO#r3|z%&8&
zVRjL4-O1eh4bpUA?)@Au&dj}E1UtWD?)}WJ3GV%{>vwSPR}cC3!`|U}#dYwvZ<d?;
z9rVk@mmpl!JGFTMeVcd+tQ~+?K=d8r5l~w~zvA}@_^X~r!0*0>{)X)l@aE$w5161o
zPz4v*57ZyP#t9M}e0bXhuFpX11p!;qQCp9GlL3u)tOS4pkNnw0ECRsw1d9N`A^^bN
zFpB`dA^_O2P7sR#z#;%RF%MV-02Tp&MF4m}_y-mN;2~oZ5LJ8IIr~xT|A$E88bh!x
z0=7lKA^?c*eguI0<E;1X2mqZ{#cK2q0RZLz;IAg6cZjt{z%z~@(rP9|=LT%~vC54i
zKE^`-h@Alz{g18v+1j72{aN%s7X6P!|I=9XKNkIuL?dO<|47_s7X7cU>H>@Yx0htm
z|5)_D_h0nC)yH4^Z<_~4t^C2CCb-b<Sp>ilKUv@7;C~SKf(|8Pul&KZ41#$G%Dcbv
zk3_Kl|Cb}*jl_OC_OaM_<j?1RcdjxwH~V|Dwb|1%-=AsDTp0T3^uL-un7%yqZzA8F
zdK01n{JY67O!5=|Y2s@W+W3#gzd61&E{qRDRDkb|wZ^WGof!S$=<kfaJz5xjZsaE;
z-yiwb$kvE3G93Mr==Y+n==JD{;U7XIfVYPW!_N)<Wa#@t-#TJbV*S9gwKzlBI)GJ=
zFU*E((Lih+z!JKz`36LF03W97yIz*#51S>#q46wHT`;f+!C8vhI`XnS9?a5bTkpPT
zmlD)CdhPE%0gDXqiV8M!@k8v;atMNTry`O0iMtgGo2`V>LrxI;2>j#653yTHC;g=X
z>A_2G{17{rbkaXFAU!Yg@k1OV&Pji9Kzi_t96!X7-kkIo9-JN`!$J9Ryf!EO`2p!6
zo<sZ)M_qH$KRqBlIE9WM;@D|U`f~%)E4mmz#1YV(^iK^+50O0&aU3%z{n-KO!3jeA
z5Jw|((w`ZS9(<z44{=N}C;jOG>A|mi{18V5bJCw0kRJL{{1C_Ya?+n1kRDh%eu$%V
zIq6RfNDoGl@k1PI%SnIy!RZBEQ{sm>VwRKs*nsrl+cbWN<6b%GV}sH|0NO(wy~;^H
zHy}O8>G&azLFJ^M9grT3CF6%Ul9ZEvW<Yu{LX98dcu-FI=>h4%lXUzLM|E=2PYp;9
zwx{t!9Gl5WKRF;hIBbs};s{Gl`iTMQ!45foh~p$V>Bk49$NdFIJ95&G4M-0LsPRJ_
zv&cz5`r!1~EINLOBM&+0M+T$^2Qu-)sFyxEAU%Z8iyz`BKu-R{1JZ->=e0u|%g0GS
zAURkWt{+Bl_O^}Rd~itdc}j5y4l<;O;9xv7MBpI>Z1Z_#{c~QHmx5XP{JhXvDn9oS
z?h|Y`2*aVuuYcCd@|jSUo|&=xqFqWTOSqIc%nMjZ3+r!sSzZif8Gg~uQueteaKXq}
z3h^)b_0M=&UI=E{XArit^t%%9Wyyo>t;DZ?+RO5MFv}yG#~;eEmnFD<0{=Ujxc-Kh
z<<r3|eTLcYi*_lA^Ki#R-F<@RB^)^Qk=E~fS)L1J={crxU$nFIyApAq;4Hy=xX7*F
z^Rj#@l;u$^?sqWc&yu*CU%%^Rc{Y@#=Mckv(Jm!8w&=@5P)JQ$?|NCD4rb}|971JD
zdGT?dV5?tIk`-~i<7Ig&n5ECJjr$@gr2x)I!llGMdB7H1T5o$<o(yK$#{${T62jqx
zv&78<j$ZinmY3y;P?lkeIa&I=gt+C0-lIa8PLW^V_p&?|$}(I%J4^rK#+N0IpeD%c
zO)tw>C`->Vfcv78Wk0b2algQ|hajytye#K}SsvAU1i|Mtzy-ssE<<b-ZoTehIUCH<
z7p%yA(Jm#AL%;?*B$1b()xEywWjPbf@+c7uf;QZ6q(g#d5N-XAm*sRQ%kYb&l+eZd
zuaF?_oWifa?qxX@%<`z-13~lp@+GLNgZGGa%gb^ylx6ruyOdfVlHiJiQ#D0juX$Nc
zgtGJ;J5bHTg9HD*&7&!b%B}BsS&oIW47aOYN)Qg=EO9A;?SysH%W{;;68sPNmicrk
zC11=3_X(CnGIV}^ecQ`&B$#ENm9Je&SY-7rC5#lhj91^x|3CBp&;0-IC)@2>{C+9L
zFTVQ5*5>N<VzRPpHagJvV0c9zAa+Cecyt9lZ{Ti&UNBC;7qj|jR{zZEpIQAE7Yo_7
z5?V8`PaS6U&#eBL)jzZPw@A<d@nOOfFqQs#wc2PTIu=vuXDa<qK&3zTILG?tJy7Sb
zb{=TI5B?d!$GJ-TymiAEa=Zc?b-jhSee4hZJneYun(!nJ_wg#_AwEzy6nztoTrs90
z_2OziohlcXbGLG6)EI(0JQ_33!gs2aFO}1&QtEoDm?@{Th0J1UwZOawl$V(IfVCX6
zAYc{*%z}Vf5HJe@W<gM976i<KptOuy2D2crDo1UDFbe`^K>(4H%`L0aKyQ+zRvCTP
zn1Q7h**^w95J}h-LZ2fZ?63#G#Y`@T-Xae0c3ajqIy6o~;+31Za%s7cNu{&-r7}#!
zGX?ZEIj+j)OPL({yfFu<@cS7fvzRWgtrkk?3kLR<2a&SS7kk;i_YMZ1hBB1<SrCX2
zIEY7IH7-K}?w-0MZEb6sT&aNmX0@_oN_=%YzGrP&&HBEaXgAT<dX-2O7MIa4l1n$V
zDEhi_%%Q;gN~&1O6l@xN!=b^u?mpm8Pd>_C!;MripD87Ai+1jm_nN!y<b$>wT(tmv
zx(_=F{q&58W*G$g=tVt%+OrR%0HXk-0HXk-0HXk-0HXk-0HXk-0HXk-0HeVF&lF(i
z|B=W&O(`o9I%Z&HW+S~+UdiM)%4>zpjjVx=lg%`|qaQ~n$nIM%m0v<9AzF7NVm8?M
zf40cZ|D^;w|7Yj_?EIgd|FiRdcK#n>=l|^dzp>yP1NIMo$j<-S`9C}Ve_x*epL)#m
ze+U=&x_N}Tf$#iZmUu-G9T#zbr7aBX|8FAlR%Gtj%={EL@ygiR$ft();l_LPd54=g
z75UO9&!3C@!RsTD!*d^)0IzrFBX31tLy;>+6-x0~ap{v=&8@_KW#4QiXzL<uk(F%k
zH=8?&{rhN$tmN<3tCe`Qxz}u=VPhPicPi~p!vy09@;DxcR_Qb$0U9wT;qh*((%9~p
z?GD&UJO$S{fg9-2+>UQsEoj1K>pqxId=yv2j{4Fu>-*TU9Ng-f$(6;$e7;NyxL>K>
zf!oPardUdnQY8J^B_T*7-kXTW+ubc+-V=uSh<Q8eCTNmqzr+s?&=d)O8IKd4qG__(
zLCT6|oQh+}PNjVZ%@Vpe+==GQg!{A1Z&&WW1y&>xlGmNPmDX+>4ZVqmuaH;34z2^`
zJpN8@JlNi;$R*a^nCpIS+uPAb!tL$;e8~2Wy>ou-ROE}x&z`&Z2Q}QkPfes!sq+)>
zdH<dO<|F;PvbP7ldlW8-K!Cn|8h*#)aKCvsUbi5SRbw~4W9^x3=;r-;+^E698;{bP
zQ@LCcguoa9BAslUbS%=z;s;ySE}9_QiqIJkpkLx`MZ2GqPB%sL_o|@k*6o;a5?rQ@
zrb)NLeYV-%hmMMa;+hAQJv3`3?D#X}inP-~wE;e7RDMAJeB6CtD8cAEPmMx*-?-$o
z_gU87PpG}I_9l24_L>`C_*+|i!mT}youJIT^UMge_p6tk_CC+r`w6u-mRKaBl-J_+
zrLWDoC)?&wg6Z#dqtM<Hc)o_^`AL|sF?qfXb2BWnA6lN{$)!7UntrrN)Z+^2M+o%|
zg`D_acbK*JV~^jsz1^7;w72{7VJ1=UZF|qXBM(7)&prEW<e%i7^%(2lkKDhpFz^;{
zh;(sxWoQr5<$9;~#0Th?pcvuhbFEo76X46hf~Qk>{U4p;ob^9kun(gEqX44-qX44-
zqX44-qX46Tj{@)Hr%y$`{LHh?dCeu*?_uXPKhNhi*z`NFbv#FH9ml+_<74#JF}!)^
zotsn8-WM)8?R}B8_s{wH+(WdtyLH@aZfENl+WYi7>yyyl^Ov3WewMZO|NHIjZ5{Vo
zoNOIKd!K^8h`l56e|tCLpr<;^;t{ZT1Y|2c(LSgrvA#qCJNhs?Evw22573=JPZPb2
z#nl{mILQ=N(0Ov*X?0Cot>Xf5u3~$*bZk2=8Xtx(;GE`m+h(hmf8A=h?t5L|ynR(k
z&@;r@2sRqIRmvC8CF03NMe}5sEo5>TaF>amHSks&GZN^unUO$G(jlEVrg0c@v$VXL
z$3A+{f^l^)Q(_IK{rjC-vw=QDn5$S^E~l0<;G~tsBVh3eSUiHr;s$s!zLhOzujjyD
zapqQbF;hgJAZl;AuA|q88j$Vj_>)9v3#i<a<=BNG`V>jhHY*(tRmdZ3`L_YR3~iB1
zV*R&ZHc*vZ-p=OJtGBahv`sEWP3AO7Q;|u|hnLbT#Y_>iCfXsZE@mXaj0BjGK((@G
z2T16)TlNna+7^flyuBuVxLc{4ZTp7vlS~icXtUkywyI{b-Q4Mb#wq-$wygaQq_q>^
zqt4!Z4v&{D&}2n-jpH!7+JkO7b0f8pE0r_(Tj(Cq;yp@Z7~Ln@fKCT4ucY!I1q$d5
zV+k^|%V~8gd)7T(TzDnf0VUrA$eVb#VcknY2q{&$Mt=6v^h<(1O<b@SH^E2UO1ZST
zmO-B(YJ<#4;QP=?;OR%vA?@QN&{p##OZm5=`9v?afX_fcm1Irh1{eqoRNQ_Kbx_#a
zpRN7b+MmTD#MUv0BLlS^U~y$Ny#cfMwG_-4`PvmSFJB`gut28Lps(LXA{nC7nc~gT
z>KYg!+jDKnn`_Ht^1pUFjTGY+UShY*db4BNg7{Iln~pRtLMlOAUR}u`jkxITGaq4V
zf0VAhKpHHS!_JF0i<we!8GAU+7$1bTX`7vP&D`5tfGC`1tJ3=}?=)e~w*XVEN*kty
zMBn-bp)%frbLVFc6QCup@s|l04qgMXYkFW=GV601C^Vx)uljg!Eo-c#)@&ZU2|S1`
zN8HWC)s?kU8JIMCBMY;_oPj+XUpZ<nc@?dYok+J3gsbz$HO!GmPHi1>B%9q%+?fxq
zlCs~*7D^kb986(v7gB3$;N6(5{n^^ToJyw)AOX>yaRK-WFFqFbE1lZQ$yT=ki!0da
z<`o%2({<aZPI?CJK81~ZDZ4_VJD~=#JED`LCfUg_QX8;YS;&?)(Y|rXV_Ry$l`tw@
z*q&@zo%;xpjZS|DB|Ogp1RUAj!4#A^2w-b}<{;pG=^)^YGtNJhHTaQhe^A^wYUQsB
z60hn5R{kKk;qwrt+iVAbxd_~YQ}B71lpk2FPPc-u922(r`W^RIb=&<_zXMyKIC{w$
zatxkkE&|!<zu97^|GpCmwgbR+0N4(|GBS+kVHT2vEx+VW%QUym_MJ|1Kbf}L`+Jr9
zE0qSg#c3sr<Pd(dx6!k>Q7B+t7k8uQV`QVpxUrZ)A2+aGBRl<PI{@gwm>z!6rW&lW
zwFD2Yd)pRq3FU?ib5D@}N*aOoA-27;KPm%oh_qcH=iF=ufSvvm1p#*YpKSa4V;#aG
h{sP%Ze?Nu@Jo~Qf0AMFi{kH(51kVeCFu*(D{{g^lF9HAn

literal 0
HcmV?d00001

-- 
GitLab