From 93c8501566649fa7bd3ff67d87ec2b2670f55118 Mon Sep 17 00:00:00 2001 From: ageorge <anjith.george@idiap.ch> Date: Wed, 1 May 2019 16:35:39 +0200 Subject: [PATCH] mods and clean up --- .../FASNet_config.py} | 10 +-- .../MCCNN_config.py} | 9 +-- .../mccnn/tifs2018/config/haralick_svm.py | 36 +++++---- .../tifs2018/config/{qm_lr.py => iqm_lr.py} | 2 +- .../mccnn/tifs2018/extractor/HaralickRDWT.py | 2 - .../mccnn/tifs2018/script/mean_fusion.py | 4 +- .../{wmca_FASNet.py => wmca_fasnet.py} | 23 +----- .../{wmca_mccnn_v1.py => wmca_mccnn.py} | 22 +---- buildout.cfg | 51 ++++++++++-- doc/running_baslines.md | 81 ++++++++++--------- setup.py | 13 +++ 11 files changed, 129 insertions(+), 124 deletions(-) rename bob/paper/mccnn/tifs2018/{extractor/wmca_config_pytorch_extractor_v1_FASNet.py => config/FASNet_config.py} (92%) rename bob/paper/mccnn/tifs2018/{extractor/wmca_config_pytorch_extractor_v1.py => config/MCCNN_config.py} (95%) rename bob/paper/mccnn/tifs2018/config/{qm_lr.py => iqm_lr.py} (99%) rename bob/paper/mccnn/tifs2018/trainer_configs/{wmca_FASNet.py => wmca_fasnet.py} (86%) rename bob/paper/mccnn/tifs2018/trainer_configs/{wmca_mccnn_v1.py => wmca_mccnn.py} (84%) diff --git a/bob/paper/mccnn/tifs2018/extractor/wmca_config_pytorch_extractor_v1_FASNet.py b/bob/paper/mccnn/tifs2018/config/FASNet_config.py similarity index 92% rename from bob/paper/mccnn/tifs2018/extractor/wmca_config_pytorch_extractor_v1_FASNet.py rename to bob/paper/mccnn/tifs2018/config/FASNet_config.py index 210c967..b0f805b 100644 --- a/bob/paper/mccnn/tifs2018/extractor/wmca_config_pytorch_extractor_v1_FASNet.py +++ b/bob/paper/mccnn/tifs2018/config/FASNet_config.py @@ -96,17 +96,9 @@ from bob.learn.pytorch.extractor.image import FASNetExtractor from bob.bio.video.extractor import Wrapper -MODEL_FILE='/idiap/temp/ageorge/WMCA_FASNet/0_1_2_False_conv1-block1-ffc_3_grandtest-color-50/model_25_0.pth' +MODEL_FILE= None # Replace with '<PATH_TO_MODEL>' #################################################################### -ADAPTED_LAYERS= 'conv1-group1-block1-block2-group2-block3-group3-block4-group4-fc-ffc' -#################################################################### - -SELECTED_CHANNELS= [0,1,2,3] -#################################################################### - - -NUM_CHANNELS_USED=len(SELECTED_CHANNELS) _img_transform = transforms.Compose([transforms.ToPILImage(),transforms.Resize(224, interpolation=2),transforms.ToTensor(),transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])]) diff --git a/bob/paper/mccnn/tifs2018/extractor/wmca_config_pytorch_extractor_v1.py b/bob/paper/mccnn/tifs2018/config/MCCNN_config.py similarity index 95% rename from bob/paper/mccnn/tifs2018/extractor/wmca_config_pytorch_extractor_v1.py rename to bob/paper/mccnn/tifs2018/config/MCCNN_config.py index d8b41f7..71e02fc 100644 --- a/bob/paper/mccnn/tifs2018/extractor/wmca_config_pytorch_extractor_v1.py +++ b/bob/paper/mccnn/tifs2018/config/MCCNN_config.py @@ -1,5 +1,3 @@ - - # ============================================================================= # define instance of the preprocessor: @@ -77,7 +75,6 @@ _preprocessor_ir = Wrapper(preprocessor = _image_preprocessor_ir, _preprocessors[_channel_names[1]] = _preprocessor_ir - _preprocessors[_channel_names[2]] = _preprocessor_ir _preprocessors[_channel_names[3]] = _preprocessor_ir @@ -91,15 +88,14 @@ preprocessor = VideoFaceCropAlignBlockPatch(preprocessors = _preprocessors, #==================================================================================== 
# MC-CNN algorithm - from bob.learn.pytorch.extractor.image import MCCNNExtractor from bob.bio.video.extractor import Wrapper -MODEL_FILE='/idiap/temp/ageorge/Pytorch_WMCA/MCCNNv1/0_1_2_3_False_conv1-group1-block1-block2-group2-block3-group3-block4-group4-fc-ffc_4/model_25_0.pth' +MODEL_FILE= None # Replace with '<PATH_TO_MODEL>' #################################################################### -ADAPTED_LAYERS= 'conv1-group1-block1-block2-group2-block3-group3-block4-group4-fc-ffc' +ADAPTED_LAYERS= 'conv1-group1-block1-ffc' #################################################################### SELECTED_CHANNELS= [0,1,2,3] @@ -110,7 +106,6 @@ NUM_CHANNELS_USED=len(SELECTED_CHANNELS) _img_transform = transforms.Compose([ChannelSelect(selected_channels = SELECTED_CHANNELS),transforms.ToTensor()]) - _image_extracor=MCCNNExtractor(num_channels_used=NUM_CHANNELS_USED, transforms=_img_transform, model_file=MODEL_FILE) extractor = Wrapper(_image_extracor) diff --git a/bob/paper/mccnn/tifs2018/config/haralick_svm.py b/bob/paper/mccnn/tifs2018/config/haralick_svm.py index 3b9a04d..d2e1793 100644 --- a/bob/paper/mccnn/tifs2018/config/haralick_svm.py +++ b/bob/paper/mccnn/tifs2018/config/haralick_svm.py @@ -13,24 +13,26 @@ from bob.bio.video.utils import FrameSelector from bob.bio.base.preprocessor import Preprocessor -import sys -sys.path.append('/idiap/user/ageorge/WORK/MCCNN/bob.paper.mccnn.tifs2018/baseline_configs/extractor/') - -class DummyPreprocessor(Preprocessor): - - def __init__(self): - - - Preprocessor.__init__(self) - - def __call__(self,image,annotations=None): - return image - - -_preprocessor=DummyPreprocessor() - -preprocessor=Wrapper(_preprocessor) +FACE_SIZE = 128 # The size of the resulting face +RGB_OUTPUT_FLAG = False # RGB output +USE_FACE_ALIGNMENT = True # use annotations +MAX_IMAGE_SIZE = None # no limiting here +FACE_DETECTION_METHOD = None # use annotations 'mtcnn' if no annotations available +MIN_FACE_SIZE = 50 # skip small faces +ALIGNMENT_TYPE='lightcnn' +_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE, + rgb_output_flag = RGB_OUTPUT_FLAG, + use_face_alignment = USE_FACE_ALIGNMENT, + alignment_type=ALIGNMENT_TYPE, + max_image_size = MAX_IMAGE_SIZE, + face_detection_method = FACE_DETECTION_METHOD, + min_face_size = MIN_FACE_SIZE) + +_frame_selector = FrameSelector(selection_style = "all") + +preprocessor = Wrapper(preprocessor = _image_preprocessor, + frame_selector = _frame_selector) #==================== DUmmy diff --git a/bob/paper/mccnn/tifs2018/config/qm_lr.py b/bob/paper/mccnn/tifs2018/config/iqm_lr.py similarity index 99% rename from bob/paper/mccnn/tifs2018/config/qm_lr.py rename to bob/paper/mccnn/tifs2018/config/iqm_lr.py index 9c337f6..a3c5e9e 100644 --- a/bob/paper/mccnn/tifs2018/config/qm_lr.py +++ b/bob/paper/mccnn/tifs2018/config/iqm_lr.py @@ -7,7 +7,7 @@ The IQM features used in this algorithm/resource are introduced in the following """ #======================================================================================= -sub_directory = 'qm_lr' +sub_directory = 'iqm_lr' """ Sub-directory where results will be placed. 
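Note on the two extractor configs above: after this change, MODEL_FILE ships as ``None`` and has to be pointed at a trained model before the extractor can run. Below is a minimal sketch of how the MC-CNN extractor section of ``MCCNN_config.py`` is expected to look once filled in; the ``torchvision`` import and the ``'<PATH_TO_MODEL>'`` value are illustrative assumptions, not part of the patch.

.. code-block:: python

    # Sketch: MCCNN_config.py extractor section with the MODEL_FILE placeholder filled in.
    # '<PATH_TO_MODEL>' is a hypothetical path to a model produced by ./bin/train_mccnn.py
    # (e.g. a 'model_25_0.pth' checkpoint); `transforms` is assumed to come from torchvision.
    from torchvision import transforms

    from bob.learn.pytorch.datasets import ChannelSelect
    from bob.learn.pytorch.extractor.image import MCCNNExtractor
    from bob.bio.video.extractor import Wrapper

    MODEL_FILE = '<PATH_TO_MODEL>'           # trained MC-CNN checkpoint
    SELECTED_CHANNELS = [0, 1, 2, 3]         # the four channels used by MC-CNN
    NUM_CHANNELS_USED = len(SELECTED_CHANNELS)

    # Select the channels and convert the multi-channel image to a tensor.
    _img_transform = transforms.Compose(
        [ChannelSelect(selected_channels=SELECTED_CHANNELS), transforms.ToTensor()])

    # Frame-level extractor, wrapped so that it is applied to every frame of a video.
    _image_extractor = MCCNNExtractor(num_channels_used=NUM_CHANNELS_USED,
                                      transforms=_img_transform,
                                      model_file=MODEL_FILE)
    extractor = Wrapper(_image_extractor)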
diff --git a/bob/paper/mccnn/tifs2018/extractor/HaralickRDWT.py b/bob/paper/mccnn/tifs2018/extractor/HaralickRDWT.py index 08e2c9c..30d3ff2 100644 --- a/bob/paper/mccnn/tifs2018/extractor/HaralickRDWT.py +++ b/bob/paper/mccnn/tifs2018/extractor/HaralickRDWT.py @@ -8,8 +8,6 @@ import numpy as np import pywt import mahotas -#TODO: Move to paper package, number of directions?, type casting, - class HaralickRDWT(Extractor): """Calculates RDWT+Haralick feature descriptors for 2D and multi-channel images. diff --git a/bob/paper/mccnn/tifs2018/script/mean_fusion.py b/bob/paper/mccnn/tifs2018/script/mean_fusion.py index 04b2df1..730e672 100644 --- a/bob/paper/mccnn/tifs2018/script/mean_fusion.py +++ b/bob/paper/mccnn/tifs2018/script/mean_fusion.py @@ -38,7 +38,7 @@ class Mean(): if self.weights is not None: X=X*self.weights - prob=np.mean(X,axis=1)#-11.35396267 + prob=np.mean(X,axis=1) res=np.vstack((1.0-prob,prob)).T return res @@ -101,7 +101,7 @@ def main(cmd_params=None): if save_path == ".": - save_path='/idiap/temp/ageorge/WMCA_channels_baseline/mean_fusion/'+protocol+'/fusionmine/' + save_path=protocol+'/fusion/' for group in groups: diff --git a/bob/paper/mccnn/tifs2018/trainer_configs/wmca_FASNet.py b/bob/paper/mccnn/tifs2018/trainer_configs/wmca_fasnet.py similarity index 86% rename from bob/paper/mccnn/tifs2018/trainer_configs/wmca_FASNet.py rename to bob/paper/mccnn/tifs2018/trainer_configs/wmca_fasnet.py index f25a774..e16c819 100644 --- a/bob/paper/mccnn/tifs2018/trainer_configs/wmca_FASNet.py +++ b/bob/paper/mccnn/tifs2018/trainer_configs/wmca_fasnet.py @@ -34,9 +34,9 @@ from bob.learn.pytorch.datasets import ChannelSelect, RandomHorizontalFlipImage #============================================================================== # Initialize the bob database instance -data_folder_train='/idiap/temp/ageorge/WMCA_channels_baseline/color/preprocessed/' +data_folder_train='<FASNET_PREPROCESSED_FOLDER>' -output_base_path='/idiap/temp/ageorge/WMCA_FASNet/' +output_base_path='<FASNET_CNN_OUTPUT_PATH>' unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask'] @@ -138,22 +138,3 @@ output_dir = output_base_path+UID assert(len(SELECTED_CHANNELS)==NUM_CHANNELS) network=FASNet(pretrained=True) -#============================================================================== -""" -Note: Running in GPU - -jman submit --queue gpu \ ---name FASNet \ ---log-dir /idiap/temp/ageorge/WMCA_FASNet/logs/ \ ---environment="PYTHONUNBUFFERED=1" -- \ -./bin/train_fasnet.py \ -/idiap/user/ageorge/WORK/MCCNN/bob.paper.mccnn.tifs2018/mccnn_configs/configs/trainer/wmca_FASNet.py --use-gpu -vvv - -Note: Running in cpu - -./bin/train_fasnet.py \ -/idiap/user/ageorge/WORK/MCCNN/bob.paper.mccnn.tifs2018/mccnn_configs/configs/trainer/wmca_FASNet.py -vvv - - -""" - diff --git a/bob/paper/mccnn/tifs2018/trainer_configs/wmca_mccnn_v1.py b/bob/paper/mccnn/tifs2018/trainer_configs/wmca_mccnn.py similarity index 84% rename from bob/paper/mccnn/tifs2018/trainer_configs/wmca_mccnn_v1.py rename to bob/paper/mccnn/tifs2018/trainer_configs/wmca_mccnn.py index 193362c..62c473b 100644 --- a/bob/paper/mccnn/tifs2018/trainer_configs/wmca_mccnn_v1.py +++ b/bob/paper/mccnn/tifs2018/trainer_configs/wmca_mccnn.py @@ -34,9 +34,9 @@ from bob.learn.pytorch.datasets import ChannelSelect, RandomHorizontalFlipImage #============================================================================== # Initialize the bob database instance 
-data_folder_train='/idiap/temp/ageorge/WMCA/preprocessed/' +data_folder_train='<MCCNN_PREPROCESSED_PATH>' -output_base_path='/idiap/temp/ageorge/Pytorch_WMCA/MCCNNv1_new_2/' +output_base_path='<MCCNN_CNN_OUTPUT_PATH>' unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask'] @@ -114,21 +114,3 @@ output_dir = output_base_path+UID assert(len(SELECTED_CHANNELS)==NUM_CHANNELS) network=MCCNN(num_channels = NUM_CHANNELS) -#============================================================================== -""" -Note: Running in GPU - -jman submit --queue gpu \ ---name mccnnv2 \ ---log-dir /idiap/temp/ageorge/Pytorch_WMCA/MCCNNv2/logs/ \ ---environment="PYTHONUNBUFFERED=1" -- \ -./bin/train_mccnn.py \ -/idiap/user/ageorge/WORK/COMMON_ENV_PAD_BATL_DB/src/bob.learn.pytorch/bob/learn/pytorch/config/mccnn/wmca_mccnn.py --use-gpu -vvv - -Note: Running in cpu - -./bin/train_mccnn.py \ -/idiap/user/ageorge/WORK/COMMON_ENV_PAD_BATL_DB/src/bob.learn.pytorch/bob/learn/pytorch/config/mccnn/wmca_mccnn.py -vvv - -""" - diff --git a/buildout.cfg b/buildout.cfg index fbe6a7e..9e6192b 100644 --- a/buildout.cfg +++ b/buildout.cfg @@ -1,14 +1,51 @@ -; vim: set fileencoding=utf-8 : -; Fri Dec 23 13:58:08 CET 2016 - [buildout] parts = scripts -develop = . -eggs = bob.paper.mccnn.tifs2018 +versions = versions + + extensions = bob.buildout + mr.developer + + +auto-checkout = * +debug = false newest = false -verbose = true +verbose = false + + +eggs = bob.extension + bob.measure + bob.bio.base + bob.ip.base + bob.ip.mtcnn + bob.learn.pytorch + bob.pad.face + bob.paper.mccnn.tifs2018 + + +develop = src/bob.extension + src/bob.measure + src/bob.bio.base + src/bob.ip.base + src/bob.ip.mtcnn + src/bob.learn.pytorch + src/bob.pad.face + src/bob.paper.mccnn.tifs2018 + +[versions] + + +[sources] +bob.extension = git git@gitlab.idiap.ch:bob/bob.extension rev=8af04533484c30a90a62bc80116e233d67a72ac6 +bob.measure = git git@gitlab.idiap.ch:bob/bob.measure rev=022f97124a2d512462f64a065269b19de5f62ce6 +bob.bio.base = git git@gitlab.idiap.ch:bob/bob.bio.base rev=d92d0fb9672f95306da8259fef713d459acb5ee6 +bob.ip.base = git git@gitlab.idiap.ch:bob/bob.ip.base rev=a890160bb91ebe24fe92eac75725d99d5a73bb3e +bob.ip.mtcnn = git git@gitlab.idiap.ch:bob/bob.ip.mtcnn rev=eea537b2fa99d0d52285a1353dc4988b58810f29 +bob.learn.pytorch = git git@gitlab.idiap.ch:bob/bob.learn.pytorch rev=03ca71beaf43219ff1a7a33842bb2a389de6d50b +bob.pad.face = git git@gitlab.idiap.ch:bob/bob.pad.face rev=a260437b58694b64b61ca674cea4f698a6b674f0 +bob.paper.mccnn.tifs2018 = git git@gitlab.idiap.ch:bob/bob.paper.mccnn.tifs2018 + [scripts] recipe = bob.buildout:scripts -dependent-scripts = true \ No newline at end of file +dependent-scripts = true diff --git a/doc/running_baslines.md b/doc/running_baslines.md index ea8cec5..a107022 100644 --- a/doc/running_baslines.md +++ b/doc/running_baslines.md @@ -2,19 +2,28 @@ Running baselines in grandtest protocol of WMCA =============================================== In the baseline experiments, initially experiments are done separately for all four channels and score fusion is performed. -Running experiments for each channel is described below. +Running experiments for each channel is described below. The feature-based baselines implemented are Image Quality Measure + Logistic Regression (IQM+LR), +Local Binary Pattern + Logistic Regression (LBP+LR), and Haralick-RDWT-SVM (H-SVM). +For the baselines using Haralick-SVM you need to have the mahotas package.
It is available from conda-forge channel and +can be installed as + +.. code-block:: sh + + conda install -c https://conda.anaconda.org/conda-forge mahotas + +The steps to reproduce the results for grandtest protocol are listed below. Color channel ----------------- +------------- 1.A.1. IQM - LR .. code-block:: sh ./bin/spoof.py \ - <PATH_TO_DATABASE_CONFIG>/batl_db_color.py \ - <PATH_TO_EXTRACTOR_CONFIG>/qm_lr.py \ + wmca-color \ + iqm-lr \ --sub-directory <PATH_TO_BASELINE_RESULTS>/color/ -vvv --grid idiap python bin/scoring.py -df \ @@ -27,8 +36,8 @@ Color channel .. code-block:: sh ./bin/spoof.py \ - <PATH_TO_DATABASE_CONFIG>/batl_db_color.py \ - <PATH_TO_EXTRACTOR_CONFIG>/haralick_svm.py \ + wmca-color \ + haralick-svm \ --skip-preprocessing \ --preprocessed-directory <PATH_TO_BASELINE_RESULTS>/color/preprocessed \ --sub-directory <PATH_TO_BASELINE_RESULTS>/color/haralicksvm/ -vvv --grid idiap @@ -47,8 +56,8 @@ B. Depth channel .. code-block:: sh ./bin/spoof.py \ - <PATH_TO_DATABASE_CONFIG>/batl_db_depth.py \ - <PATH_TO_EXTRACTOR_CONFIG>/lbp_lr_depth.py \ + wmca-depth \ + lbp-lr-depth \ --sub-directory <PATH_TO_BASELINE_RESULTS>/depth/ -vvv --grid idiap python bin/scoring.py -df \ @@ -60,8 +69,8 @@ B. Depth channel .. code-block:: sh ./bin/spoof.py \ - <PATH_TO_DATABASE_CONFIG>/batl_db_depth.py \ - <PATH_TO_EXTRACTOR_CONFIG>/haralick_svm.py \ + wmca-depth \ + haralick-svm \ --skip-preprocessing \ --preprocessed-directory <PATH_TO_BASELINE_RESULTS>/depth/preprocessed \ --sub-directory <PATH_TO_BASELINE_RESULTS>/depth/haralicksvm/ -vvv --grid idiap @@ -79,8 +88,8 @@ C. Infrared channel .. code-block:: sh ./bin/spoof.py \ - <PATH_TO_DATABASE_CONFIG>/batl_db_infrared.py \ - <PATH_TO_EXTRACTOR_CONFIG>/lbp_lr_infrared.py \ + wmca-infrared \ + lbp-lr-infrared \ --sub-directory <PATH_TO_BASELINE_RESULTS>/infrared/ -vvv --grid idiap 1.C.2 Haralick SVM @@ -88,8 +97,8 @@ C. Infrared channel .. code-block:: sh ./bin/spoof.py \ - <PATH_TO_DATABASE_CONFIG>/batl_db_infrared.py \ - <PATH_TO_EXTRACTOR_CONFIG>/haralick_svm.py \ + wmca-infrared \ + haralick-svm \ --skip-preprocessing \ --preprocessed-directory <PATH_TO_BASELINE_RESULTS>/infrared/preprocessed \ --sub-directory <PATH_TO_BASELINE_RESULTS>/infrared/haralicksvm/ -vvv --grid idiap @@ -107,8 +116,8 @@ D. Thermal channel .. code-block:: sh ./bin/spoof.py \ - <PATH_TO_DATABASE_CONFIG>/batl_db_thermal.py \ - <PATH_TO_EXTRACTOR_CONFIG>/lbp_lr_thermal.py \ + wmca-thermal \ + lbp-lr-thermal \ --sub-directory <PATH_TO_BASELINE_RESULTS>/thermal/ -vvv --grid idiap python bin/scoring.py -df \ @@ -120,8 +129,8 @@ D. Thermal channel .. code-block:: sh ./bin/spoof.py \ - <PATH_TO_DATABASE_CONFIG>/batl_db_thermal.py \ - <PATH_TO_EXTRACTOR_CONFIG>/haralick_svm.py \ + wmca-thermal \ + haralick-svm \ --skip-preprocessing \ --preprocessed-directory <PATH_TO_BASELINE_RESULTS>/thermal/preprocessed \ --sub-directory <PATH_TO_BASELINE_RESULTS>/thermal/haralicksvm/ -vvv --grid idiap @@ -170,8 +179,9 @@ A. color unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask'] -PROTOCOL = 'grandtest-color-50'+unseen_protocols[0] +The protocols can be easily indexed as +PROTOCOL = 'grandtest-color-50'+unseen_protocols[0] 1. @@ -179,8 +189,8 @@ PROTOCOL = 'grandtest-color-50'+unseen_protocols[0] .. 
code-block:: sh ./bin/spoof.py \ - <PATH_TO_DATABASE_CONFIG>/batl_db_color.py \ - <PATH_TO_EXTRACTOR_CONFIG>/qm_lr.py \ + wmca-color \ + iqm-lr \ --protocol grandtest-color-50-LOO_fakehead \ --skip-preprocessing --skip-extraction --skip-extractor-training --allow-missing-files \ --extracted-directory <PATH_TO_BASELINE_RESULTS>/color/extracted/ \ @@ -197,8 +207,8 @@ B. depth .. code-block:: sh ./bin/spoof.py \ - <PATH_TO_DATABASE_CONFIG>/batl_db_depth.py \ - <PATH_TO_EXTRACTOR_CONFIG>/lbp_lr_depth.py \ + wmca-depth \ + lbp-lr-depth \ --protocol grandtest-depth-50-LOO_fakehead \ --skip-preprocessing --skip-extraction --skip-extractor-training --allow-missing-files \ --extracted-directory <PATH_TO_BASELINE_RESULTS>/depth/extracted/ \ @@ -215,8 +225,8 @@ C. Infrared .. code-block:: sh ./bin/spoof.py \ - <PATH_TO_DATABASE_CONFIG>/batl_db_infrared.py \ - <PATH_TO_EXTRACTOR_CONFIG>/lbp_lr_infrared.py \ + wmca-infrared \ + lbp-lr-infrared \ --protocol grandtest-infrared-50-LOO_fakehead \ --skip-preprocessing --skip-extraction --skip-extractor-training --allow-missing-files \ --extracted-directory <PATH_TO_BASELINE_RESULTS>/infrared/extracted/ \ @@ -232,8 +242,8 @@ D. Thermal .. code-block:: sh ./bin/spoof.py \ - <PATH_TO_DATABASE_CONFIG>/batl_db_thermal.py \ - <PATH_TO_EXTRACTOR_CONFIG>/lbp_lr_thermal.py \ + wmca-thermal \ + lbp-lr-thermal \ --protocol grandtest-thermal-50-LOO_fakehead \ --skip-preprocessing --skip-extraction --skip-extractor-training --allow-missing-files \ --extracted-directory <PATH_TO_BASELINE_RESULTS>/thermal/extracted/ \ @@ -255,7 +265,6 @@ The protocola are, '-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_pape <PATH_TO_BASELINE_RESULTS>/LOO/color/grandtest-color-50-LOO_fakehead/grandtest-color-50-LOO_fakehead/scores/scores-dev - .. code-block:: sh python bin/scoring.py -df \ @@ -273,19 +282,15 @@ color ----- .. code-block:: sh - ./bin/spoof.py <PATH_TO_DATABASE_CONFIG>/batl_db_color.py <PATH_TO_EXTRACTOR_CONFIG>/haralick_svm.py --protocol grandtest-color-50-LOO_fakehead --skip-preprocessing --skip-extraction --skip-extractor-training --allow-missing-files --extracted-directory <PATH_TO_BASELINE_RESULTS>/color/haralicksvm/extracted/ --grid idiap --sub-directory <PATH_TO_BASELINE_RESULTS>/LOO/color/haralicksvm/grandtest-color-50-LOO_fakehead/ - + ./bin/spoof.py wmca-color haralick-svm --protocol grandtest-color-50-LOO_fakehead --skip-preprocessing --skip-extraction --skip-extractor-training --allow-missing-files --extracted-directory <PATH_TO_BASELINE_RESULTS>/color/haralicksvm/extracted/ --grid idiap --sub-directory <PATH_TO_BASELINE_RESULTS>/LOO/color/haralicksvm/grandtest-color-50-LOO_fakehead/ -All of them together --------------------- - -.. code-block:: sh + +Repeat the same procedure for all four channels. - ./bin/spoof.py <PATH_TO_DATABASE_CONFIG>/batl_db_color.py <PATH_TO_EXTRACTOR_CONFIG>/haralick_svm.py --protocol grandtest-color-50-LOO_fakehead --skip-preprocessing --skip-extraction --skip-extractor-training --allow-missing-files --extracted-directory <PATH_TO_BASELINE_RESULTS>/color/haralicksvm/extracted/ --grid idiap --sub-directory <PATH_TO_BASELINE_RESULTS>/LOO/color/haralicksvm/grandtest-color-50-LOO_fakehead/ +Scorefusion Haralick-SVM +------------------------ +Once the scores from all channels are available, run the following command. -scorefusion Haralick ---------------------- ..
code-block:: sh ./bin/python bin/mean_fusion.py -c <PATH_TO_BASELINE_RESULTS>/LOO/color/haralicksvm/grandtest-color-50-LOO_fakehead/grandtest-color-50-LOO_fakehead/scores/scores-dev -s <PATH_TO_BASELINE_RESULTS>/mean_fusion/haralick/grandtest-color-50-LOO_fakehead/ @@ -297,4 +302,4 @@ scoring for Haralick Mean fusion python bin/scoring.py -df <PATH_TO_BASELINE_RESULTS>/mean_fusion/haralick/grandtest-color-50-LOO_fakehead/scores_mean_fused_dev -ef <PATH_TO_BASELINE_RESULTS>/mean_fusion/haralick/grandtest-color-50-LOO_fakehead/scores_mean_fused_eval -Similarly, repeat the experiment for all protocols. \ No newline at end of file +Similarly, repeat the experiment for all LOO sub-protocols. \ No newline at end of file diff --git a/setup.py b/setup.py index ed83cdc..0aab1cf 100644 --- a/setup.py +++ b/setup.py @@ -100,6 +100,19 @@ setup( 'mc-face-preprocessor = bob.paper.mccnn.tifs2018.config.preprocessor.mc_preprocessor:mc_preprocessor', # returns Multi channel aligned face images ], + 'bob.bio.config': ['wmca-color = bob.paper.mccnn.tifs2018.database.batl_db_color', + 'wmca-depth = bob.paper.mccnn.tifs2018.database.batl_db_depth', + 'wmca-infrared = bob.paper.mccnn.tifs2018.database.batl_db_infrared', + 'wmca-all= bob.paper.mccnn.tifs2018.database.batl_db_rgb_ir_d_t_grandtest', + 'wmca-thermal = bob.paper.mccnn.tifs2018.database.batl_db_thermal', + #configs + 'lbp-lr-infrared = bob.paper.mccnn.tifs2018.config.lbp_lr_infrared', + 'lbp-lr-thermal= bob.paper.mccnn.tifs2018.config.lbp_lr_thermal', + 'lbp-lr-depth= bob.paper.mccnn.tifs2018.config.lbp_lr_depth', + 'fastnet = bob.paper.mccnn.tifs2018.config.FASNet_config', + 'mccnn = bob.paper.mccnn.tifs2018.config.MCCNN_config', + 'iqm-lr = bob.paper.mccnn.tifs2018.config.iqm_lr', + 'haralick-svm = bob.paper.mccnn.tifs2018.config.haralick_svm',] }, -- GitLab
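The setup.py hunk above registers the databases and pipeline configurations as ``bob.bio.config`` resources, which is what allows the shortened command lines (``wmca-color``, ``iqm-lr``, ``haralick-svm``, ...) used in doc/running_baslines.md. As a quick sanity check after buildout, the registered resource names can be listed with ``pkg_resources``; the snippet below is an illustration only (the ``expected`` set simply restates the names from the setup.py hunk, including ``fastnet`` as spelled there) and is not part of the patch.

.. code-block:: python

    # Sketch: verify that the bob.bio.config resources added in setup.py are
    # visible in the environment produced by ./bin/buildout.
    import pkg_resources

    expected = {'wmca-color', 'wmca-depth', 'wmca-infrared', 'wmca-thermal',
                'wmca-all', 'lbp-lr-infrared', 'lbp-lr-thermal', 'lbp-lr-depth',
                'fastnet', 'mccnn', 'iqm-lr', 'haralick-svm'}

    # Collect every resource name registered under the bob.bio.config group.
    registered = {ep.name for ep in
                  pkg_resources.iter_entry_points('bob.bio.config')}

    missing = expected - registered
    print('missing resources:', sorted(missing) if missing else 'none')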