Commit 93c85015 authored by Anjith GEORGE

mods and clean up

parent 45773d92
Pipeline #29786 canceled
@@ -96,17 +96,9 @@ from bob.learn.pytorch.extractor.image import FASNetExtractor
 from bob.bio.video.extractor import Wrapper
-MODEL_FILE='/idiap/temp/ageorge/WMCA_FASNet/0_1_2_False_conv1-block1-ffc_3_grandtest-color-50/model_25_0.pth'
+MODEL_FILE= None # Replace with '<PATH_TO_MODEL>'
 ####################################################################
-ADAPTED_LAYERS= 'conv1-group1-block1-block2-group2-block3-group3-block4-group4-fc-ffc'
-####################################################################
-SELECTED_CHANNELS= [0,1,2,3]
-####################################################################
-NUM_CHANNELS_USED=len(SELECTED_CHANNELS)
 _img_transform = transforms.Compose([transforms.ToPILImage(),transforms.Resize(224, interpolation=2),transforms.ToTensor(),transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                                                                  std=[0.229, 0.224, 0.225])])
...
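For context, the part of this configuration collapsed in the diff presumably instantiates the FASNet extractor from these values, in the same way as the MC-CNN configuration further below. A minimal, hypothetical sketch (assuming ``FASNetExtractor`` accepts ``transforms`` and ``model_file`` keyword arguments; this is not part of the commit):

.. code-block:: python

    # Hypothetical sketch mirroring the MCCNNExtractor usage shown below;
    # FASNet operates on color frames only, hence no channel selection here.
    from bob.learn.pytorch.extractor.image import FASNetExtractor
    from bob.bio.video.extractor import Wrapper

    _image_extractor = FASNetExtractor(transforms=_img_transform, model_file=MODEL_FILE)
    extractor = Wrapper(_image_extractor)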
 # =============================================================================
 # define instance of the preprocessor:
@@ -77,7 +75,6 @@ _preprocessor_ir = Wrapper(preprocessor = _image_preprocessor_ir,
 _preprocessors[_channel_names[1]] = _preprocessor_ir
 _preprocessors[_channel_names[2]] = _preprocessor_ir
 _preprocessors[_channel_names[3]] = _preprocessor_ir
@@ -91,15 +88,14 @@ preprocessor = VideoFaceCropAlignBlockPatch(preprocessors = _preprocessors,
 #====================================================================================
 # MC-CNN algorithm
 from bob.learn.pytorch.extractor.image import MCCNNExtractor
 from bob.bio.video.extractor import Wrapper
-MODEL_FILE='/idiap/temp/ageorge/Pytorch_WMCA/MCCNNv1/0_1_2_3_False_conv1-group1-block1-block2-group2-block3-group3-block4-group4-fc-ffc_4/model_25_0.pth'
+MODEL_FILE= None # Replace with '<PATH_TO_MODEL>'
 ####################################################################
-ADAPTED_LAYERS= 'conv1-group1-block1-block2-group2-block3-group3-block4-group4-fc-ffc'
+ADAPTED_LAYERS= 'conv1-group1-block1-ffc'
 ####################################################################
 SELECTED_CHANNELS= [0,1,2,3]
@@ -110,7 +106,6 @@ NUM_CHANNELS_USED=len(SELECTED_CHANNELS)
 _img_transform = transforms.Compose([ChannelSelect(selected_channels = SELECTED_CHANNELS),transforms.ToTensor()])
 _image_extracor=MCCNNExtractor(num_channels_used=NUM_CHANNELS_USED, transforms=_img_transform, model_file=MODEL_FILE)
 extractor = Wrapper(_image_extracor)
...
@@ -13,24 +13,26 @@ from bob.bio.video.utils import FrameSelector
 from bob.bio.base.preprocessor import Preprocessor
-import sys
-sys.path.append('/idiap/user/ageorge/WORK/MCCNN/bob.paper.mccnn.tifs2018/baseline_configs/extractor/')
-class DummyPreprocessor(Preprocessor):
-    def __init__(self):
-        Preprocessor.__init__(self)
-    def __call__(self,image,annotations=None):
-        return image
-_preprocessor=DummyPreprocessor()
-preprocessor=Wrapper(_preprocessor)
+FACE_SIZE = 128 # The size of the resulting face
+RGB_OUTPUT_FLAG = False # RGB output
+USE_FACE_ALIGNMENT = True # use annotations
+MAX_IMAGE_SIZE = None # no limiting here
+FACE_DETECTION_METHOD = None # use annotations 'mtcnn' if no annotations available
+MIN_FACE_SIZE = 50 # skip small faces
+ALIGNMENT_TYPE = 'lightcnn'
+
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                    rgb_output_flag = RGB_OUTPUT_FLAG,
+                                    use_face_alignment = USE_FACE_ALIGNMENT,
+                                    alignment_type = ALIGNMENT_TYPE,
+                                    max_image_size = MAX_IMAGE_SIZE,
+                                    face_detection_method = FACE_DETECTION_METHOD,
+                                    min_face_size = MIN_FACE_SIZE)
+
+_frame_selector = FrameSelector(selection_style = "all")
+
+preprocessor = Wrapper(preprocessor = _image_preprocessor,
+                       frame_selector = _frame_selector)
 #==================== DUmmy
...
@@ -7,7 +7,7 @@ The IQM features used in this algorithm/resource are introduced in the following
 """
 #=======================================================================================
-sub_directory = 'qm_lr'
+sub_directory = 'iqm_lr'
 """
 Sub-directory where results will be placed.
...
@@ -8,8 +8,6 @@ import numpy as np
 import pywt
 import mahotas
-
-#TODO: Move to paper package, number of directions?, type casting,
 class HaralickRDWT(Extractor):
     """Calculates RDWT+Haralick feature descriptors for 2D and multi-channel images.
...
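As the docstring says, this extractor decomposes each channel with a redundant discrete wavelet transform (RDWT) and computes Haralick texture features on the resulting subbands. The snippet below is a minimal, hypothetical illustration of that recipe for a single 2D channel using ``pywt`` and ``mahotas``; it is not the class implementation, and the wavelet, the single decomposition level, and the 8-bit rescaling are assumptions:

.. code-block:: python

    import numpy as np
    import pywt
    import mahotas

    def rdwt_haralick(channel, wavelet='db1'):
        """Illustrative sketch: 1-level stationary (redundant) DWT + Haralick features."""
        # pywt.swt2 is an undecimated (redundant) wavelet transform; the image
        # dimensions must be divisible by 2 for a single decomposition level.
        (approx, (horiz, vert, diag)), = pywt.swt2(channel.astype(float), wavelet, level=1)
        features = []
        for band in (approx, horiz, vert, diag):
            # mahotas expects an integer image: rescale each subband to 8 bits.
            band = band - band.min()
            band = (255.0 * band / (band.max() + 1e-8)).astype('uint8')
            # 13 Haralick features, averaged over the 4 co-occurrence directions.
            features.append(mahotas.features.haralick(band).mean(axis=0))
        return np.concatenate(features)  # 4 subbands x 13 features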
@@ -38,7 +38,7 @@ class Mean():
         if self.weights is not None:
             X=X*self.weights
-        prob=np.mean(X,axis=1)#-11.35396267
+        prob=np.mean(X,axis=1)
         res=np.vstack((1.0-prob,prob)).T
         return res
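The ``Mean`` fusion rule above simply averages the (optionally weighted) per-channel scores of each sample and returns them in a two-column ``[1 - p, p]`` layout. A small standalone example of what that computation produces (illustrative values only, not taken from the paper):

.. code-block:: python

    import numpy as np

    # Scores of 2 samples from 4 channels (e.g. color, depth, infrared, thermal),
    # one row per sample, one column per channel, already mapped to [0, 1].
    X = np.array([[0.90, 0.80, 0.70, 0.95],
                  [0.10, 0.20, 0.30, 0.05]])

    prob = np.mean(X, axis=1)               # mean fusion across channels
    res = np.vstack((1.0 - prob, prob)).T   # two-column [1 - p, p] layout
    print(res)                              # [[0.1625 0.8375]
                                            #  [0.8375 0.1625]]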
@@ -101,7 +101,7 @@ def main(cmd_params=None):
     if save_path == ".":
-        save_path='/idiap/temp/ageorge/WMCA_channels_baseline/mean_fusion/'+protocol+'/fusionmine/'
+        save_path=protocol+'/fusion/'
     for group in groups:
...
@@ -34,9 +34,9 @@ from bob.learn.pytorch.datasets import ChannelSelect, RandomHorizontalFlipImage
 #==============================================================================
 # Initialize the bob database instance
-data_folder_train='/idiap/temp/ageorge/WMCA_channels_baseline/color/preprocessed/'
-output_base_path='/idiap/temp/ageorge/WMCA_FASNet/'
+data_folder_train='<FASNET_PREPROCESSED_FOLDER>'
+output_base_path='<FASNET_CNN_OUTPUT_PATH>'
 unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
@@ -138,22 +138,3 @@ output_dir = output_base_path+UID
 assert(len(SELECTED_CHANNELS)==NUM_CHANNELS)
 network=FASNet(pretrained=True)
-#==============================================================================
-"""
-Note: Running in GPU
-jman submit --queue gpu \
---name FASNet \
---log-dir /idiap/temp/ageorge/WMCA_FASNet/logs/ \
---environment="PYTHONUNBUFFERED=1" -- \
-./bin/train_fasnet.py \
-/idiap/user/ageorge/WORK/MCCNN/bob.paper.mccnn.tifs2018/mccnn_configs/configs/trainer/wmca_FASNet.py --use-gpu -vvv
-Note: Running in cpu
-./bin/train_fasnet.py \
-/idiap/user/ageorge/WORK/MCCNN/bob.paper.mccnn.tifs2018/mccnn_configs/configs/trainer/wmca_FASNet.py -vvv
-"""
@@ -34,9 +34,9 @@ from bob.learn.pytorch.datasets import ChannelSelect, RandomHorizontalFlipImage
 #==============================================================================
 # Initialize the bob database instance
-data_folder_train='/idiap/temp/ageorge/WMCA/preprocessed/'
-output_base_path='/idiap/temp/ageorge/Pytorch_WMCA/MCCNNv1_new_2/'
+data_folder_train='<MCCNN_PREPROCESSED_PATH>'
+output_base_path='<MCCNN_CNN_OUTPUT_PATH>'
 unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
@@ -114,21 +114,3 @@ output_dir = output_base_path+UID
 assert(len(SELECTED_CHANNELS)==NUM_CHANNELS)
 network=MCCNN(num_channels = NUM_CHANNELS)
-#==============================================================================
-"""
-Note: Running in GPU
-jman submit --queue gpu \
---name mccnnv2 \
---log-dir /idiap/temp/ageorge/Pytorch_WMCA/MCCNNv2/logs/ \
---environment="PYTHONUNBUFFERED=1" -- \
-./bin/train_mccnn.py \
-/idiap/user/ageorge/WORK/COMMON_ENV_PAD_BATL_DB/src/bob.learn.pytorch/bob/learn/pytorch/config/mccnn/wmca_mccnn.py --use-gpu -vvv
-Note: Running in cpu
-./bin/train_mccnn.py \
-/idiap/user/ageorge/WORK/COMMON_ENV_PAD_BATL_DB/src/bob.learn.pytorch/bob/learn/pytorch/config/mccnn/wmca_mccnn.py -vvv
-"""
-; vim: set fileencoding=utf-8 :
-; Fri Dec 23 13:58:08 CET 2016
 [buildout]
 parts = scripts
-develop = .
-eggs = bob.paper.mccnn.tifs2018
+versions = versions
 extensions = bob.buildout
+             mr.developer
+auto-checkout = *
+debug = false
 newest = false
-verbose = true
+verbose = false
+
+eggs = bob.extension
+       bob.measure
+       bob.bio.base
+       bob.ip.base
+       bob.ip.mtcnn
+       bob.learn.pytorch
+       bob.pad.face
+       bob.paper.mccnn.tifs2018
+
+develop = src/bob.extension
+          src/bob.measure
+          src/bob.bio.base
+          src/bob.ip.base
+          src/bob.ip.mtcnn
+          src/bob.learn.pytorch
+          src/bob.pad.face
+          src/bob.paper.mccnn.tifs2018
+
+[versions]
+
+[sources]
+bob.extension = git git@gitlab.idiap.ch:bob/bob.extension rev=8af04533484c30a90a62bc80116e233d67a72ac6
+bob.measure = git git@gitlab.idiap.ch:bob/bob.measure rev=022f97124a2d512462f64a065269b19de5f62ce6
+bob.bio.base = git git@gitlab.idiap.ch:bob/bob.bio.base rev=d92d0fb9672f95306da8259fef713d459acb5ee6
+bob.ip.base = git git@gitlab.idiap.ch:bob/bob.ip.base rev=a890160bb91ebe24fe92eac75725d99d5a73bb3e
+bob.ip.mtcnn = git git@gitlab.idiap.ch:bob/bob.ip.mtcnn rev=eea537b2fa99d0d52285a1353dc4988b58810f29
+bob.learn.pytorch = git git@gitlab.idiap.ch:bob/bob.learn.pytorch rev=03ca71beaf43219ff1a7a33842bb2a389de6d50b
+bob.pad.face = git git@gitlab.idiap.ch:bob/bob.pad.face rev=a260437b58694b64b61ca674cea4f698a6b674f0
+bob.paper.mccnn.tifs2018 = git git@gitlab.idiap.ch:bob/bob.paper.mccnn.tifs2018
 
 [scripts]
 recipe = bob.buildout:scripts
 dependent-scripts = true
\ No newline at end of file
@@ -2,19 +2,28 @@ Running baselines in grandtest protocol of WMCA
 ===============================================
 In the baseline experiments, initially experiments are done separately for all four channels and score fusion is performed.
-Running experiments for each channel is described below.
+Running experiments for each channel is described below. The feature based baselines implemented are Image Quality Measure + Logistic Regression (IQM+LR),
+Local Binary Pattern + Logistic Regression (LBP+LR), and Haralick-RDWT-SVM (H-SVM).
+
+For the baselines using Haralick-SVM you need to have the mahotas package. It is available from the conda-forge channel and
+can be installed as
+
+.. code-block:: sh
+
+    conda install -c https://conda.anaconda.org/conda-forge mahotas
+
+The steps to reproduce the results for the grandtest protocol are listed below.
 
 Color channel
-----------------
+-------------
 
 1.A.1. IQM - LR
 
 .. code-block:: sh
 
     ./bin/spoof.py \
-    <PATH_TO_DATABASE_CONFIG>/batl_db_color.py \
-    <PATH_TO_EXTRACTOR_CONFIG>/qm_lr.py \
+    wmca-color \
+    iqm-lr \
     --sub-directory <PATH_TO_BASELINE_RESULTS>/color/ -vvv --grid idiap
     python bin/scoring.py -df \
@@ -27,8 +36,8 @@ Color channel
 .. code-block:: sh
 
     ./bin/spoof.py \
-    <PATH_TO_DATABASE_CONFIG>/batl_db_color.py \
-    <PATH_TO_EXTRACTOR_CONFIG>/haralick_svm.py \
+    wmca-color \
+    haralick-svm \
     --skip-preprocessing \
     --preprocessed-directory <PATH_TO_BASELINE_RESULTS>/color/preprocessed \
     --sub-directory <PATH_TO_BASELINE_RESULTS>/color/haralicksvm/ -vvv --grid idiap
@@ -47,8 +56,8 @@ B. Depth channel
 .. code-block:: sh
 
     ./bin/spoof.py \
-    <PATH_TO_DATABASE_CONFIG>/batl_db_depth.py \
-    <PATH_TO_EXTRACTOR_CONFIG>/lbp_lr_depth.py \
+    wmca-depth \
+    lbp-lr-depth \
     --sub-directory <PATH_TO_BASELINE_RESULTS>/depth/ -vvv --grid idiap
 
     python bin/scoring.py -df \
@@ -60,8 +69,8 @@ B. Depth channel
 .. code-block:: sh
 
     ./bin/spoof.py \
-    <PATH_TO_DATABASE_CONFIG>/batl_db_depth.py \
-    <PATH_TO_EXTRACTOR_CONFIG>/haralick_svm.py \
+    wmca-depth \
+    haralick-svm \
     --skip-preprocessing \
     --preprocessed-directory <PATH_TO_BASELINE_RESULTS>/depth/preprocessed \
     --sub-directory <PATH_TO_BASELINE_RESULTS>/depth/haralicksvm/ -vvv --grid idiap
@@ -79,8 +88,8 @@ C. Infrared channel
 .. code-block:: sh
 
     ./bin/spoof.py \
-    <PATH_TO_DATABASE_CONFIG>/batl_db_infrared.py \
-    <PATH_TO_EXTRACTOR_CONFIG>/lbp_lr_infrared.py \
+    wmca-infrared \
+    lbp-lr-infrared \
     --sub-directory <PATH_TO_BASELINE_RESULTS>/infrared/ -vvv --grid idiap
 
 1.C.2 Haralick SVM
@@ -88,8 +97,8 @@ C. Infrared channel
 .. code-block:: sh
 
     ./bin/spoof.py \
-    <PATH_TO_DATABASE_CONFIG>/batl_db_infrared.py \
-    <PATH_TO_EXTRACTOR_CONFIG>/haralick_svm.py \
+    wmca-infrared \
+    haralick-svm \
     --skip-preprocessing \
     --preprocessed-directory <PATH_TO_BASELINE_RESULTS>/infrared/preprocessed \
     --sub-directory <PATH_TO_BASELINE_RESULTS>/infrared/haralicksvm/ -vvv --grid idiap
@@ -107,8 +116,8 @@ D. Thermal channel
 .. code-block:: sh
 
     ./bin/spoof.py \
-    <PATH_TO_DATABASE_CONFIG>/batl_db_thermal.py \
-    <PATH_TO_EXTRACTOR_CONFIG>/lbp_lr_thermal.py \
+    wmca-thermal \
+    lbp-lr-thermal \
     --sub-directory <PATH_TO_BASELINE_RESULTS>/thermal/ -vvv --grid idiap
 
     python bin/scoring.py -df \
@@ -120,8 +129,8 @@ D. Thermal channel
 .. code-block:: sh
 
     ./bin/spoof.py \
-    <PATH_TO_DATABASE_CONFIG>/batl_db_thermal.py \
-    <PATH_TO_EXTRACTOR_CONFIG>/haralick_svm.py \
+    wmca-thermal \
+    haralick-svm \
     --skip-preprocessing \
     --preprocessed-directory <PATH_TO_BASELINE_RESULTS>/thermal/preprocessed \
     --sub-directory <PATH_TO_BASELINE_RESULTS>/thermal/haralicksvm/ -vvv --grid idiap
@@ -170,8 +179,9 @@ A. color
 unseen_protocols=['','-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_papermask','-LOO_prints','-LOO_replay','-LOO_rigidmask']
 
+The protocol to run can be selected by indexing into this list:
 PROTOCOL = 'grandtest-color-50'+unseen_protocols[0]
 
 1.
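For example (illustrative indices only), selecting the fakehead leave-one-out protocol instead of the full grandtest protocol amounts to:

.. code-block:: python

    # unseen_protocols[0] is '' (plain grandtest); indices 1..7 select the LOO protocols.
    unseen_protocols = ['', '-LOO_fakehead', '-LOO_flexiblemask', '-LOO_glasses',
                        '-LOO_papermask', '-LOO_prints', '-LOO_replay', '-LOO_rigidmask']

    PROTOCOL = 'grandtest-color-50' + unseen_protocols[1]   # 'grandtest-color-50-LOO_fakehead'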
@@ -179,8 +189,8 @@ PROTOCOL = 'grandtest-color-50'+unseen_protocols[0]
 .. code-block:: sh
 
     ./bin/spoof.py \
-    <PATH_TO_DATABASE_CONFIG>/batl_db_color.py \
-    <PATH_TO_EXTRACTOR_CONFIG>/qm_lr.py \
+    wmca-color \
+    iqm-lr \
     --protocol grandtest-color-50-LOO_fakehead \
     --skip-preprocessing --skip-extraction --skip-extractor-training --allow-missing-files \
     --extracted-directory <PATH_TO_BASELINE_RESULTS>/color/extracted/ \
@@ -197,8 +207,8 @@ B. depth
 .. code-block:: sh
 
     ./bin/spoof.py \
-    <PATH_TO_DATABASE_CONFIG>/batl_db_depth.py \
-    <PATH_TO_EXTRACTOR_CONFIG>/lbp_lr_depth.py \
+    wmca-depth \
+    lbp-lr-depth \
     --protocol grandtest-depth-50-LOO_fakehead \
     --skip-preprocessing --skip-extraction --skip-extractor-training --allow-missing-files \
     --extracted-directory <PATH_TO_BASELINE_RESULTS>/depth/extracted/ \
@@ -215,8 +225,8 @@ C. Infrared
 .. code-block:: sh
 
     ./bin/spoof.py \
-    <PATH_TO_DATABASE_CONFIG>/batl_db_infrared.py \
-    <PATH_TO_EXTRACTOR_CONFIG>/lbp_lr_infrared.py \
+    wmca-infrared \
+    lbp-lr-infrared \
     --protocol grandtest-infrared-50-LOO_fakehead \
     --skip-preprocessing --skip-extraction --skip-extractor-training --allow-missing-files \
     --extracted-directory <PATH_TO_BASELINE_RESULTS>/infrared/extracted/ \
@@ -232,8 +242,8 @@ D. Thermal
 .. code-block:: sh
 
     ./bin/spoof.py \
-    <PATH_TO_DATABASE_CONFIG>/batl_db_thermal.py \
-    <PATH_TO_EXTRACTOR_CONFIG>/lbp_lr_thermal.py \
+    wmca-thermal \
+    lbp-lr-thermal \
     --protocol grandtest-thermal-50-LOO_fakehead \
     --skip-preprocessing --skip-extraction --skip-extractor-training --allow-missing-files \
     --extracted-directory <PATH_TO_BASELINE_RESULTS>/thermal/extracted/ \
@@ -255,7 +265,6 @@ The protocola are, '-LOO_fakehead','-LOO_flexiblemask','-LOO_glasses','-LOO_pape
     <PATH_TO_BASELINE_RESULTS>/LOO/color/grandtest-color-50-LOO_fakehead/grandtest-color-50-LOO_fakehead/scores/scores-dev
 
-
 .. code-block:: sh
 
     python bin/scoring.py -df \
@@ -273,19 +282,15 @@ color
 -----
 
 .. code-block:: sh
 
-    ./bin/spoof.py <PATH_TO_DATABASE_CONFIG>/batl_db_color.py <PATH_TO_EXTRACTOR_CONFIG>/haralick_svm.py --protocol grandtest-color-50-LOO_fakehead --skip-preprocessing --skip-extraction --skip-extractor-training --allow-missing-files --extracted-directory <PATH_TO_BASELINE_RESULTS>/color/haralicksvm/extracted/ --grid idiap --sub-directory <PATH_TO_BASELINE_RESULTS>/LOO/color/haralicksvm/grandtest-color-50-LOO_fakehead/
+    ./bin/spoof.py wmca-color haralick-svm --protocol grandtest-color-50-LOO_fakehead --skip-preprocessing --skip-extraction --skip-extractor-training --allow-missing-files --extracted-directory <PATH_TO_BASELINE_RESULTS>/color/haralicksvm/extracted/ --grid idiap --sub-directory <PATH_TO_BASELINE_RESULTS>/LOO/color/haralicksvm/grandtest-color-50-LOO_fakehead/
 
-All of them together
---------------------
-
-.. code-block:: sh
-
-    ./bin/spoof.py <PATH_TO_DATABASE_CONFIG>/batl_db_color.py <PATH_TO_EXTRACTOR_CONFIG>/haralick_svm.py --protocol grandtest-color-50-LOO_fakehead --skip-preprocessing --skip-extraction --skip-extractor-training --allow-missing-files --extracted-directory <PATH_TO_BASELINE_RESULTS>/color/haralicksvm/extracted/ --grid idiap --sub-directory <PATH_TO_BASELINE_RESULTS>/LOO/color/haralicksvm/grandtest-color-50-LOO_fakehead/
-
-scorefusion Haralick
---------------------
+Repeat the same procedure for all four channels.
+
+Scorefusion Haralick-SVM
+------------------------
+
+Once the scores from all channels are available, run the following command.
 
 .. code-block:: sh
 
     ./bin/python bin/mean_fusion.py -c <PATH_TO_BASELINE_RESULTS>/LOO/color/haralicksvm/grandtest-color-50-LOO_fakehead/grandtest-color-50-LOO_fakehead/scores/scores-dev -s <PATH_TO_BASELINE_RESULTS>/mean_fusion/haralick/grandtest-color-50-LOO_fakehead/
@@ -297,4 +302,4 @@ scoring for Haralick Mean fusion
     python bin/scoring.py -df <PATH_TO_BASELINE_RESULTS>/mean_fusion/haralick/grandtest-color-50-LOO_fakehead/scores_mean_fused_dev -ef <PATH_TO_BASELINE_RESULTS>/mean_fusion/haralick/grandtest-color-50-LOO_fakehead/scores_mean_fused_eval
 
-Similarly, repeat the experiment for all protocols.
+Similarly, repeat the experiment for all LOO sub-protocols.
\ No newline at end of file
@@ -100,6 +100,19 @@ setup(
       'mc-face-preprocessor = bob.paper.mccnn.tifs2018.config.preprocessor.mc_preprocessor:mc_preprocessor', # returns Multi channel aligned face images
       ],
+      'bob.bio.config': ['wmca-color = bob.paper.mccnn.tifs2018.database.batl_db_color',
+                         'wmca-depth = bob.paper.mccnn.tifs2018.database.batl_db_depth',
+                         'wmca-infrared = bob.paper.mccnn.tifs2018.database.batl_db_infrared',
+                         'wmca-all = bob.paper.mccnn.tifs2018.database.batl_db_rgb_ir_d_t_grandtest',
+                         'wmca-thermal = bob.paper.mccnn.tifs2018.database.batl_db_thermal',
+                         # configs
+                         'lbp-lr-infrared = bob.paper.mccnn.tifs2018.config.lbp_lr_infrared',
+                         'lbp-lr-thermal = bob.paper.mccnn.tifs2018.config.lbp_lr_thermal',
+                         'lbp-lr-depth = bob.paper.mccnn.tifs2018.config.lbp_lr_depth',
+                         'fastnet = bob.paper.mccnn.tifs2018.config.FASNet_config',
+                         'mccnn = bob.paper.mccnn.tifs2018.config.MCCNN_config',
+                         'iqm-lr = bob.paper.mccnn.tifs2018.config.iqm_lr',
+                         'haralick-svm = bob.paper.mccnn.tifs2018.config.haralick_svm',],
 },
...