Commit acc69cfd authored by Anjith GEORGE's avatar Anjith GEORGE

Additions and clean up

parent 7c75bcb0
Pipeline #28533 canceled with stages
in 19 minutes and 41 seconds
......@@ -32,18 +32,21 @@ import torch.optim as optim
4. Initialize the network architecture with required arguments.
5. Define the parameters for the trainer.
5. Define the parameters for the trainer.
6. Define loss function for the network
"""
#==============================================================================
# Initialize the bob database instance
data_folder_train='/idiap/temp/ageorge/oulunpu/mtcnn_aligned_gmm_224/preprocessed/'
output_base_path='/idiap/temp/ageorge/oulunpu/GenericScript/'
preprocessed_folder='<PREPROCESSED_FOLDER>'
####################################################################################
cnn_training_path='<CNN_OUTPUT_DIR>'
####################################################################
protocols="Protocol_1"
#################################
extension='.h5'
......@@ -64,10 +67,11 @@ groups={"train":['train'],"val":['dev']}
data_balance={"train":True,"val":False}
protocols="Protocol_1"
bob_hldi_instance =bob.db.oulunpu.Database()
output_base_path=cnn_training_path+protocols+'/'
#==============================================================================
# Initialize the torch dataset, subselect channels from the pretrained files if needed.
......@@ -83,7 +87,7 @@ dataset={}
for phase in phases:
dataset[phase] = DataFolderPixBiS(data_folder=data_folder_train,
dataset[phase] = DataFolderPixBiS(data_folder=preprocessed_folder,
transform=img_transform[phase],
extension='.hdf5',
bob_hldi_instance=bob_hldi_instance,
......@@ -92,8 +96,6 @@ for phase in phases:
purposes=['real', 'attack'],
allow_missing_files=True, do_balance=data_balance[phase],max_samples_per_file=50,channels='RGB', mask_op='flat',custom_size=14)
#==============================================================================
# Specify other training parameters
......@@ -121,8 +123,6 @@ network=DeepPixBiS(pretrained=True)
for name, param in network.named_parameters():
param.requires_grad = True
# if not 'enc' in name:
# param.requires_grad = True
# loss definitions
......@@ -136,7 +136,6 @@ criterion_bce = nn.BCELoss()
optimizer = optim.Adam(filter(lambda p: p.requires_grad, network.parameters()),lr = learning_rate, weight_decay=weight_decay)
def compute_loss(network,img, labels, device):
"""
Compute the losses, given the network, data and labels and
......@@ -168,7 +167,7 @@ Note: Running in GPU
jman submit --queue gpu \
--name generic \
--log-dir /idiap/temp/ageorge/oulunpu/GenericScript/logs/ \
--log-dir /idiap/temp/ageorge/oulunpu/GenericScript/Protocol_1/logs/ \
--environment="PYTHONUNBUFFERED=1" -- \
./bin/train_generic.py \
/idiap/user/ageorge/WORK/COMMON_ENV_PAD_BATL_DB/src/bob.learn.pytorch/bob/learn/pytorch/config/generic/oulu_deep_pixbis.py --use-gpu -vvv
......
# Configuration resource for the OULU-NPU PAD database (bob.db.oulunpu).
import bob.db.oulunpu
from bob.extension import rc
# Root directory of the raw OULU-NPU data, read from the bob rc
# configuration (set via: bob config set bob.db.oulunpu.directory <path>).
ORIGINAL_DIRECTORY = rc["bob.db.oulunpu.directory"]
ORIGINAL_EXTENSION = ".h5" # extension of the data files
# NOTE(review): set to the empty string here; the consumer of this value is
# not visible in this file -- presumably a temp dir for annotations, confirm
# against the framework that loads this config.
ANNOTATIONS_TEMP_DIR = ""
# Evaluation protocol to run (e.g. Protocol_1 ... Protocol_4_6).
PROTOCOL='Protocol_1'
database = bob.db.oulunpu.Database(
protocol=PROTOCOL,
original_directory=ORIGINAL_DIRECTORY,
original_extension=ORIGINAL_EXTENSION)
# Protocol name also exposed as a standalone config variable.
protocol = PROTOCOL
"""
You may modify this at runtime by specifying the option ``--protocol`` on the
command-line of ``spoof.py`` or using the keyword ``protocol`` on a
configuration file that is loaded **after** this configuration resource.
"""
# Database groups to process in the experiment.
groups = ["train", "dev", "eval"]
......@@ -53,7 +53,8 @@ from bob.paper.deep_pix_bis_pad.icb2019.extractor import DeepPixBiSExtractor
from bob.bio.video.extractor import Wrapper
MODEL_FILE='/idiap/user/ageorge/WORK/SCRIPTS_DUMP/scripts_dump/CAEandDBtest/GPUtry_torch_0.4/LEGACY_CNN/models/modelversion_5240interim_24.pth'
MODEL_FILE='<PATH_TO_MODEL>' # <CNN_OUTPUT_DIR>/Protocol**/model_0_0.pth
####################################################################
SCORING_METHOD='pixel_mean' # 'pixel_mean','binary','combined'
......
......@@ -52,7 +52,7 @@ preprocessor = Wrapper(preprocessor = _image_preprocessor,
## Imp convert to grayscale before running this
from bob.pad.face.extractor import LBPHistogram
from bob.paper.deep_pix_bis_pad.icb2019.extractor import LBPHistogram
from bob.bio.video.extractor import Wrapper
......
from __future__ import division
from bob.bio.base.extractor import Extractor
import bob.bio.video
import bob.ip.base
import numpy as np
import bob.ip.color
class LBPHistogram(Extractor):
    """Calculates a normalized LBP histogram over an image.

    These features are implemented based on [CAM12]_.

    Parameters
    ----------
    lbptype : str
        The type of the LBP operator (``regular``, ``uniform`` or ``riu2``).
    elbptype : str
        The type of extended version of LBP (``regular`` if no extended
        version is used, otherwise ``transitional``, ``direction_coded`` or
        ``modified``).
    rad : float
        The radius of the circle on which the points are taken (for circular
        LBP).
    neighbors : int
        The number of points around the central point on which LBP is
        computed (4, 8, 16).
    circ : bool
        True if circular LBP is needed, False otherwise.
    dtype : numpy.dtype
        If a ``dtype`` is specified in the constructor, it is assured that
        the resulting features have that dtype.
    n_hor : int
        Number of blocks horizontally for spatially-enhanced LBP/MCT
        histograms. Default: 1.
    n_vert : int
        Number of blocks vertically for spatially-enhanced LBP/MCT
        histograms. Default: 1.

    Attributes
    ----------
    dtype : numpy.dtype
        The dtype enforced on the output features (``None`` keeps the
        histogram's native float dtype).
    lbp : bob.ip.base.LBP
        The LBP extractor object.
    """

    def __init__(self,
                 lbptype='uniform',
                 elbptype='regular',
                 rad=1,
                 neighbors=8,
                 circ=False,
                 dtype=None,
                 n_hor=1,
                 n_vert=1):
        super(LBPHistogram, self).__init__(
            lbptype=lbptype,
            elbptype=elbptype,
            rad=rad,
            neighbors=neighbors,
            circ=circ,
            dtype=dtype,
            n_hor=n_hor,
            n_vert=n_vert)

        # Map the user-facing elbptype names onto the names expected by
        # bob.ip.base.LBP.  BUGFIX: the original mapping contained the typo
        # 'trainsitional', which made elbptype='transitional' pass an
        # invalid elbp_type string.  'modified' is realized as a regular LBP
        # with to_average=True (MCT), hence it maps to 'regular'.
        elbps = {
            'regular': 'regular',
            'transitional': 'transitional',
            'direction_coded': 'direction-coded',
            'modified': 'regular',
        }

        # 'modified' LBP is the MCT variant: compare neighbors against the
        # block average instead of the central pixel.
        mct = (elbptype == 'modified')

        # All LBP variants share these constructor arguments; only the
        # uniform / rotation-invariant flags differ per lbptype.
        lbp_kwargs = dict(
            # Anything other than 16 neighbors is treated as 8, as before.
            neighbors=16 if neighbors == 16 else 8,
            circular=circ,
            radius=rad,
            to_average=mct,
            elbp_type=elbps[elbptype],
        )
        if lbptype == 'uniform':
            lbp_kwargs['uniform'] = True
        elif lbptype == 'riu2':
            lbp_kwargs['uniform'] = True
            lbp_kwargs['rotation_invariant'] = True
        # else: regular LBP, no extra flags.

        self.dtype = dtype
        self.lbp = bob.ip.base.LBP(**lbp_kwargs)
        self.n_hor = n_hor
        self.n_vert = n_vert

    def comp_block_histogram(self, data):
        """Extracts a normalized LBP/MCT histogram from a gray-scale block.

        Takes a 2D block, computes the LBP code image, and returns its
        histogram normalized to sum to 1, enforcing the data type if
        desired.

        Parameters
        ----------
        data : numpy.ndarray
            The gray-scale image/block to compute the histogram over.

        Returns
        -------
        1D :py:class:`numpy.ndarray`
            The extracted feature vector, of the desired ``dtype`` (if
            specified).
        """
        assert isinstance(data, np.ndarray)

        # Allocate the image holding the LBP codes, then fill it in place.
        lbpimage = np.ndarray(self.lbp.lbp_shape(data), 'uint16')
        self.lbp(data, lbpimage)  # calculating the lbp image

        hist = bob.ip.base.histogram(lbpimage, (0, self.lbp.max_label - 1),
                                     self.lbp.max_label)
        hist = hist / sum(hist)  # histogram normalization

        if self.dtype is not None:
            hist = hist.astype(self.dtype)
        return hist

    def __call__(self, data):
        """Extracts spatially-enhanced LBP/MCT histograms from an image.

        The image is cropped so it splits into ``n_hor`` x ``n_vert`` equal
        blocks; one histogram is computed per block and the concatenation of
        the per-block histograms (divided by the number of blocks) is
        returned.

        Parameters
        ----------
        data : numpy.ndarray
            The preprocessed image; a 3D (color) array is converted to
            gray-scale first.

        Returns
        -------
        1D :py:class:`numpy.ndarray`
            The extracted feature vector, of the desired ``dtype`` (if
            specified).
        """
        # LBP is defined on gray-scale images; convert color inputs first.
        if len(data.shape) == 3:
            data = bob.ip.color.rgb_to_gray(data)

        # Make sure the data can be split into equal blocks:
        row_max = int(data.shape[0] / self.n_vert) * self.n_vert
        col_max = int(data.shape[1] / self.n_hor) * self.n_hor
        data = data[:row_max, :col_max]

        blocks = [sub_block
                  for block in np.hsplit(data, self.n_hor)
                  for sub_block in np.vsplit(block, self.n_vert)]

        hists = [self.comp_block_histogram(block) for block in blocks]
        hist = np.hstack(hists)
        hist = hist / len(blocks)  # histogram normalization
        return hist
# Re-export the extractor implementations at package level.
from .DeepPixBiS import DeepPixBiSExtractor
from .LBPHistogram import LBPHistogram

# Public API of this package: every module-level name that is not private.
__all__ = [name for name in dir() if not name.startswith('_')]
......@@ -29,24 +29,34 @@ by the following command.
.. code-block:: sh
./bin/spoof.py \ # spoof.py is used to run the preprocessor
oulunpu-db \ # run for OULU database
qm-svm \ # required by spoof.py, but unused
--protocol Protocol_1 # Protocol to use
oulunpu \ # run for OULU database
iqm-svm \ # required by spoof.py, but unused
--protocol Protocol_1 \ # Protocol to use
--execute-only preprocessing \ # execute only preprocessing step
--allow-missing-files \ # allow failed files
--grid idiap \ # use grid, only for Idiap users, REMOVE otherwise
--sub-directory <PATH_TO_STORE_THE_RESULTS> # define your path here
Similarly, run the scripts for all protocols in the OULU-NPU dataset.
Similarly, run the scripts for all protocols in the OULU-NPU dataset. To run the experiments with a different protocol,
replace ``Protocol_1`` in the command above with one of: 'Protocol_1', 'Protocol_2', 'Protocol_3_1', 'Protocol_3_2', 'Protocol_3_3', 'Protocol_3_4', 'Protocol_3_5', 'Protocol_3_6', 'Protocol_4_1', 'Protocol_4_2', 'Protocol_4_3', 'Protocol_4_4', 'Protocol_4_5', 'Protocol_4_6'.
After running this command, the preprocessed files will be stored in ``<PATH_TO_STORE_THE_RESULTS>/preprocessed/``
After running these commands, the preprocessed files from all the protocols will be stored in ``<PATH_TO_STORE_THE_RESULTS>/preprocessed/``
2. Training DeepPixBiS model
----------------------------
After preprocessing is done, training the CNN for ``Protocol_1`` can be done as follows.
Edit the config file ``cnn_trainer_config/oulu_deep_pixbis.py`` to set the preprocessed folder and output folders.
For other protocols change the value of ``protocol`` variable in the same config file and run training again.
.. note::
Be sure to update ``<PATH_TO_CONFIG>/cnn_trainer_config/oulu_deep_pixbis.py`` with the folder paths and protocol names.
.. code-block:: sh
./bin/train_pixbis.py path_to_config.py -vvv
./bin/train_pixbis.py <PATH_TO_CONFIG>/cnn_trainer_config/oulu_deep_pixbis.py -vvv
.. note::
......@@ -54,7 +64,7 @@ After running this command, the preprocessed files will be stored in ``<PATH_TO_
.. code-block:: sh
/bin/train_generic.py path_to_config.py -vvv --use-gpu
/bin/train_generic.py <PATH_TO_CONFIG>/cnn_trainer_config/oulu_deep_pixbis.py -vvv --use-gpu
For Idiap Grid, the job can be submitted as follows:
......@@ -65,25 +75,32 @@ For Idiap Grid, the job can be submitted as follows:
--log-dir <FOLDER_TO_SAVE_THE_RESULTS>/logs/ \
--environment="PYTHONUNBUFFERED=1" -- \
./bin/train_pixbis.py \
path_to_config.py \
<PATH_TO_CONFIG>/cnn_trainer_config/oulu_deep_pixbis.py \
--use-gpu \
-vv
Once this script is completed, the best model based on lowest validation loss can be found at ``<CNN_OUTPUT_DIR>/model_0_0.pth``.
Once this script is completed, the best model based on lowest validation loss can be found at ``<CNN_OUTPUT_DIR>/Protocol_1/model_0_0.pth``.
3. Running the pipeline (PAD Experiment)
----------------------------------------
Once the CNN training is completed, we can run the bob pipeline by using the trained model as an extractor.
For Protocol_1 this can be done with the following command.
.. note::
Be sure to update ``<PATH_TO_CONFIG>/deep_pix_bis.py`` with the path to the trained model (``MODEL_FILE``) for the specific protocol.
This can be achieved by the following command:
.. code-block:: sh
./bin/spoof.py \ # spoof.py is used to run the experiment
oulunpu-db \ # OULU NPU dataset
oulunpu \ # OULU NPU dataset
pix_bis \ # configuration defining Preprocessor, Extractor, and Algorithm
--protocol Protocol_1 \ # Define the configuration
--allow-missing-files \ # don't stop the execution if some files are missing
......@@ -99,13 +116,11 @@ The scores obtained can be tested as well.
.. code-block:: sh
./bin/bob bio evaluate -e -v -l '<SAVE_FOLDER>/metrix.txt' \
-o '<SAVE_FOLDER>/curves.pdf' \
-lg "DeepPixBiS" \
.bin/scoring_acer.py -df \
<FOLDER_TO_SAVE_THE_RESULTS>/Protocol_1/scores/scores-dev \
<FOLDER_TO_SAVE_THE_RESULTS>/Protocol_1/scores/scores-eval \
-ef <FOLDER_TO_SAVE_THE_RESULTS>/Protocol_1/scores/scores-eval \
-l "DeepPixBiS" -s results \
The files **metrix.txt** and **curves.pdf**, containing error rates and evaluation curves, are saved to ``<SAVE_FOLDER>`` location.
.. include:: links.rst
......
......@@ -39,6 +39,14 @@ setup(
'scoring_acer.py = bob.paper.deep_pix_bis_pad.icb2019.script.scoring_acer:main',
'multi_eval.py = bob.paper.deep_pix_bis_pad.icb2019.script.multi_eval:main',
],
# registered configurations:
#databases
'bob.bio.config': ['replay-mobile = bob.pad.face.config.replay_mobile',
#configs
'glbp-svm = bob.paper.deep_pix_bis_pad.icb2019.config.lbp_svm',
'iqm-svm = bob.paper.deep_pix_bis_pad.icb2019.config.qm_svm',
'deep-pix-bis = bob.paper.deep_pix_bis_pad.icb2019.config.deep_pix_bis',]
},
# check classifiers, add and remove as you see fit
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment