Commit ffd5e023 authored by Olegs NIKISINS's avatar Olegs NIKISINS

Merge branch 'batl_hldi' into 'master'

Added HLDI for the BATL database, added optional data normalization in FaceCropAlign

See merge request !59
parents fda0ad1a f627bc6c
Pipeline #18375 passed with stages in 64 minutes and 32 seconds
#!/usr/bin/env python
"""
BATL Db is a database for face PAD experiments.
"""
from bob.pad.face.database import BatlPadDatabase
# Directory where the data files are stored.
# This directory is given in the .bob_bio_databases.txt file located in your home directory
ORIGINAL_DIRECTORY = "[YOUR_BATL_DB_DIRECTORY]"
"""Value of ``~/.bob_bio_databases.txt`` for this database"""
ORIGINAL_EXTENSION = ".h5" # extension of the data files
ANNOTATIONS_TEMP_DIR = ""
PROTOCOL = 'nowig-color-50'
database = BatlPadDatabase(
    protocol=PROTOCOL,
    original_directory=ORIGINAL_DIRECTORY,
    original_extension=ORIGINAL_EXTENSION,
    annotations_temp_dir=ANNOTATIONS_TEMP_DIR,
    landmark_detect_method="mtcnn",
    training_depends_on_protocol=True,
)
"""The :py:class:`bob.pad.base.database.BatlPadDatabase` derivative with BATL Db
database settings.
.. warning::
This class only provides a programmatic interface to load data in an orderly
manner, respecting usage protocols. It does **not** contain the raw
data files. You should procure those yourself.
Notice that ``original_directory`` is set to ``[BatlPadDatabase]``.
You must make sure to create ``${HOME}/.bob_bio_databases.txt`` file setting this
value to the places where you actually installed the BATL database.
"""
protocol = PROTOCOL
"""
You may modify this at runtime by specifying the option ``--protocol`` on the
command-line of ``spoof.py`` or using the keyword ``protocol`` on a
configuration file that is loaded **after** this configuration resource.
"""
groups = ["train", "dev", "eval"]
"""The default groups to use for reproducing the baselines.
You may modify this at runtime by specifying the option ``--groups`` on the
command-line of ``spoof.py`` or using the keyword ``groups`` on a
configuration file that is loaded **after** this configuration resource.
"""
#!/usr/bin/env python
"""
BATL Db is a database for face PAD experiments.
"""
from bob.pad.face.database import BatlPadDatabase
# Directory where the data files are stored.
# This directory is given in the .bob_bio_databases.txt file located in your home directory
ORIGINAL_DIRECTORY = "[YOUR_BATL_DB_DIRECTORY]"
"""Value of ``~/.bob_bio_databases.txt`` for this database"""
ORIGINAL_EXTENSION = ".h5" # extension of the data files
ANNOTATIONS_TEMP_DIR = ""
PROTOCOL = 'nowig-depth-50'
database = BatlPadDatabase(
    protocol=PROTOCOL,
    original_directory=ORIGINAL_DIRECTORY,
    original_extension=ORIGINAL_EXTENSION,
    annotations_temp_dir=ANNOTATIONS_TEMP_DIR,
    landmark_detect_method="mtcnn",
    training_depends_on_protocol=True,
)
"""The :py:class:`bob.pad.base.database.BatlPadDatabase` derivative with BATL Db
database settings.
.. warning::
This class only provides a programmatic interface to load data in an orderly
manner, respecting usage protocols. It does **not** contain the raw
data files. You should procure those yourself.
Notice that ``original_directory`` is set to ``[BatlPadDatabase]``.
You must make sure to create ``${HOME}/.bob_bio_databases.txt`` file setting this
value to the places where you actually installed the BATL database.
"""
protocol = PROTOCOL
"""
You may modify this at runtime by specifying the option ``--protocol`` on the
command-line of ``spoof.py`` or using the keyword ``protocol`` on a
configuration file that is loaded **after** this configuration resource.
"""
groups = ["train", "dev", "eval"]
"""The default groups to use for reproducing the baselines.
You may modify this at runtime by specifying the option ``--groups`` on the
command-line of ``spoof.py`` or using the keyword ``groups`` on a
configuration file that is loaded **after** this configuration resource.
"""
#!/usr/bin/env python
"""
BATL Db is a database for face PAD experiments.
"""
from bob.pad.face.database import BatlPadDatabase
# Directory where the data files are stored.
# This directory is given in the .bob_bio_databases.txt file located in your home directory
ORIGINAL_DIRECTORY = "[YOUR_BATL_DB_DIRECTORY]"
"""Value of ``~/.bob_bio_databases.txt`` for this database"""
ORIGINAL_EXTENSION = ".h5" # extension of the data files
ANNOTATIONS_TEMP_DIR = ""
PROTOCOL = 'nowig-infrared-50'
database = BatlPadDatabase(
    protocol=PROTOCOL,
    original_directory=ORIGINAL_DIRECTORY,
    original_extension=ORIGINAL_EXTENSION,
    annotations_temp_dir=ANNOTATIONS_TEMP_DIR,
    landmark_detect_method="mtcnn",
    training_depends_on_protocol=True,
)
"""The :py:class:`bob.pad.base.database.BatlPadDatabase` derivative with BATL Db
database settings.
.. warning::
This class only provides a programmatic interface to load data in an orderly
manner, respecting usage protocols. It does **not** contain the raw
data files. You should procure those yourself.
Notice that ``original_directory`` is set to ``[BatlPadDatabase]``.
You must make sure to create ``${HOME}/.bob_bio_databases.txt`` file setting this
value to the places where you actually installed the BATL database.
"""
protocol = PROTOCOL
"""
You may modify this at runtime by specifying the option ``--protocol`` on the
command-line of ``spoof.py`` or using the keyword ``protocol`` on a
configuration file that is loaded **after** this configuration resource.
"""
groups = ["train", "dev", "eval"]
"""The default groups to use for reproducing the baselines.
You may modify this at runtime by specifying the option ``--groups`` on the
command-line of ``spoof.py`` or using the keyword ``groups`` on a
configuration file that is loaded **after** this configuration resource.
"""
#!/usr/bin/env python
"""
BATL Db is a database for face PAD experiments.
"""
from bob.pad.face.database import BatlPadDatabase
# Directory where the data files are stored.
# This directory is given in the .bob_bio_databases.txt file located in your home directory
ORIGINAL_DIRECTORY = "[YOUR_BATL_DB_DIRECTORY]"
"""Value of ``~/.bob_bio_databases.txt`` for this database"""
ORIGINAL_EXTENSION = ".h5" # extension of the data files
ANNOTATIONS_TEMP_DIR = ""
PROTOCOL = 'nowig-thermal-50'
database = BatlPadDatabase(
    protocol=PROTOCOL,
    original_directory=ORIGINAL_DIRECTORY,
    original_extension=ORIGINAL_EXTENSION,
    annotations_temp_dir=ANNOTATIONS_TEMP_DIR,
    landmark_detect_method="mtcnn",
    training_depends_on_protocol=True,
)
"""The :py:class:`bob.pad.base.database.BatlPadDatabase` derivative with BATL Db
database settings.
.. warning::
This class only provides a programmatic interface to load data in an orderly
manner, respecting usage protocols. It does **not** contain the raw
data files. You should procure those yourself.
Notice that ``original_directory`` is set to ``[BatlPadDatabase]``.
You must make sure to create ``${HOME}/.bob_bio_databases.txt`` file setting this
value to the places where you actually installed the BATL database.
"""
protocol = PROTOCOL
"""
You may modify this at runtime by specifying the option ``--protocol`` on the
command-line of ``spoof.py`` or using the keyword ``protocol`` on a
configuration file that is loaded **after** this configuration resource.
"""
groups = ["train", "dev", "eval"]
"""The default groups to use for reproducing the baselines.
You may modify this at runtime by specifying the option ``--groups`` on the
command-line of ``spoof.py`` or using the keyword ``groups`` on a
configuration file that is loaded **after** this configuration resource.
"""
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This file contains configurations to run an LBP and LR (Logistic Regression) based face PAD baseline.
The settings are tuned for the Replay-attack database.
The idea of the algorithm is introduced in the following paper: [CAM12]_.
However, some settings are different from the ones introduced in the paper.
"""
#=======================================================================================
sub_directory = 'lbp_svm'
"""
Sub-directory where results will be placed.
You may change this setting using the ``--sub-directory`` command-line option
or the attribute ``sub_directory`` in a configuration file loaded **after**
this resource.
"""
#=======================================================================================
# define preprocessor:
from ..preprocessor import FaceCropAlign
from bob.bio.video.preprocessor import Wrapper
from bob.bio.video.utils import FrameSelector
from ..preprocessor.FaceCropAlign import auto_norm_image as _norm_func
FACE_SIZE = 64 # The size of the resulting face
RGB_OUTPUT_FLAG = False # Gray-scale output
USE_FACE_ALIGNMENT = False  # do not use facial landmarks for alignment, crop using the bounding-box annotations only
MAX_IMAGE_SIZE = None  # no limiting here
FACE_DETECTION_METHOD = None  # use the existing annotations, do not run a face detector
MIN_FACE_SIZE = 50  # skip faces smaller than 50 pixels
NORMALIZATION_FUNCTION = _norm_func  # normalize the image using the statistics of the facial region
NORMALIZATION_FUNCTION_KWARGS = {'n_sigma': 3.0, 'norm_method': 'MAD'}
_image_preprocessor = FaceCropAlign(face_size=FACE_SIZE,
                                    rgb_output_flag=RGB_OUTPUT_FLAG,
                                    use_face_alignment=USE_FACE_ALIGNMENT,
                                    max_image_size=MAX_IMAGE_SIZE,
                                    face_detection_method=FACE_DETECTION_METHOD,
                                    min_face_size=MIN_FACE_SIZE,
                                    normalization_function=NORMALIZATION_FUNCTION,
                                    normalization_function_kwargs=NORMALIZATION_FUNCTION_KWARGS)

_frame_selector = FrameSelector(selection_style="all")

preprocessor = Wrapper(preprocessor=_image_preprocessor,
                       frame_selector=_frame_selector)
"""
In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces with the size
below ``MIN_FACE_SIZE`` threshold are discarded. The preprocessor is similar to the one introduced in
[CAM12]_, which is defined by ``FACE_DETECTION_METHOD = None``.
"""
#=======================================================================================
# define extractor:
from ..extractor import LBPHistogram
from bob.bio.video.extractor import Wrapper
LBPTYPE = 'uniform'
ELBPTYPE = 'regular'
RAD = 1
NEIGHBORS = 8
CIRC = False
DTYPE = None
extractor = Wrapper(LBPHistogram(lbptype=LBPTYPE,
                                 elbptype=ELBPTYPE,
                                 rad=RAD,
                                 neighbors=NEIGHBORS,
                                 circ=CIRC,
                                 dtype=DTYPE))
"""
In the feature extraction stage the LBP histograms are extracted from each frame of the preprocessed video.
The parameters are similar to the ones introduced in [CAM12]_.
"""
#=======================================================================================
# define algorithm:
from bob.pad.base.algorithm import LogRegr
C = 1. # The regularization parameter for the LR classifier
FRAME_LEVEL_SCORES_FLAG = True # Return one score per frame
algorithm = LogRegr(C=C,
                    frame_level_scores_flag=FRAME_LEVEL_SCORES_FLAG)
"""
The Logistic Regression is used to classify the data into *real* and *attack* classes.
One score is produced for each frame of the input video, ``frame_level_scores_flag = True``.
The sub-sampling of training data is not used here, sub-sampling flags have default ``False``
values.
"""
@@ -4,6 +4,7 @@ from .replay_mobile import ReplayMobilePadDatabase
from .msu_mfsd import MsuMfsdPadDatabase
from .aggregated_db import AggregatedDbPadDatabase
from .mifs import MIFSPadDatabase
from .batl import BatlPadDatabase
# gets sphinx autodoc done right - don't remove it
@@ -29,6 +30,7 @@ __appropriate__(
MsuMfsdPadDatabase,
AggregatedDbPadDatabase,
MIFSPadDatabase,
BatlPadDatabase,
)
__all__ = [_ for _ in dir() if not _.startswith('_')]
@@ -20,6 +20,63 @@ import bob.ip.base
import importlib
# ==============================================================================
def auto_norm_image(data, annotations, n_sigma=3.0, norm_method='MAD'):
    """
    Normalizes a single-channel image to the 0-255 range, using the distribution
    of the pixel values inside the annotated facial region
    (``annotations['topleft']`` to ``annotations['bottomright']``).

    ``norm_method`` selects the statistics used for the normalization:
    'STD' (Gaussian mean and standard deviation), 'MAD' (median and median
    absolute deviation) or 'MINMAX' (minimum and maximum of the facial region).

    ``n_sigma`` defines how many standard deviations (or MADs) on each side of
    the distribution center are mapped onto the 0-255 range.
    """

    face = data[annotations['topleft'][0]:annotations['bottomright'][0],
                annotations['topleft'][1]:annotations['bottomright'][1]]

    face = face.astype('float')
    data = data.astype('float')

    assert len(data.shape) == 2  # single-channel images are expected

    face_c = np.ma.array(face).compressed()

    # Avoiding zeros from the flat field in thermal and holes in depth:
    face_c = face_c[face_c > 1.0]

    if norm_method == 'STD':
        mu = np.mean(face_c)
        std = np.std(face_c)
        data_n = ((data - mu + n_sigma * std) / (2.0 * n_sigma * std)) * 255.0

    if norm_method == 'MAD':
        med = np.median(face_c)
        mad = np.median(np.abs(face_c - med))
        data_n = ((data - med + n_sigma * mad) / (2.0 * n_sigma * mad)) * 255.0

    if norm_method == 'MINMAX':
        t_min = np.min(face_c)
        t_max = np.max(face_c)
        data_n = ((data - t_min) / (t_max - t_min)) * 255.0

    # Clamping to the 0-255 range:
    data_n = np.maximum(data_n, 0)
    data_n = np.minimum(data_n, 255)

    data_n = data_n.astype('uint8')

    return data_n
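# A hypothetical usage sketch of the function above (kept as a comment, since
# this module is imported by the preprocessor; the depth values are synthetic):
#
#   import numpy as np
#   depth = np.random.uniform(500., 900., (128, 128))  # synthetic depth-like map
#   annotations = {'topleft': (20, 20), 'bottomright': (100, 100)}
#   depth_u8 = auto_norm_image(depth, annotations, n_sigma=3.0, norm_method='MAD')
#   # depth_u8 has the same shape as ``depth``, dtype uint8, and values in [0, 255]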
# ==============================================================================
def get_eye_pos(lm):
"""
@@ -347,6 +404,16 @@ class FaceCropAlign(Preprocessor):
    ``min_face_size`` : :py:class:`int`
        The minimal size of the face in pixels to be processed.
        Default: None.

    ``normalization_function`` : function
        Function to be applied to the input image before cropping and size
        normalization. For example: type-casting to the uint8 format and
        data normalization computed using the facial region only (given by
        the annotations). The expected signature of the function is
        ``normalization_function(image, annotations, **kwargs)``.
        Default: None.

    ``normalization_function_kwargs`` : :py:class:`dict`
        Keyword arguments to be passed to the ``normalization_function``.
        Default: None.
    """
# ==========================================================================
@@ -355,14 +422,18 @@ class FaceCropAlign(Preprocessor):
                 use_face_alignment,
                 max_image_size=None,
                 face_detection_method=None,
-                min_face_size=None):
+                min_face_size=None,
+                normalization_function=None,
+                normalization_function_kwargs=None):

        Preprocessor.__init__(self, face_size=face_size,
                              rgb_output_flag=rgb_output_flag,
                              use_face_alignment=use_face_alignment,
                              max_image_size=max_image_size,
                              face_detection_method=face_detection_method,
-                             min_face_size=min_face_size)
+                             min_face_size=min_face_size,
+                             normalization_function=normalization_function,
+                             normalization_function_kwargs=normalization_function_kwargs)

        self.face_size = face_size
        self.rgb_output_flag = rgb_output_flag
@@ -371,6 +442,8 @@ class FaceCropAlign(Preprocessor):
        self.max_image_size = max_image_size
        self.face_detection_method = face_detection_method
        self.min_face_size = min_face_size
        self.normalization_function = normalization_function
        self.normalization_function_kwargs = normalization_function_kwargs

        self.supported_face_detection_method = ["dlib", "mtcnn"]
@@ -465,6 +538,10 @@ class FaceCropAlign(Preprocessor):
            return None

        if self.normalization_function is not None:
            image = self.normalization_function(image, annotations, **self.normalization_function_kwargs)

        norm_face_image = normalize_image_size(image=image,
                                               annotations=annotations,
                                               face_size=self.face_size,
......
@@ -63,6 +63,10 @@ setup(
'msu-mfsd = bob.pad.face.config.msu_mfsd:database',
'aggregated-db = bob.pad.face.config.aggregated_db:database',
'mifs = bob.pad.face.config.mifs:database',
'batl-db = bob.pad.face.config.batl_db:database',
'batl-db-infrared = bob.pad.face.config.batl_db_infrared:database',
'batl-db-depth = bob.pad.face.config.batl_db_depth:database',
'batl-db-thermal = bob.pad.face.config.batl_db_thermal:database',
],
# registered configurations:
@@ -73,6 +77,10 @@ setup(
'msu-mfsd = bob.pad.face.config.msu_mfsd',
'aggregated-db = bob.pad.face.config.aggregated_db',
'mifs = bob.pad.face.config.mifs',
'batl-db = bob.pad.face.config.batl_db',
'batl-db-infrared = bob.pad.face.config.batl_db_infrared',
'batl-db-depth = bob.pad.face.config.batl_db_depth',
'batl-db-thermal = bob.pad.face.config.batl_db_thermal',
# baselines using SVM:
'lbp-svm = bob.pad.face.config.lbp_svm',
@@ -88,6 +96,7 @@ setup(
# baselines using LR:
'qm-lr = bob.pad.face.config.qm_lr', # this pipeline can be used both for individual and Aggregated databases.
'lbp-lr-batl-D-T-IR = bob.pad.face.config.lbp_lr_batl_D_T_IR', # this pipeline can be used with the BATL database: Depth, Thermal and Infrared channels.
# baselines using GMM:
'qm-one-class-gmm = bob.pad.face.config.qm_one_class_gmm', # this pipeline can be used both for individual and Aggregated databases.
......