Commit 85f05c73 authored by Manuel Günther

Optimized implementation of GridGraph; added tons of documentation; updated eigenface configuration

parent dd96ab29
from .GaborJet import GaborJet
from .LGBPHS import LGBPHS
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
......@@ -4,5 +4,5 @@ import bob.bio.face
# compute eigenfaces using the training database
extractor = bob.bio.face.extractor.Eigenface(
subspace_dimension = 100
subspace_dimension = .95
)
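For illustration, here is a hedged sketch (not part of this commit) of the two ways the ``subspace_dimension`` parameter can now be used, assuming an integer selects a fixed number of eigenvectors and a float in (0, 1) selects the fraction of variance to retain, as implemented in the extractor change below:
# Illustrative only: both configuration styles for the Eigenface extractor
import bob.bio.face
# keep exactly 100 eigenvectors (previous behaviour)
extractor_fixed = bob.bio.face.extractor.Eigenface(subspace_dimension = 100)
# keep as many eigenvectors as needed to retain 95% of the variance (new behaviour)
extractor_adaptive = bob.bio.face.extractor.Eigenface(subspace_dimension = .95)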
......@@ -4,9 +4,6 @@ import bob.bio.base
import bob.bio.face
import math
# load the face cropping parameters
cropper = bob.bio.base.load_resource("face-crop-eyes", "preprocessor")
extractor = bob.bio.face.extractor.GridGraph(
# Gabor parameters
gabor_sigma = math.sqrt(2.) * math.pi,
......@@ -15,7 +12,5 @@ extractor = bob.bio.face.extractor.GridGraph(
normalize_gabor_jets = True,
# setup of the fixed grid
node_distance = (4, 4),
first_node = (6, 6),
image_resolution = cropper.cropped_image_size
node_distance = (8, 8)
)
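As a hedged usage sketch (not part of this commit), the simplified configuration can be applied to a preprocessed image directly, since the grid placement is now derived from the image resolution at extraction time; the 80x64 size below is only an assumption:
# Illustrative only: the extractor no longer needs the face cropper's resolution
import math
import numpy
import bob.bio.face
extractor = bob.bio.face.extractor.GridGraph(
  gabor_sigma = math.sqrt(2.) * math.pi,
  normalize_gabor_jets = True,
  node_distance = (8, 8)
)
image = numpy.random.random((80, 64))  # stand-in for a cropped face (float64)
jets = extractor(image)                # grid is placed according to image.shape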
......@@ -36,7 +36,18 @@ class Eigenface (Extractor):
logger.info(" -> Training LinearMachine using PCA (SVD)")
t = bob.learn.linear.PCATrainer()
self.machine, __eig_vals = t.train(data)
self.machine, variances = t.train(data)
# compute variance percentage, if desired
if isinstance(self.subspace_dimension, float):
cummulated = numpy.cumsum(variances) / numpy.sum(variances)
for index in range(len(cummulated)):
if cummulated[index] > self.subspace_dimension:
self.subspace_dimension = index
break
self.subspace_dimension = index
logger.info(" -> Keeping %d eigenvectors" % self.subspace_dimension)
# Machine: get shape, then resize
self.machine.resize(self.machine.shape[0], self.subspace_dimension)
self.machine.save(bob.io.base.HDF5File(extractor_file, "w"))
......
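A minimal, self-contained sketch of the cumulative-variance selection introduced above, with made-up eigenvalues (not taken from any real training set):
# Illustrative only: pick the smallest index whose cumulative variance fraction
# exceeds the requested threshold, mirroring the loop in Eigenface.train above
import numpy
variances = numpy.array([4., 2., 1., .5, .3, .2])
fraction = .95
cummulated = numpy.cumsum(variances) / numpy.sum(variances)
# cummulated == [0.5, 0.75, 0.875, 0.9375, 0.975, 1.0]
for index in range(len(cummulated)):
  if cummulated[index] > fraction:
    subspace_dimension = index
    break
print(subspace_dimension)  # -> 4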
......@@ -35,7 +35,6 @@ class GridGraph (Extractor):
# setup of static grid
node_distance = None, # one or two integral values
image_resolution = None, # always two integral values
first_node = None, # one or two integral values, or None -> automatically determined
):
......@@ -57,7 +56,6 @@ class GridGraph (Extractor):
nodes_above_eyes = nodes_above_eyes,
nodes_below_eyes = nodes_below_eyes,
node_distance = node_distance,
image_resolution = image_resolution,
first_node = first_node
)
......@@ -74,7 +72,7 @@ class GridGraph (Extractor):
# create graph extractor
if eyes is not None:
self.graph = bob.ip.gabor.Graph(
self._aligned_graph = bob.ip.gabor.Graph(
righteye = [int(e) for e in eyes['reye']],
lefteye = [int(e) for e in eyes['leye']],
between = int(nodes_between_eyes),
......@@ -83,43 +81,63 @@ class GridGraph (Extractor):
below = int(nodes_below_eyes)
)
else:
if node_distance is None or image_resolution is None:
raise ValueError("Please specify either 'eyes' or the grid parameters 'first_node', 'last_node', and 'node_distance'!")
if isinstance(node_distance, (int, float)):
node_distance = (int(node_distance), int(node_distance))
if first_node is None:
if node_distance is None:
raise ValueError("Please specify either 'eyes' or the grid parameters 'node_distance' (and 'first_node')!")
self._aligned_graph = None
self._last_image_resolution = None
self.first_node = first_node
self.node_distance = node_distance
if isinstance(self.node_distance, (int, float)):
self.node_distance = (int(self.node_distance), int(self.node_distance))
self.normalize_jets = normalize_gabor_jets
self.trafo_image = None
def _extractor(self, image):
"""Creates an extractor based on the given image."""
if self.trafo_image is None or self.trafo_image.shape[1:3] != image.shape:
# create trafo image
self.trafo_image = numpy.ndarray((self.gwt.number_of_wavelets, image.shape[0], image.shape[1]), numpy.complex128)
if self._aligned_graph is not None:
return self._aligned_graph
if self._last_image_resolution != image.shape:
self._last_image_resolution = image.shape
if self.first_node is None:
first_node = [0,0]
for i in (0,1):
offset = int((image_resolution[i] - int(image_resolution[i]/node_distance[i])*node_distance[i]) / 2)
if offset < node_distance[i]//2: # This is not tested, but should ALWAYS be the case.
offset += node_distance[i]//2
offset = int((image.shape[i] - int(image.shape[i]/self.node_distance[i])*self.node_distance[i]) / 2)
if offset < self.node_distance[i]//2: # This is not tested, but should ALWAYS be the case.
offset += self.node_distance[i]//2
first_node[i] = offset
last_node = tuple([int(image_resolution[i] - max(first_node[i],1)) for i in (0,1)])
else:
first_node = self.first_node
last_node = tuple([int(image.shape[i] - max(first_node[i],1)) for i in (0,1)])
# take the specified nodes
self.graph = bob.ip.gabor.Graph(
self._graph = bob.ip.gabor.Graph(
first = first_node,
last = last_node,
step = node_distance
step = self.node_distance
)
self.normalize_jets = normalize_gabor_jets
self.trafo_image = None
return self._graph
def __call__(self, image):
assert image.ndim == 2
assert isinstance(image, numpy.ndarray)
assert image.dtype == numpy.float64
if self.trafo_image is None or self.trafo_image.shape[1:3] != image.shape:
# create trafo image
self.trafo_image = numpy.ndarray((self.gwt.number_of_wavelets, image.shape[0], image.shape[1]), numpy.complex128)
extractor = self._extractor(image)
# perform Gabor wavelet transform
self.gwt.transform(image, self.trafo_image)
# extract face graph
jets = self.graph.extract(self.trafo_image)
jets = extractor.extract(self.trafo_image)
# normalize the Gabor jets of the graph only
if self.normalize_jets:
......
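A hedged usage sketch (hypothetical image sizes) of the lazy graph creation above: a single GridGraph instance can now serve images of different resolutions, rebuilding the node grid whenever the resolution changes:
# Illustrative only: the grid is (re)built inside _extractor() on resolution change
import numpy
import bob.bio.face
graph = bob.bio.face.extractor.GridGraph(node_distance = (8, 8))
small = numpy.random.random((64, 64))    # float64, as required by __call__
large = numpy.random.random((128, 128))
jets_small = graph(small)   # builds a grid fitting the 64x64 image
jets_large = graph(large)   # rebuilds the grid for the 128x128 image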
......@@ -2,3 +2,6 @@ from .DCTBlocks import DCTBlocks
from .GridGraph import GridGraph
from .LGBPHS import LGBPHS
from .Eigenface import Eigenface
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
......@@ -45,6 +45,8 @@ class FaceDetect (Base):
lowest_scale = lowest_scale
)
assert face_cropper is not None
self.sampler = bob.ip.facedetect.Sampler(scale_factor=scale_base, lowest_scale=lowest_scale, distance=distance)
if cascade is None:
self.cascade = bob.ip.facedetect.default_cascade()
......
......@@ -6,3 +6,6 @@ from .TanTriggs import TanTriggs
from .INormLBP import INormLBP
from .HistogramEqualization import HistogramEqualization
from .SelfQuotientImage import SelfQuotientImage
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
......@@ -58,8 +58,9 @@ def command_line_arguments(command_line_parameters):
parser.add_argument('-d', '--database', choices = available_databases, default = 'atnt', help = 'The database on which the baseline algorithm is executed.')
# - the database to choose
parser.add_argument('-b', '--baseline-directory', default = 'baselines', help = 'The sub-directory, where the baseline results are stored.')
# - the directory to write
parser.add_argument('-f', '--directory', help = 'The directory to write the data of the experiment into. If not specified, the default directories of the verify.py script are used (see ./bin/verify.py --help).')
# - the directories to write to
parser.add_argument('-T', '--temp-directory', help = 'The directory to write the temporary data of the experiment into. If not specified, the default directory of the verify.py script is used (see ./bin/verify.py --help).')
parser.add_argument('-R', '--result-directory', help = 'The directory to write the resulting score files of the experiment into. If not specified, the default directories of the verify.py script are used (see ./bin/verify.py --help).')
# - use the Idiap grid -- option is only useful if you are at Idiap
parser.add_argument('-g', '--grid', action = 'store_true', help = 'Execute the algorithm in the SGE grid.')
......@@ -225,9 +226,11 @@ def main(command_line_parameters = None):
if has_eval:
command += ['--groups', 'dev', 'eval']
# set the directories, if desired; we set both directories to be identical.
if args.directory is not None:
command += ['--temp-directory', os.path.join(args.directory, args.database), '--result-directory', os.path.join(args.directory, args.database)]
# set the directories, if desired
if args.temp_directory is not None:
command += ['--temp-directory', os.path.join(args.temp_directory)]
if args.result_directory is not None:
command += ['--result-directory', os.path.join(args.result_directory)]
# set the verbosity level
if args.verbose:
......@@ -249,17 +252,17 @@ def main(command_line_parameters = None):
# get the base directory of the results
is_idiap = os.path.isdir("/idiap")
if args.directory is None:
args.directory = "/idiap/user/%s/%s" % (os.environ["USER"], args.database) if is_idiap else "results"
if not os.path.exists(args.directory):
if args.result_directory is None:
args.result_directory = "/idiap/user/%s/%s" % (os.environ["USER"], args.database) if is_idiap else "results"
if not os.path.exists(args.result_directory):
if not args.dry_run:
raise IOError("The result directory '%s' cannot be found. Please specify the --directory as it was specified during execution of the algorithms." % args.directory)
raise IOError("The result directory '%s' cannot be found. Please specify the --result-directory as it was specified during execution of the algorithms." % args.result_directory)
# get the result directory of the database
result_dir = os.path.join(args.directory, args.baseline_directory)
result_dir = os.path.join(args.result_directory, args.baseline_directory)
if not os.path.exists(result_dir):
if not args.dry_run:
raise IOError("The result directory '%s' for the desired database cannot be found. Did you already run the experiments for this database?" % result_dir)
raise IOError("The result directory '%s' for the desired experiment cannot be found. Did you already run the experiments?" % result_dir)
# iterate over the algorithms and collect the result files
result_dev = []
......
......@@ -74,7 +74,7 @@ def test_graphs():
assert not graph.requires_training
# generate smaller extractor, using mixed tuple and int input for the node distance and first location
graph = bob.bio.face.extractor.GridGraph(node_distance = 24, image_resolution = data.shape)
graph = bob.bio.face.extractor.GridGraph(node_distance = 24)
# extract features
feature = graph(data)
......@@ -104,7 +104,7 @@ def test_graphs():
nodes_below_eyes = 7
)
nodes = graph.graph.nodes
nodes = graph._extractor(data).nodes
assert len(nodes) == 100
assert numpy.allclose(nodes[22], eyes['reye'])
assert numpy.allclose(nodes[27], eyes['leye'])
......
.. vim: set fileencoding=utf-8 :
.. author: Manuel Günther <manuel.guenther@idiap.ch>
.. date: Thu Sep 20 11:58:57 CEST 2012
.. _baselines:
=============================
Executing Baseline Algorithms
=============================
The first thing you might want to do is to execute one of the baseline face recognition algorithms that are implemented in ``bob.bio``.
Setting up your Database
------------------------
As mentioned in the documentation of :ref:`bob.bio.base <bob.bio.base>`, the image databases are not included in this package, so you have to download them.
For example, you can easily download the images of the `AT&T database`_; for links to other usable image databases, please read the :ref:`bob.bio.face.databases` section.
By default, ``bob.bio`` does not know where the images are located.
Hence, before running experiments you have to specify the image database directories.
How this is done is explained in more detail in the :ref:`bob.bio.base.installation`.
Running Baseline Experiments
----------------------------
To run the baseline experiments, you can use the ``./bin/baselines.py`` script by just going to the console and typing:
.. code-block:: sh
$ ./bin/baselines.py
This script is a simple wrapper for the ``./bin/verify.py`` script that is explained in more detail in :ref:`bob.bio.base.experiments`.
The ``./bin/baselines.py --help`` option shows you which other options are available.
Here is an almost complete extract:
* ``--database``: The database and protocol you want to use.
By default this is set to the image database *atnt*.
* ``--algorithms``: The recognition algorithms that you want to execute.
By default, only the *eigenface* algorithm is executed.
* ``--all``: Execute all algorithms that are implemented.
* ``--temp-directory``: The directory where the temporary files of the experiments are stored.
* ``--result-directory``: The directory where the resulting score files of the experiments are stored.
* ``--evaluate``: After running the experiments, the resulting score files will be evaluated, and the result is written to console.
* ``--dry-run``: Instead of executing the algorithm (or the evaluation), only print the command that would have been executed.
* ``--verbose``: Increase the verbosity level of the script.
By default, only the commands that are executed are printed, and the rest of the calculation runs quietly.
You can increase the verbosity by adding the ``--verbose`` parameter repeatedly (up to three times).
Usually it is a good idea to have at least verbose level 2 (i.e., calling ``./bin/baselines.py --verbose --verbose``, or the short version ``./bin/baselines.py -vv``).
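For example, a typical call combining these options might look as follows (the directory names are placeholders):
.. code-block:: sh

   $ ./bin/baselines.py --algorithms eigenface -vv --temp-directory temp --result-directory results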
Running in Parallel
~~~~~~~~~~~~~~~~~~~
To run the experiments in parallel, as usual you can define an SGE grid configuration, or run with parallel threads on the local machine.
For the ``./bin/baselines.py`` script, the grid configuration is adapted to each of the algorithms.
Hence, to run in the SGE grid, you can simply add the ``--grid`` command line option, without parameters.
Similarly, to run the experiments in parallel on the local machine, simply add a ``--parallel <N>`` option, where ``<N>`` specifies the number of parallel jobs you want to execute.
When running the algorithms from the :ref:`bob.bio.gmm <bob.bio.gmm>` package in parallel, the specialized scripts are executed.
This will speed up the training of the UBM (and possible additional steps) tremendously.
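For example (hypothetical invocations, the first one assuming a properly configured SGE setup):
.. code-block:: sh

   $ ./bin/baselines.py --all -vv --grid
   $ ./bin/baselines.py --all -vv --parallel 4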
The Algorithms
--------------
This package provides an (incomplete) set of state-of-the-art face recognition algorithms. Here is the list of short-cuts:
* ``eigenface``: The eigenface algorithm as proposed by [TP91]_. It uses the pixels as raw data, and applies a *Principal Component Analysis* (PCA) on it:
- preprocessor : :py:class:`bob.bio.face.preprocessor.FaceCrop`
- feature : :py:class:`bob.bio.base.extractor.Linearize`
- algorithm : :py:class:`bob.bio.base.algorithm.PCA`
* ``lda``: The LDA algorithm applies a *Linear Discriminant Analysis* (LDA), here we use the combined PCA+LDA approach [ZKC+98]_:
- preprocessor : :py:class:`bob.bio.face.preprocessor.FaceCrop`
- feature : :py:class:`bob.bio.face.extractor.Eigenface`
- algorithm : :py:class:`bob.bio.base.algorithm.LDA`
* ``gaborgraph``: This method extracts grid graphs of Gabor jets from the images, and computes a Gabor-phase-based similarity [GHW12]_.
- preprocessor : :py:class:`bob.bio.face.preprocessor.INormLBP`
- feature : :py:class:`bob.bio.face.extractor.GridGraph`
- algorithm : :py:class:`bob.bio.face.algorithm.GaborJet`
* ``lgbphs``: *Local Gabor Binary Pattern Histogram Sequences* (LGBPHS) [ZSG+05]_ are extracted from the images and compared using the histogram intersection measure:
- preprocessor : :py:class:`bob.bio.face.preprocessor.TanTriggs`
- feature : :py:class:`bob.bio.face.extractor.LGBPHS`
- algorithm : :py:class:`bob.bio.face.algorithm.LGBPHS`
* ``plda``: *Probabilistic LDA* (PLDA) [Pri07]_ is a probabilistic generative version of the LDA, in its scalable formulation of [ESM+13]_.
Here, we also apply it on pixel-based representations of the image, though other features would also be possible.
- preprocessor : :py:class:`bob.bio.face.preprocessor.FaceCrop`
- feature : :py:class:`bob.bio.base.extractor.Linearize`
- algorithm : :py:class:`bob.bio.base.algorithm.PLDA`
* ``bic``: In the *Bayesian Intrapersonal/Extrapersonal Classifier* (BIC) [MWP98]_, a Gabor-grid-graph-based similarity vector is classified as either intrapersonal (i.e., both images are from the same person) or extrapersonal, as explained in [GW09]_.
- preprocessor : :py:class:`bob.bio.face.preprocessor.FaceCrop`
- feature : :py:class:`bob.bio.face.extractor.GridGraph`
- algorithm : :py:class:`bob.bio.base.algorithm.BIC`
.. note::
The ``plda`` algorithm is currently under construction and the setup is not yet useful.
Further algorithms are available when the :ref:`bob.bio.gmm <bob.bio.gmm>` package is installed:
* ``gmm``: *Gaussian Mixture Models* (GMM) [MM09]_ are extracted from *Discrete Cosine Transform* (DCT) block features.
- preprocessor : :py:class:`bob.bio.face.preprocessor.TanTriggs`
- feature : :py:class:`bob.bio.face.extractor.DCTBlocks`
- algorithm : :py:class:`bob.bio.gmm.algorithm.GMM`
* ``isv``: As an extension of the GMM algorithm, *Inter-Session Variability* (ISV) modeling [WMM+11]_ is used to learn which variations in the images are introduced by identity changes and which are not.
- preprocessor : :py:class:`bob.bio.face.preprocessor.TanTriggs`
- feature : :py:class:`bob.bio.face.extractor.DCTBlocks`
- algorithm : :py:class:`bob.bio.gmm.algorithm.ISV`
* ``ivector``: Another extension of the GMM algorithm is *Total Variability* (TV) modeling [WM12]_ (a.k.a. I-Vector), which tries to learn a subspace in the GMM super-vector space.
- preprocessor : :py:class:`bob.bio.face.preprocessor.TanTriggs`
- feature : :py:class:`bob.bio.face.extractor.DCTBlocks`
- algorithm : :py:class:`bob.bio.gmm.algorithm.IVector`
.. note::
The ``ivector`` algorithm needs a lot of training data and fails on small databases such as the `AT&T database`_.
Additionally, the following algorithms can be executed when the :ref:`bob.bio.csu <bob.bio.csu>` package is installed.
* ``lrpca``: In Local Region PCA [PBD+11]_, the face is sub-divided into local regions and a PCA is performed for each local region.
- preprocessor : :py:class:`bob.bio.csu.preprocessor.LRPCA`
- feature : :py:class:`bob.bio.csu.extractor.LRPCA`
- algorithm : :py:class:`bob.bio.csu.algorithm.LRPCA`
* ``lda_ir``: The LDA-IR (a.k.a. CohortLDA [LBP+12]_) extracts color information from the images and computes a PCA+LDA projection on two color layers.
- preprocessor : :py:class:`bob.bio.csu.preprocessor.LDAIR`
- feature : :py:class:`bob.bio.csu.extractor.LDAIR`
- algorithm : :py:class:`bob.bio.csu.algorithm.LDAIR`
.. note::
The ``lrpca`` and ``lda_ir`` algorithms require hand-labeled eye locations.
Therefore, they cannot be run on the default ``atnt`` database.
.. _baseline_results:
Baseline Results
----------------
To evaluate the results, a wrapper call to ``./bin/evaluate.py`` is produced by the ``./bin/baselines.py --evaluate`` command.
Several types of evaluation can be achieved; see :ref:`bob.bio.base.evaluate` for details.
Particularly, here we can enable ROC curves, DET plots, CMC curves and the computation of EER/HTER.
Hence, the complete set of results of the baseline experiments is generated using:
.. code-block:: sh
$ ./bin/baselines.py --all -vv --evaluate ROC DET CMC HTER
If you specified other parameters for the execution of the algorithms, e.g., the ``--result-directory`` flag, you have to add these options here as well.
If you ran only a subset of the available algorithms, the missing ones will just be skipped.
The resulting files will be ``ROC.pdf``, ``DET.pdf`` and ``CMC.pdf``, and the HTER results are simply written to console.
For the `AT&T database`_ the results should be as follows:
.. image:: img/ROC.png
:width: 35%
.. image:: img/DET.png
:width: 27%
.. image:: img/CMC.png
:width: 35%
.. table:: The HTER results of the baseline algorithms on the AT&T database
+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
| eigenface | lda | gaborgraph | lgbphs | gmm | isv | plda | bic |
+=============+=============+=============+=============+=============+=============+=============+=============+
| 8.368% | 9.763% | 4.579% | 8.500% | 1.237% | 0.053% | 7.921% | 3.526% |
+-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
.. note::
The ``lrpca`` and ``lda_ir`` algorithms require hand-labeled eye positions to run.
Since the AT&T database does not provide eye positions, it is not possible to provide baseline results on AT&T for these two algorithms.
.. include:: links.rst
......@@ -86,7 +86,7 @@ release = distribution.version
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
#exclude_patterns = ['**/links.rst']
exclude_patterns = ['links.rst', 'references.rst']
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
......@@ -133,12 +133,12 @@ if sphinx.__version__ >= "1.0":
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = ''
html_logo = 'img/logo.png'
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = ''
html_favicon = 'img/favicon.ico'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
......@@ -246,7 +246,7 @@ autodoc_default_flags = ['members', 'undoc-members', 'inherited-members', 'show-
# For inter-documentation mapping:
from bob.extension.utils import link_documentation
intersphinx_mapping = link_documentation(['python', 'numpy', 'bob.io.base', 'bob.db.verification.utils'])
intersphinx_mapping = link_documentation(['python', 'numpy', 'bob.bio.gmm', 'bob.bio.csu'])
def setup(app):
......
======================
Implementation Details
======================
Image preprocessing
-------------------
Image preprocessing is an important stage for face recognition.
In :ref:`bob.bio.face <bob.bio.face>`, several different algorithms to perform photometric enhancement of facial images are implemented.
These algorithms rely on facial images that are aligned according to the eye locations and scaled to a specific image resolution.
Face cropping
~~~~~~~~~~~~~
However, for most of the image databases, the faces in the original images are not aligned; instead, the eye locations are labeled by hand.
Hence, before the photometric enhancement algorithms can be applied, faces must be aligned according to the hand-labeled eye locations.
This can be achieved using the :py:class:`bob.bio.face.preprocessor.FaceCrop` class.
It will take the image and the hand-labeled eye locations and crop the face according to some parameters, which can be defined in its constructor.
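For example, a face cropper with a fixed output resolution and fixed eye positions might be set up as follows (the concrete values are illustrative assumptions, not the registered ``'face-crop-eyes'`` configuration):
.. code-block:: py

   # illustrative cropping parameters: 80x64 output, eyes placed at fixed locations
   face_cropper = bob.bio.face.preprocessor.FaceCrop(
     cropped_image_size = (80, 64),
     cropped_positions = {'reye' : (16, 15), 'leye' : (16, 48)}
   )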
So, now we have a preprocessor to perform face cropping, and some preprocessors to perform photometric enhancement.
However, we might want to have a photometric enhancement *on top of* the aligned faces.
In theory, there are several ways to achieve this:
1. Copy the face alignment code into all photometric enhancement classes.
As copying code is generally a bad choice, we drop this option.
2. Use the face cropping as a base class and derive the photometric enhancement classes from it.
This option is worth implementing, and this was the way the FaceRecLib_ handled preprocessing.
However, it required copying code inside the configuration files.
This means that, when we want to run on a different image resolution, we need to change all configuration files.
Option 2 dropped.
3. Provide the face cropper as parameter to the photometric enhancement classes.
This option has the advantage that the configuration has to be written only once.
Also, we might change the face cropper to something else later, without needing to touch the photometric enhancement code.
Option 3 accepted.
Now, let's have a closer look at how the image preprocessing is implemented.
Let's take the example of the :py:class:`bob.bio.face.preprocessor.TanTriggs`.
The constructor takes a ``face_cropper`` as parameter.
This ``face_cropper`` can be ``None``, when the images are already aligned.
It can also be a :py:class:`bob.bio.face.preprocessor.FaceCrop` object, which contains the information on how faces are cropped.
The :py:class:`bob.bio.face.preprocessor.TanTriggs` algorithm will use the ``face_cropper`` to crop the face, by passing the image and the annotations to the :py:meth:`bob.bio.face.preprocessor.FaceCrop.crop_face` function, perform the photometric enhancement on the cropped image, and return the result.
So far, there is no advantage of option 3 over option 2, since the parameters for face cropping still have to be specified in the configuration file.
But now comes the clue: the third way a ``face_cropper`` can be passed to the constructor is as a :ref:`Resource <bob.bio.face.preprocessors>` key, such as ``'face-crop-eyes'``.
This will load the face cropping configuration from the registered resource, which has to be generated only once.
So, to generate a TanTriggs preprocessor that performs face cropping, you can create:
.. code-block:: py
preprocessor = bob.bio.face.preprocessor.TanTriggs(face_cropper = 'face-crop-eyes')
Face detection
~~~~~~~~~~~~~~
Alright.
Now if you have swallowed that, there comes the next step: face detection.
Some of the databases neither provide hand-labeled eye locations, nor are the images pre-cropped.
However, we want to use the same algorithms on those images as well, so we have to detect the face (and the facial landmarks), crop the face and perform a photometric enhancement.
So, image preprocessing becomes a three-stage algorithm.
We have already seen how to combine two of the stages, image alignment and photometric enhancement.
Fortunately, the same technique can be applied for the :py:class:`bob.bio.face.preprocessor.FaceDetect`.
The face detector takes a ``face_cropper`` as input, for which we can use the same options as before, except that we cannot pass ``None``.
Interestingly, the face detector itself can be used as a ``face_cropper`` inside the photometric enhancement classes.
Hence, to generate a TanTriggs preprocessor that performs face detection, crops the face and performs photometric enhancement, you can create:
.. code-block:: py
preprocessor = bob.bio.face.preprocessor.TanTriggs(face_cropper = bob.bio.face.preprocessor.FaceDetect(face_cropper = 'face-crop-eyes', use_flandmark = True) )
Or simply (using the face detector :ref:`Resource <bob.bio.face.preprocessors>`):
.. code-block:: py
preprocessor = bob.bio.face.preprocessor.TanTriggs(face_cropper = 'landmark-detect')
.. _bob.bio.face.resources:
Registered Resources
--------------------
.. _bob.bio.face.databases:
Databases
~~~~~~~~~
One important aspect of :ref:`bob.bio.face <bob.bio.face>` is the relatively large list of supported image data sets, including well-defined evaluation protocols.
All databases rely on the :py:class:`bob.bio.base.database.DatabaseBob` interface, which in turn uses the :ref:`verification_databases`.
Please check the link above for information on how to obtain the original data of those data sets.
After downloading and extracting the original data of the data sets, it is necessary that the scripts know where the data was installed.
For this purpose, the ``./bin/verify.py`` script can read a special file where those directories are stored, see :ref:`bob.bio.base.installation`.
By default, this file is located in your home directory, but you can specify another file on command line.
The other option is to change the directories directly inside the configuration files.
Here is the list of files and replacement strings for all databases that are registered as resource, in alphabetical order:
* The AT&T database of faces: ``'atnt'``
- Images: ``[YOUR_ATNT_DIRECTORY]``
* AR face: ``'arface'``
- Images: ``[YOUR_ARFACE_DIRECTORY]``
* BANCA (english): ``'banca'``
- Images: ``[YOUR_BANCA_DIRECTORY]``
* CAS-PEAL: ``'caspeal'``
- Images: ``[YOUR_CAS-PEAL_DIRECTORY]``
* Face Recognition Grand Challenge v2 (FRGC): ``'frgc'``