Commit b6e0c468 authored by Tiago de Freitas Pereira's avatar Tiago de Freitas Pereira
Browse files

Merge branch 'master' into IJB-C-highlevel

parents 55c04295 03e02ecf
Pipeline #20289 failed with stage
in 10 minutes and 31 seconds
......@@ -12,3 +12,4 @@ develop-eggs
sphinx
dist
record.txt
build/
......@@ -53,4 +53,4 @@ development `mailing list`_.
.. Place your references here:
.. _bob: https://www.idiap.ch/software/bob
.. _installation: https://www.idiap.ch/software/bob/install
.. _mailing list: https://www.idiap.ch/software/bob/discuss
.. _mailing list: https://www.idiap.ch/software/bob/discuss
\ No newline at end of file
......@@ -4,6 +4,7 @@ from . import algorithm
from . import script
from . import database
from . import annotator
from . import baseline
from . import test
......
......@@ -38,9 +38,17 @@ def min_face_size_validator(annotations, min_face_size=(32, 32)):
"""
if not annotations:
return False
bbx = bob.ip.facedetect.bounding_box_from_annotation(
source='direct', **annotations)
if bbx.size < min_face_size:
for source in ('direct', 'eyes', None):
try:
bbx = bob.ip.facedetect.bounding_box_from_annotation(
source=source, **annotations)
break
except Exception:
if source is None:
raise
else:
pass
if bbx.size[0] < min_face_size[0] or bbx.size[1] < min_face_size[1]:
return False
return True
......
# Export every public (non-underscore-prefixed) name defined in this module.
__all__ = [name for name in dir() if not name.startswith('_')]
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
"""
Defining some face recognition baselines.

Each baseline bundles, by resource name, a per-database preprocessor, a
feature extractor and a recognition algorithm.  The ``preprocessors`` dict
maps a database name (or ``'default'``) to the preprocessor resource to use.
"""

from bob.bio.base.baseline import Baseline

eigenface = Baseline(name="eigenface",
                     preprocessors={'default': 'face-crop-eyes', 'atnt': 'base'},
                     extractor='linearize',
                     algorithm='pca')

lda = Baseline(name="lda",
               preprocessors={'default': 'face-crop-eyes', 'atnt': 'base'},
               extractor='eigenface',
               algorithm='lda')

plda = Baseline(name="plda",
                preprocessors={'default': 'face-crop-eyes', 'atnt': 'base'},
                extractor='linearize',
                algorithm='pca+plda')

gabor_graph = Baseline(name="gabor_graph",
                       preprocessors={'default': 'inorm-lbp-crop', 'atnt': 'inorm-lbp'},
                       extractor='grid-graph',
                       algorithm='gabor-jet')

lgbphs = Baseline(name="lgbphs",
                  preprocessors={'default': 'tan-triggs-crop', 'atnt': 'tan-triggs'},
                  extractor='lgbphs',
                  algorithm='histogram')

gmm = Baseline(name="gmm",
               preprocessors={'default': 'tan-triggs-crop', 'atnt': 'tan-triggs'},
               extractor='dct-blocks',
               algorithm='gmm')

isv = Baseline(name="isv",
               preprocessors={'default': 'tan-triggs-crop', 'atnt': 'tan-triggs'},
               extractor='dct-blocks',
               algorithm='isv')

# BUG FIX: this baseline was registered as name="gmm" (an obvious copy-paste
# slip from the block above), which collided with the real GMM baseline.
# It must carry its own name, "ivector".
ivector = Baseline(name="ivector",
                   preprocessors={'default': 'tan-triggs-crop', 'atnt': 'tan-triggs'},
                   extractor='dct-blocks',
                   algorithm='ivector-cosine')

bic = Baseline(name="bic",
               preprocessors={'default': 'face-crop-eyes', 'atnt': 'base'},
               extractor='grid-graph',
               algorithm='bic-jets')
......@@ -86,3 +86,7 @@ class MobioBioDatabase(ZTBioDatabase):
def annotations(self, myfile):
    """Return the annotations for the given high-level file object.

    Unwraps the low-level file (``myfile._f``) and delegates the lookup to
    the wrapped low-level database ``self._db``.
    """
    lowlevel_file = myfile._f
    return self._db.annotations(lowlevel_file)
def groups(self, protocol=None, **kwargs):
    """Return the groups available for the given protocol.

    Extra keyword arguments are accepted for interface compatibility with
    the base class and are intentionally ignored.
    """
    database = self._db
    return database.groups(protocol=protocol)
......@@ -2,20 +2,6 @@
# vim: set fileencoding=utf-8 :
# @author: Manuel Guenther <Manuel.Guenther@idiap.ch>
# @date: Thu May 24 10:41:42 CEST 2012
#
# Copyright (C) 2011-2012 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import bob.ip.base
import numpy
......@@ -30,71 +16,100 @@ logger = logging.getLogger('bob.bio.face')
class FaceCrop (Base):
"""Crops the face according to the given annotations.
This class is designed to perform a geometric normalization of the face based on the eye locations, using :py:class:`bob.ip.base.FaceEyesNorm`.
Usually, when executing the :py:meth:`crop_face` function, the image and the eye locations have to be specified.
There, the given image will be transformed such that the eye locations will be placed at specific locations in the resulting image.
These locations, as well as the size of the cropped image, need to be specified in the constructor of this class, as ``cropped_positions`` and ``cropped_image_size``.
This class is designed to perform a geometric normalization of the face based
on the eye locations, using :py:class:`bob.ip.base.FaceEyesNorm`. Usually,
when executing the :py:meth:`crop_face` function, the image and the eye
locations have to be specified. There, the given image will be transformed
such that the eye locations will be placed at specific locations in the
resulting image. These locations, as well as the size of the cropped image,
need to be specified in the constructor of this class, as
``cropped_positions`` and ``cropped_image_size``.
Some image databases do not provide eye locations, but rather bounding boxes.
This is not a problem at all.
Simply define the coordinates, where you want your ``cropped_positions`` to be in the cropped image, by specifying the same keys in the dictionary that will be given as ``annotations`` to the :py:meth:`crop_face` function.
Simply define the coordinates, where you want your ``cropped_positions`` to
be in the cropped image, by specifying the same keys in the dictionary that
will be given as ``annotations`` to the :py:meth:`crop_face` function.
.. note::
These locations can even be outside of the cropped image boundary, i.e., when the crop should be smaller than the annotated bounding boxes.
.. note::
Sometimes, databases provide pre-cropped faces, where the eyes are located at (almost) the same position in all images.
Usually, the cropping does not conform with the cropping that you like (i.e., image resolution is wrong, or too much background information).
However, the database does not provide eye locations (since they are almost identical for all images).
In that case, you can specify the ``fixed_positions`` in the constructor, which will be taken instead of the ``annotations`` inside the :py:meth:`crop_face` function (in which case the ``annotations`` are ignored).
These locations can even be outside of the cropped image boundary, i.e.,
when the crop should be smaller than the annotated bounding boxes.
Sometimes, databases provide pre-cropped faces, where the eyes are located at
(almost) the same position in all images. Usually, the cropping does not
conform with the cropping that you like (i.e., image resolution is wrong, or
too much background information). However, the database does not provide eye
locations (since they are almost identical for all images). In that case, you
can specify the ``fixed_positions`` in the constructor, which will be taken
instead of the ``annotations`` inside the :py:meth:`crop_face` function (in
which case the ``annotations`` are ignored).
Sometimes, the crop of the face is outside of the original image boundaries.
Usually, these pixels will simply be left black, resulting in sharp edges in the image.
However, some feature extractors do not like these sharp edges.
In this case, you can set the ``mask_sigma`` to copy pixels from the valid border of the image and add random noise (see :py:func:`bob.ip.base.extrapolate_mask`).
Usually, these pixels will simply be left black, resulting in sharp edges in
the image. However, some feature extractors do not like these sharp edges. In
this case, you can set the ``mask_sigma`` to copy pixels from the valid
border of the image and add random noise (see
:py:func:`bob.ip.base.extrapolate_mask`).
**Parameters:**
Parameters
----------
cropped_image_size : (int, int)
The size of the resulting cropped images.
The resolution of the cropped image, in order (HEIGHT,WIDTH); if not given,
no face cropping will be performed
cropped_positions : dict
The coordinates in the cropped image, where the annotated points should be put to.
This parameter is a dictionary with usually two elements, e.g., ``{'reye':(RIGHT_EYE_Y, RIGHT_EYE_X) , 'leye':(LEFT_EYE_Y, LEFT_EYE_X)}``.
However, also other parameters, such as ``{'topleft' : ..., 'bottomright' : ...}`` are supported, as long as the ``annotations`` in the `__call__` function are present.
The coordinates in the cropped image, where the annotated points should be
put to. This parameter is a dictionary with usually two elements, e.g.,
``{'reye':(RIGHT_EYE_Y, RIGHT_EYE_X) , 'leye':(LEFT_EYE_Y, LEFT_EYE_X)}``.
However, also other parameters, such as ``{'topleft' : ..., 'bottomright' :
...}`` are supported, as long as the ``annotations`` in the `__call__`
function are present.
fixed_positions : dict or None
If specified, ignore the annotations from the database and use these fixed positions throughout.
If specified, ignore the annotations from the database and use these fixed
positions throughout.
mask_sigma : float or None
Fill the area outside of image boundaries with random pixels from the border, by adding noise to the pixel values.
To disable extrapolation, set this value to ``None``.
To disable adding random noise, set it to a negative value or 0.
Fill the area outside of image boundaries with random pixels from the
border, by adding noise to the pixel values. To disable extrapolation, set
this value to ``None``. To disable adding random noise, set it to a
negative value or 0.
mask_neighbors : int
The number of neighbors used during mask extrapolation.
See :py:func:`bob.ip.base.extrapolate_mask` for details.
The number of neighbors used during mask extrapolation. See
:py:func:`bob.ip.base.extrapolate_mask` for details.
mask_seed : int or None
The random seed to apply for mask extrapolation.
.. warning::
When run in parallel, the same random seed will be applied to all parallel processes.
Hence, results of parallel execution will differ from the results in serial execution.
When run in parallel, the same random seed will be applied to all
parallel processes. Hence, results of parallel execution will differ
from the results in serial execution.
annotator : :any:`bob.bio.base.annotator.Annotator`
If provided, the annotator will be used if the required annotations are
missing.
kwargs
Remaining keyword parameters passed to the :py:class:`Base` constructor, such as ``color_channel`` or ``dtype``.
Remaining keyword parameters passed to the :py:class:`Base` constructor,
such as ``color_channel`` or ``dtype``.
"""
def __init__(
self,
cropped_image_size, # resolution of the cropped image, in order (HEIGHT,WIDTH); if not given, no face cropping will be performed
cropped_positions, # dictionary of the cropped positions, usually: {'reye':(RIGHT_EYE_Y, RIGHT_EYE_X) , 'leye':(LEFT_EYE_Y, LEFT_EYE_X)}
fixed_positions = None, # dictionary of FIXED positions in the original image; if specified, annotations from the database will be ignored
mask_sigma = None, # The sigma for random values areas outside image
mask_neighbors = 5, # The number of neighbors to consider while extrapolating
mask_seed = None, # The seed for generating random values during extrapolation
**kwargs # parameters to be written in the __str__ method
cropped_image_size,
cropped_positions,
fixed_positions=None,
mask_sigma=None,
mask_neighbors=5,
mask_seed=None,
annotator=None,
**kwargs
):
Base.__init__(self, **kwargs)
......@@ -102,12 +117,12 @@ class FaceCrop (Base):
# call base class constructor
Preprocessor.__init__(
self,
cropped_image_size = cropped_image_size,
cropped_positions = cropped_positions,
fixed_positions = fixed_positions,
mask_sigma = mask_sigma,
mask_neighbors = mask_neighbors,
mask_seed = mask_seed
cropped_image_size=cropped_image_size,
cropped_positions=cropped_positions,
fixed_positions=fixed_positions,
mask_sigma=mask_sigma,
mask_neighbors=mask_neighbors,
mask_seed=mask_seed
)
# check parameters
......@@ -122,44 +137,58 @@ class FaceCrop (Base):
self.fixed_positions = fixed_positions
self.mask_sigma = mask_sigma
self.mask_neighbors = mask_neighbors
self.mask_rng = bob.core.random.mt19937(mask_seed) if mask_seed is not None else bob.core.random.mt19937()
self.mask_rng = bob.core.random.mt19937(
mask_seed) if mask_seed is not None else bob.core.random.mt19937()
self.annotator = annotator
# create objects required for face cropping
self.cropper = bob.ip.base.FaceEyesNorm(crop_size=cropped_image_size, right_eye=cropped_positions[self.cropped_keys[0]], left_eye=cropped_positions[self.cropped_keys[1]])
self.cropper = bob.ip.base.FaceEyesNorm(
crop_size=cropped_image_size,
right_eye=cropped_positions[self.cropped_keys[0]],
left_eye=cropped_positions[self.cropped_keys[1]])
self.cropped_mask = numpy.ndarray(cropped_image_size, numpy.bool)
def crop_face(self, image, annotations=None):
"""Crops the face.
Executes the face cropping on the given image and returns the cropped
version of it.
def crop_face(self, image, annotations = None):
"""crop_face(image, annotations = None) -> face
Executes the face cropping on the given image and returns the cropped version of it.
**Parameters:**
Parameters
----------
image : 2D :py:class:`numpy.ndarray`
The face image to be processed.
The face image to be processed.
annotations : dict or ``None``
The annotations that fit to the given image.
``None`` is only accepted, when ``fixed_positions`` were specified in the constructor.
**Returns:**
The annotations that fit to the given image. ``None`` is only accepted,
when ``fixed_positions`` were specified in the constructor.
Returns
-------
face : 2D :py:class:`numpy.ndarray` (float)
The cropped face.
The cropped face.
Raises
------
ValueError
If the annotations is None.
"""
if self.fixed_positions is not None:
annotations = self.fixed_positions
if annotations is None:
raise ValueError("Cannot perform image cropping since annotations are not given, and no fixed annotations are specified.")
raise ValueError(
"Cannot perform image cropping since annotations are not given, and "
"no fixed annotations are specified.")
assert isinstance(annotations, dict)
if not all(k in annotations for k in self.cropped_keys):
raise ValueError("At least one of the expected annotations '%s' are not given in '%s'." % (self.cropped_keys, annotations.keys()))
raise ValueError(
"At least one of the expected annotations '%s' are not given "
"in '%s'." % (self.cropped_keys, annotations.keys()))
# create output
mask = numpy.ones(image.shape[-2:], dtype=numpy.bool)
shape = self.cropped_image_size if image.ndim == 2 else [image.shape[0]] + list(self.cropped_image_size)
shape = self.cropped_image_size if image.ndim == 2 else [
image.shape[0]] + list(self.cropped_image_size)
cropped_image = numpy.zeros(shape)
self.cropped_mask[:] = False
......@@ -167,51 +196,78 @@ class FaceCrop (Base):
self.cropper(
image, # input image
mask, # full input mask
cropped_image, # cropped image
cropped_image, # cropped image
self.cropped_mask, # cropped mask
right_eye = annotations[self.cropped_keys[0]], # position of first annotation, usually right eye
left_eye = annotations[self.cropped_keys[1]] # position of second annotation, usually left eye
# position of first annotation, usually right eye
right_eye=annotations[self.cropped_keys[0]],
# position of second annotation, usually left eye
left_eye=annotations[self.cropped_keys[1]]
)
if self.mask_sigma is not None:
# extrapolate the mask so that pixels outside of the image original image region are filled with border pixels
# extrapolate the mask so that pixels outside of the image original image
# region are filled with border pixels
if cropped_image.ndim == 2:
bob.ip.base.extrapolate_mask(self.cropped_mask, cropped_image, self.mask_sigma, self.mask_neighbors, self.mask_rng)
bob.ip.base.extrapolate_mask(
self.cropped_mask, cropped_image, self.mask_sigma,
self.mask_neighbors, self.mask_rng)
else:
[bob.ip.base.extrapolate_mask(self.cropped_mask, cropped_image_channel, self.mask_sigma, self.mask_neighbors, self.mask_rng) for cropped_image_channel in cropped_image]
[bob.ip.base.extrapolate_mask(
self.cropped_mask, cropped_image_channel, self.mask_sigma,
self.mask_neighbors, self.mask_rng)
for cropped_image_channel in cropped_image]
return cropped_image
def is_annotations_valid(self, annotations):
    """Check whether ``annotations`` provides every key this cropper needs.

    Returns ``False`` when ``annotations`` is empty or ``None``; otherwise
    ``True`` only if all of ``self.cropped_keys`` are present.
    """
    if annotations:
        # every required annotation key must be present in the dictionary
        missing = [key for key in self.cropped_keys if key not in annotations]
        return not missing
    return False
def __call__(self, image, annotations = None):
"""__call__(image, annotations = None) -> face
Aligns the given image according to the given annotations.
def __call__(self, image, annotations=None):
"""Aligns the given image according to the given annotations.
First, the desired color channel is extracted from the given image.
Afterward, the face is cropped, according to the given ``annotations`` (or to ``fixed_positions``, see :py:meth:`crop_face`).
Finally, the resulting face is converted to the desired data type.
**Parameters:**
Afterward, the face is cropped, according to the given ``annotations`` (or
to ``fixed_positions``, see :py:meth:`crop_face`). Finally, the resulting
face is converted to the desired data type.
Parameters
----------
image : 2D or 3D :py:class:`numpy.ndarray`
The face image to be processed.
The face image to be processed.
annotations : dict or ``None``
The annotations that fit to the given image.
**Returns:**
The annotations that fit to the given image.
Returns
-------
face : 2D :py:class:`numpy.ndarray`
The cropped face.
The cropped face.
"""
if not annotations and not self.fixed_positions:
logger.warn("Cannot crop face without annotations or fixed_positions. Returning None.")
# if annotations are missing and cannot do anything else return None.
if not self.is_annotations_valid(annotations) and \
not self.fixed_positions and \
self.annotator is None:
logger.warn("Cannot crop face without annotations or fixed_positions "
"or an annotator. Returning None.")
return None
# convert to the desired color channel
image = self.color_channel(image)
# annotate the image if annotations are missing
if not self.is_annotations_valid(annotations) and \
not self.fixed_positions and \
self.annotator is not None:
annotations = self.annotator(image, annotations=annotations)
if not self.is_annotations_valid(annotations):
logger.warn("The annotator failed and the annotations are missing too"
". Returning None.")
return None
# crop face
image = self.crop_face(image, annotations)
# convert data type
return self.data_type(image)
from __future__ import print_function

import subprocess
import os
import sys
import argparse

import bob.bio.base
import bob.extension
import bob.core

logger = bob.core.log.setup("bob.bio.face")

# This is the default set of algorithms that can be run using this script.
all_databases = bob.bio.base.resource_keys('database')

# check, which databases can actually be assessed
available_databases = []
for database in all_databases:
    try:
        bob.bio.base.load_resource(database, 'database')
        available_databases.append(database)
    # BUG FIX: a bare "except:" also swallows SystemExit/KeyboardInterrupt;
    # narrow it so only genuine load failures cause the database to be skipped.
    except Exception:
        pass

# collect all algorithms that we provide baselines for
all_algorithms = ['eigenface', 'lda', 'gabor-graph', 'lgbphs', 'plda', 'bic']

try:
    # try if GMM-based algorithms are available
    bob.bio.base.load_resource('gmm', 'algorithm')
    bob.bio.base.load_resource('isv', 'algorithm')
    bob.bio.base.load_resource('ivector-cosine', 'algorithm')
    all_algorithms += ['gmm', 'isv', 'ivector']
except Exception:
    print("Could not load the GMM-based algorithms. Did you specify bob.bio.gmm in your config file?")

try:
    # try if the CSU extension is enabled
    bob.bio.base.load_resource('lrpca', 'algorithm')
    bob.bio.base.load_resource('lda-ir', 'algorithm')
    all_algorithms += ['lrpca', 'lda-ir']
except Exception:
    print("Could not load the algorithms from the CSU resources. Did you specify bob.bio.csu in your config file?")
def command_line_arguments(command_line_parameters):
    """Defines the command line parameters that are accepted."""
    parser = argparse.ArgumentParser(
        description='Execute baseline algorithms with default parameters',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)

    # which algorithm(s) to execute
    parser.add_argument(
        '-a', '--algorithms', choices=all_algorithms,
        default=('eigenface',), nargs='+',
        help='Select one (or more) algorithms that you want to execute.')
    parser.add_argument(
        '--all', action='store_true',
        help='Select all algorithms.')
    # which database to run on
    parser.add_argument(
        '-d', '--database', choices=available_databases, default='atnt',
        help='The database on which the baseline algorithm is executed.')
    # where to store the baseline results
    parser.add_argument(
        '-b', '--baseline-directory', default='baselines',
        help='The sub-directory, where the baseline results are stored.')
    # output directories, forwarded to verify.py
    parser.add_argument(
        '-T', '--temp-directory',
        help='The directory to write temporary the data of the experiment into. If not specified, the default directory of the verify.py script is used (see verify.py --help).')
    parser.add_argument(
        '-R', '--result-directory',
        help='The directory to write the resulting score files of the experiment into. If not specified, the default directories of the verify.py script are used (see verify.py --help).')
    # execution back-ends
    parser.add_argument(
        '-g', '--grid', action='store_true',
        help='Execute the algorithm in the SGE grid.')
    parser.add_argument(
        '-l', '--parallel', type=int,
        help='Run the algorithms in parallel on the local machine, using the given number of parallel threads')
    # score normalization
    parser.add_argument(
        '-z', '--zt-norm', action='store_true',
        help='Compute the ZT norm for the files (might not be availabe for all databases).')
    # dry run: only print the commands
    parser.add_argument(
        '-q', '--dry-run', action='store_true',
        help='Just print the commands, but do not execute them.')
    # evaluation mode (instead of running the experiments)
    parser.add_argument(
        '-e', '--evaluate', nargs='+',
        choices=('EER', 'HTER', 'ROC', 'DET', 'CMC', 'RR'),
        help='Evaluate the results of the algorithms (instead of running them) using the given evaluation techniques.')
    # everything else is forwarded verbatim to verify.py
    parser.add_argument(
        'parameters', nargs=argparse.REMAINDER,
        help='Parameters directly passed to the verify.py script.')

    bob.core.log.add_command_line_option(parser)
    args = parser.parse_args(command_line_parameters)

    # --all overrides any explicit -a selection
    if args.all:
        args.algorithms = all_algorithms

    bob.core.log.set_verbosity_level(logger, args.verbose)
    return args
# In these functions, some default experiments are prepared.
# An experiment consists of three configuration files:
# - The features to be extracted
# - The algorithm to be run
# - The grid configuration that it requires (only used when the --grid option is chosen)
CONFIGURATIONS = {
'eigenface' : dict(
preprocessor = ('face-crop-eyes', 'base'),
extractor = 'linearize',
algorithm = 'pca',
),
'lda': dict(
preprocessor = ('face-crop-eyes', 'base'),
extractor = 'eigenface',
algorithm = 'lda',
),
'plda': dict(
preprocessor = ('face-crop-eyes', 'base'),
extractor = 'linearize',
algorithm = 'pca+plda',
grid = 'demanding'
),
'gabor-graph': dict(
preprocessor = ('inorm-lbp-crop', 'inorm-lbp'),
extractor = 'grid-graph',
algorithm = 'gabor-jet',
),
'lgbphs': dict(
preprocessor = ('tan-triggs-crop', 'tan-triggs'),
extractor = 'lgbphs',
algorithm = 'histogram',
),
'bic': dict(
preprocessor = ('face-crop-eyes', 'base'),
extractor = 'grid-graph',
algorithm = 'bic-jets',
grid = 'demanding'
),
'gmm': dict(
preprocessor = ('tan-triggs-crop', 'tan-triggs'),
extractor = 'dct-blocks',
algorithm = 'gmm',
grid = 'demanding',
script = 'verify_gmm.py'
),