diff --git a/.gitignore b/.gitignore index f25d410d48fd4cbeb448e9cb88d26881828d107e..21e7c55cb619380276529b7c3ce9099559779151 100644 --- a/.gitignore +++ b/.gitignore @@ -12,3 +12,4 @@ develop-eggs sphinx dist record.txt +build/ diff --git a/README.rst b/README.rst index fa8b082c4663260cee05498f3b7b16775d6571da..0018ca42d5295d398461e0fa8465c750a482416f 100644 --- a/README.rst +++ b/README.rst @@ -53,4 +53,4 @@ development `mailing list`_. .. Place your references here: .. _bob: https://www.idiap.ch/software/bob .. _installation: https://www.idiap.ch/software/bob/install -.. _mailing list: https://www.idiap.ch/software/bob/discuss +.. _mailing list: https://www.idiap.ch/software/bob/discuss \ No newline at end of file diff --git a/bob/bio/face/__init__.py b/bob/bio/face/__init__.py index 4f3858c5fbc180e24a8368de4c55b4e90e73e3c1..d73edf6121f7d88d596fb1292f1bbf97e727d0e2 100644 --- a/bob/bio/face/__init__.py +++ b/bob/bio/face/__init__.py @@ -4,6 +4,7 @@ from . import algorithm from . import script from . import database from . import annotator +from . import baseline from . 
import test diff --git a/bob/bio/face/annotator/__init__.py b/bob/bio/face/annotator/__init__.py index 8c556a0e3504a280e8432fc51bb2feecf286b2d9..9804d7d43d4db4d14c6f041cb141a85ece730dc5 100644 --- a/bob/bio/face/annotator/__init__.py +++ b/bob/bio/face/annotator/__init__.py @@ -38,9 +38,17 @@ def min_face_size_validator(annotations, min_face_size=(32, 32)): """ if not annotations: return False - bbx = bob.ip.facedetect.bounding_box_from_annotation( - source='direct', **annotations) - if bbx.size < min_face_size: + for source in ('direct', 'eyes', None): + try: + bbx = bob.ip.facedetect.bounding_box_from_annotation( + source=source, **annotations) + break + except Exception: + if source is None: + raise + else: + pass + if bbx.size[0] < min_face_size[0] or bbx.size[1] < min_face_size[1]: return False return True diff --git a/bob/bio/face/baseline/__init__.py b/bob/bio/face/baseline/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7d14c35ed018a0111acdb6fe21af527af47aeeb0 --- /dev/null +++ b/bob/bio/face/baseline/__init__.py @@ -0,0 +1 @@ +__all__ = [_ for _ in dir() if not _.startswith('_')] diff --git a/bob/bio/face/baseline/baseline.py b/bob/bio/face/baseline/baseline.py new file mode 100644 index 0000000000000000000000000000000000000000..e8a76e53fddb1c4237de035f85f07dbc5707ec28 --- /dev/null +++ b/bob/bio/face/baseline/baseline.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python +# vim: set fileencoding=utf-8 : +# Tiago de Freitas Pereira <tiago.pereira@idiap.ch> + +""" +Defining some face recognition baselines +""" + +from bob.bio.base.baseline import Baseline + +eigenface = Baseline(name="eigenface", + preprocessors={'default': 'face-crop-eyes', 'atnt': 'base'}, + extractor='linearize', + algorithm='pca') + +lda = Baseline(name="lda", + preprocessors={'default': 'face-crop-eyes', 'atnt': 'base'}, + extractor='eigenface', + algorithm='lda') + +plda = Baseline(name="plda", + preprocessors={'default': 'face-crop-eyes', 'atnt': 'base'}, + 
extractor='linearize', + algorithm='pca+plda') + + +gabor_graph = Baseline(name="gabor_graph", + preprocessors={'default': 'inorm-lbp-crop', 'atnt': 'inorm-lbp'}, + extractor='grid-graph', + algorithm='gabor-jet') + +lgbphs = Baseline(name="lgbphs", + preprocessors={'default': 'tan-triggs-crop', 'atnt': 'tan-triggs'}, + extractor='lgbphs', + algorithm='histogram') + +gmm = Baseline(name="gmm", + preprocessors={'default': 'tan-triggs-crop', 'atnt': 'tan-triggs'}, + extractor='dct-blocks', + algorithm='gmm') + +isv = Baseline(name="isv", + preprocessors={'default': 'tan-triggs-crop', 'atnt': 'tan-triggs'}, + extractor='dct-blocks', + algorithm='isv') + +ivector = Baseline(name="ivector", + preprocessors={'default': 'tan-triggs-crop', 'atnt': 'tan-triggs'}, + extractor='dct-blocks', + algorithm='ivector-cosine') + +bic = Baseline(name="bic", + preprocessors={'default': 'face-crop-eyes', 'atnt': 'base'}, + extractor='grid-graph', + algorithm='bic-jets') diff --git a/bob/bio/face/database/mobio.py b/bob/bio/face/database/mobio.py index 0f4589d57900e02d1d1264a844f8641ef8f7b920..f09b4fe49ff8da219cf449513944793de97a2d29 100644 --- a/bob/bio/face/database/mobio.py +++ b/bob/bio/face/database/mobio.py @@ -86,3 +86,7 @@ class MobioBioDatabase(ZTBioDatabase): def annotations(self, myfile): return self._db.annotations(myfile._f) + + def groups(self, protocol=None, **kwargs): + return self._db.groups(protocol=protocol) + diff --git a/bob/bio/face/preprocessor/FaceCrop.py b/bob/bio/face/preprocessor/FaceCrop.py index 091c2b8c31630ef96132906e20acd5896765baf4..131bbe3ae716624242c2e29d69e6e9ce5fecc5dc 100644 --- a/bob/bio/face/preprocessor/FaceCrop.py +++ b/bob/bio/face/preprocessor/FaceCrop.py @@ -2,20 +2,6 @@ # vim: set fileencoding=utf-8 : # @author: Manuel Guenther <Manuel.Guenther@idiap.ch> # @date: Thu May 24 10:41:42 CEST 2012 -# -# Copyright (C) 2011-2012 Idiap Research Institute, Martigny, Switzerland -# -# This program is free software: you can redistribute it and/or modify -# 
it under the terms of the GNU General Public License as published by -# the Free Software Foundation, version 3 of the License. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see <http://www.gnu.org/licenses/>. import bob.ip.base import numpy @@ -30,71 +16,100 @@ logger = logging.getLogger('bob.bio.face') class FaceCrop (Base): """Crops the face according to the given annotations. - This class is designed to perform a geometric normalization of the face based on the eye locations, using :py:class:`bob.ip.base.FaceEyesNorm`. - Usually, when executing the :py:meth:`crop_face` function, the image and the eye locations have to be specified. - There, the given image will be transformed such that the eye locations will be placed at specific locations in the resulting image. - These locations, as well as the size of the cropped image, need to be specified in the constructor of this class, as ``cropped_positions`` and ``cropped_image_size``. + This class is designed to perform a geometric normalization of the face based + on the eye locations, using :py:class:`bob.ip.base.FaceEyesNorm`. Usually, + when executing the :py:meth:`crop_face` function, the image and the eye + locations have to be specified. There, the given image will be transformed + such that the eye locations will be placed at specific locations in the + resulting image. These locations, as well as the size of the cropped image, + need to be specified in the constructor of this class, as + ``cropped_positions`` and ``cropped_image_size``. Some image databases do not provide eye locations, but rather bounding boxes. This is not a problem at all. 
- Simply define the coordinates, where you want your ``cropped_positions`` to be in the cropped image, by specifying the same keys in the dictionary that will be given as ``annotations`` to the :py:meth:`crop_face` function. + Simply define the coordinates, where you want your ``cropped_positions`` to + be in the cropped image, by specifying the same keys in the dictionary that + will be given as ``annotations`` to the :py:meth:`crop_face` function. - .. note;:: - These locations can even be outside of the cropped image boundary, i.e., when the crop should be smaller than the annotated bounding boxes. + .. note:: - Sometimes, databases provide pre-cropped faces, where the eyes are located at (almost) the same position in all images. - Usually, the cropping does not conform with the cropping that you like (i.e., image resolution is wrong, or too much background information). - However, the database does not provide eye locations (since they are almost identical for all images). - In that case, you can specify the ``fixed_positions`` in the constructor, which will be taken instead of the ``annotations`` inside the :py:meth:`crop_face` function (in which case the ``annotations`` are ignored). + These locations can even be outside of the cropped image boundary, i.e., + when the crop should be smaller than the annotated bounding boxes. + + Sometimes, databases provide pre-cropped faces, where the eyes are located at + (almost) the same position in all images. Usually, the cropping does not + conform with the cropping that you like (i.e., image resolution is wrong, or + too much background information). However, the database does not provide eye + locations (since they are almost identical for all images). In that case, you + can specify the ``fixed_positions`` in the constructor, which will be taken + instead of the ``annotations`` inside the :py:meth:`crop_face` function (in + which case the ``annotations`` are ignored). 
Sometimes, the crop of the face is outside of the original image boundaries. - Usually, these pixels will simply be left black, resulting in sharp edges in the image. - However, some feature extractors do not like these sharp edges. - In this case, you can set the ``mask_sigma`` to copy pixels from the valid border of the image and add random noise (see :py:func:`bob.ip.base.extrapolate_mask`). + Usually, these pixels will simply be left black, resulting in sharp edges in + the image. However, some feature extractors do not like these sharp edges. In + this case, you can set the ``mask_sigma`` to copy pixels from the valid + border of the image and add random noise (see + :py:func:`bob.ip.base.extrapolate_mask`). - **Parameters:** + Parameters + ---------- cropped_image_size : (int, int) - The size of the resulting cropped images. + The resolution of the cropped image, in order (HEIGHT,WIDTH); if not given, + no face cropping will be performed cropped_positions : dict - The coordinates in the cropped image, where the annotated points should be put to. - This parameter is a dictionary with usually two elements, e.g., ``{'reye':(RIGHT_EYE_Y, RIGHT_EYE_X) , 'leye':(LEFT_EYE_Y, LEFT_EYE_X)}``. - However, also other parameters, such as ``{'topleft' : ..., 'bottomright' : ...}`` are supported, as long as the ``annotations`` in the `__call__` function are present. + The coordinates in the cropped image, where the annotated points should be + put to. This parameter is a dictionary with usually two elements, e.g., + ``{'reye':(RIGHT_EYE_Y, RIGHT_EYE_X) , 'leye':(LEFT_EYE_Y, LEFT_EYE_X)}``. + However, also other parameters, such as ``{'topleft' : ..., 'bottomright' : + ...}`` are supported, as long as the ``annotations`` in the `__call__` + function are present. fixed_positions : dict or None - If specified, ignore the annotations from the database and use these fixed positions throughout. 
+ If specified, ignore the annotations from the database and use these fixed + positions throughout. mask_sigma : float or None - Fill the area outside of image boundaries with random pixels from the border, by adding noise to the pixel values. - To disable extrapolation, set this value to ``None``. - To disable adding random noise, set it to a negative value or 0. + Fill the area outside of image boundaries with random pixels from the + border, by adding noise to the pixel values. To disable extrapolation, set + this value to ``None``. To disable adding random noise, set it to a + negative value or 0. mask_neighbors : int - The number of neighbors used during mask extrapolation. - See :py:func:`bob.ip.base.extrapolate_mask` for details. + The number of neighbors used during mask extrapolation. See + :py:func:`bob.ip.base.extrapolate_mask` for details. mask_seed : int or None The random seed to apply for mask extrapolation. .. warning:: - When run in parallel, the same random seed will be applied to all parallel processes. - Hence, results of parallel execution will differ from the results in serial execution. + + When run in parallel, the same random seed will be applied to all + parallel processes. Hence, results of parallel execution will differ + from the results in serial execution. + + annotator : :any:`bob.bio.base.annotator.Annotator` + If provided, the annotator will be used if the required annotations are + missing. kwargs - Remaining keyword parameters passed to the :py:class:`Base` constructor, such as ``color_channel`` or ``dtype``. + Remaining keyword parameters passed to the :py:class:`Base` constructor, + such as ``color_channel`` or ``dtype``. 
""" def __init__( self, - cropped_image_size, # resolution of the cropped image, in order (HEIGHT,WIDTH); if not given, no face cropping will be performed - cropped_positions, # dictionary of the cropped positions, usually: {'reye':(RIGHT_EYE_Y, RIGHT_EYE_X) , 'leye':(LEFT_EYE_Y, LEFT_EYE_X)} - fixed_positions = None, # dictionary of FIXED positions in the original image; if specified, annotations from the database will be ignored - mask_sigma = None, # The sigma for random values areas outside image - mask_neighbors = 5, # The number of neighbors to consider while extrapolating - mask_seed = None, # The seed for generating random values during extrapolation - **kwargs # parameters to be written in the __str__ method + cropped_image_size, + cropped_positions, + fixed_positions=None, + mask_sigma=None, + mask_neighbors=5, + mask_seed=None, + annotator=None, + **kwargs ): Base.__init__(self, **kwargs) @@ -102,12 +117,12 @@ class FaceCrop (Base): # call base class constructor Preprocessor.__init__( self, - cropped_image_size = cropped_image_size, - cropped_positions = cropped_positions, - fixed_positions = fixed_positions, - mask_sigma = mask_sigma, - mask_neighbors = mask_neighbors, - mask_seed = mask_seed + cropped_image_size=cropped_image_size, + cropped_positions=cropped_positions, + fixed_positions=fixed_positions, + mask_sigma=mask_sigma, + mask_neighbors=mask_neighbors, + mask_seed=mask_seed ) # check parameters @@ -122,44 +137,58 @@ class FaceCrop (Base): self.fixed_positions = fixed_positions self.mask_sigma = mask_sigma self.mask_neighbors = mask_neighbors - self.mask_rng = bob.core.random.mt19937(mask_seed) if mask_seed is not None else bob.core.random.mt19937() + self.mask_rng = bob.core.random.mt19937( + mask_seed) if mask_seed is not None else bob.core.random.mt19937() + self.annotator = annotator # create objects required for face cropping - self.cropper = bob.ip.base.FaceEyesNorm(crop_size=cropped_image_size, 
right_eye=cropped_positions[self.cropped_keys[0]], left_eye=cropped_positions[self.cropped_keys[1]]) + self.cropper = bob.ip.base.FaceEyesNorm( + crop_size=cropped_image_size, + right_eye=cropped_positions[self.cropped_keys[0]], + left_eye=cropped_positions[self.cropped_keys[1]]) self.cropped_mask = numpy.ndarray(cropped_image_size, numpy.bool) + def crop_face(self, image, annotations=None): + """Crops the face. + Executes the face cropping on the given image and returns the cropped + version of it. - def crop_face(self, image, annotations = None): - """crop_face(image, annotations = None) -> face - - Executes the face cropping on the given image and returns the cropped version of it. - - **Parameters:** - + Parameters + ---------- image : 2D :py:class:`numpy.ndarray` - The face image to be processed. + The face image to be processed. annotations : dict or ``None`` - The annotations that fit to the given image. - ``None`` is only accepted, when ``fixed_positions`` were specified in the constructor. - - **Returns:** + The annotations that fit to the given image. ``None`` is only accepted, + when ``fixed_positions`` were specified in the constructor. + Returns + ------- face : 2D :py:class:`numpy.ndarray` (float) - The cropped face. + The cropped face. + + Raises + ------ + ValueError + If the annotations is None. """ if self.fixed_positions is not None: annotations = self.fixed_positions if annotations is None: - raise ValueError("Cannot perform image cropping since annotations are not given, and no fixed annotations are specified.") + raise ValueError( + "Cannot perform image cropping since annotations are not given, and " + "no fixed annotations are specified.") assert isinstance(annotations, dict) if not all(k in annotations for k in self.cropped_keys): - raise ValueError("At least one of the expected annotations '%s' are not given in '%s'." 
% (self.cropped_keys, annotations.keys())) + raise ValueError( + "At least one of the expected annotations '%s' are not given " + "in '%s'." % (self.cropped_keys, annotations.keys())) # create output mask = numpy.ones(image.shape[-2:], dtype=numpy.bool) - shape = self.cropped_image_size if image.ndim == 2 else [image.shape[0]] + list(self.cropped_image_size) + shape = self.cropped_image_size if image.ndim == 2 else [ + image.shape[0]] + list(self.cropped_image_size) cropped_image = numpy.zeros(shape) self.cropped_mask[:] = False @@ -167,51 +196,78 @@ class FaceCrop (Base): self.cropper( image, # input image mask, # full input mask - cropped_image, # cropped image + cropped_image, # cropped image self.cropped_mask, # cropped mask - right_eye = annotations[self.cropped_keys[0]], # position of first annotation, usually right eye - left_eye = annotations[self.cropped_keys[1]] # position of second annotation, usually left eye + # position of first annotation, usually right eye + right_eye=annotations[self.cropped_keys[0]], + # position of second annotation, usually left eye + left_eye=annotations[self.cropped_keys[1]] ) if self.mask_sigma is not None: - # extrapolate the mask so that pixels outside of the image original image region are filled with border pixels + # extrapolate the mask so that pixels outside of the image original image + # region are filled with border pixels if cropped_image.ndim == 2: - bob.ip.base.extrapolate_mask(self.cropped_mask, cropped_image, self.mask_sigma, self.mask_neighbors, self.mask_rng) + bob.ip.base.extrapolate_mask( + self.cropped_mask, cropped_image, self.mask_sigma, + self.mask_neighbors, self.mask_rng) else: - [bob.ip.base.extrapolate_mask(self.cropped_mask, cropped_image_channel, self.mask_sigma, self.mask_neighbors, self.mask_rng) for cropped_image_channel in cropped_image] - + [bob.ip.base.extrapolate_mask( + self.cropped_mask, cropped_image_channel, self.mask_sigma, + self.mask_neighbors, self.mask_rng) + for 
cropped_image_channel in cropped_image] return cropped_image + def is_annotations_valid(self, annotations): + if not annotations: + return False + # check if the required keys are available + return all(key in annotations for key in self.cropped_keys) - def __call__(self, image, annotations = None): - """__call__(image, annotations = None) -> face - - Aligns the given image according to the given annotations. + def __call__(self, image, annotations=None): + """Aligns the given image according to the given annotations. First, the desired color channel is extracted from the given image. - Afterward, the face is cropped, according to the given ``annotations`` (or to ``fixed_positions``, see :py:meth:`crop_face`). - Finally, the resulting face is converted to the desired data type. - - **Parameters:** + Afterward, the face is cropped, according to the given ``annotations`` (or + to ``fixed_positions``, see :py:meth:`crop_face`). Finally, the resulting + face is converted to the desired data type. + Parameters + ---------- image : 2D or 3D :py:class:`numpy.ndarray` - The face image to be processed. - + The face image to be processed. annotations : dict or ``None`` - The annotations that fit to the given image. - - **Returns:** + The annotations that fit to the given image. + Returns + ------- face : 2D :py:class:`numpy.ndarray` - The cropped face. + The cropped face. """ - if not annotations and not self.fixed_positions: - logger.warn("Cannot crop face without annotations or fixed_positions. Returning None.") + # if annotations are missing and cannot do anything else return None. + if not self.is_annotations_valid(annotations) and \ + not self.fixed_positions and \ + self.annotator is None: + logger.warn("Cannot crop face without annotations or fixed_positions " + "or an annotator. 
Returning None.") return None + # convert to the desired color channel image = self.color_channel(image) + + # annotate the image if annotations are missing + if not self.is_annotations_valid(annotations) and \ + not self.fixed_positions and \ + self.annotator is not None: + annotations = self.annotator(image, annotations=annotations) + if not self.is_annotations_valid(annotations): + logger.warn("The annotator failed and the annotations are missing too" + ". Returning None.") + return None + # crop face image = self.crop_face(image, annotations) + # convert data type return self.data_type(image) diff --git a/bob/bio/face/script/baselines.py b/bob/bio/face/script/baselines.py deleted file mode 100755 index 0b7219130866cde61a9d4e9684a5aa07d3cca209..0000000000000000000000000000000000000000 --- a/bob/bio/face/script/baselines.py +++ /dev/null @@ -1,366 +0,0 @@ -from __future__ import print_function - -import subprocess -import os -import sys -import argparse - -import bob.bio.base -import bob.extension - -import bob.core -logger = bob.core.log.setup("bob.bio.face") - -# This is the default set of algorithms that can be run using this script. -all_databases = bob.bio.base.resource_keys('database') -# check, which databases can actually be assessed -available_databases = [] - -for database in all_databases: - try: - bob.bio.base.load_resource(database, 'database') - available_databases.append(database) - except: - pass - -# collect all algorithms that we provide baselines for -all_algorithms = ['eigenface', 'lda', 'gabor-graph', 'lgbphs', 'plda', 'bic'] - -try: - # try if GMM-based algorithms are available - bob.bio.base.load_resource('gmm', 'algorithm') - bob.bio.base.load_resource('isv', 'algorithm') - bob.bio.base.load_resource('ivector-cosine', 'algorithm') - all_algorithms += ['gmm', 'isv', 'ivector'] -except: - print("Could not load the GMM-based algorithms. 
Did you specify bob.bio.gmm in your config file?") - -try: - # try if the CSU extension is enabled - bob.bio.base.load_resource('lrpca', 'algorithm') - bob.bio.base.load_resource('lda-ir', 'algorithm') - all_algorithms += ['lrpca', 'lda-ir'] -except: - print("Could not load the algorithms from the CSU resources. Did you specify bob.bio.csu in your config file?") - - -def command_line_arguments(command_line_parameters): - """Defines the command line parameters that are accepted.""" - - # create parser - parser = argparse.ArgumentParser(description='Execute baseline algorithms with default parameters', formatter_class=argparse.ArgumentDefaultsHelpFormatter) - - # add parameters - # - the algorithm to execute - parser.add_argument('-a', '--algorithms', choices = all_algorithms, default = ('eigenface',), nargs = '+', help = 'Select one (or more) algorithms that you want to execute.') - parser.add_argument('--all', action = 'store_true', help = 'Select all algorithms.') - # - the database to choose - parser.add_argument('-d', '--database', choices = available_databases, default = 'atnt', help = 'The database on which the baseline algorithm is executed.') - # - the database to choose - parser.add_argument('-b', '--baseline-directory', default = 'baselines', help = 'The sub-directory, where the baseline results are stored.') - # - the directories to write to - parser.add_argument('-T', '--temp-directory', help = 'The directory to write temporary the data of the experiment into. If not specified, the default directory of the verify.py script is used (see verify.py --help).') - parser.add_argument('-R', '--result-directory', help = 'The directory to write the resulting score files of the experiment into. 
If not specified, the default directories of the verify.py script are used (see verify.py --help).') - - # - use the Idiap grid -- option is only useful if you are at Idiap - parser.add_argument('-g', '--grid', action = 'store_true', help = 'Execute the algorithm in the SGE grid.') - # - run in parallel on the local machine - parser.add_argument('-l', '--parallel', type=int, help = 'Run the algorithms in parallel on the local machine, using the given number of parallel threads') - # - perform ZT-normalization - parser.add_argument('-z', '--zt-norm', action = 'store_true', help = 'Compute the ZT norm for the files (might not be availabe for all databases).') - - # - just print? - parser.add_argument('-q', '--dry-run', action = 'store_true', help = 'Just print the commands, but do not execute them.') - - # - evaluate the algorithm (after it has finished) - parser.add_argument('-e', '--evaluate', nargs='+', choices = ('EER', 'HTER', 'ROC', 'DET', 'CMC', 'RR'), help = 'Evaluate the results of the algorithms (instead of running them) using the given evaluation techniques.') - - # - other parameters that are passed to the underlying script - parser.add_argument('parameters', nargs = argparse.REMAINDER, help = 'Parameters directly passed to the verify.py script.') - - bob.core.log.add_command_line_option(parser) - args = parser.parse_args(command_line_parameters) - if args.all: - args.algorithms = all_algorithms - - bob.core.log.set_verbosity_level(logger, args.verbose) - - return args - - -# In these functions, some default experiments are prepared. 
-# An experiment consists of three configuration files: -# - The features to be extracted -# - The algorithm to be run -# - The grid configuration that it requires (only used when the --grid option is chosen) - -CONFIGURATIONS = { - 'eigenface' : dict( - preprocessor = ('face-crop-eyes', 'base'), - extractor = 'linearize', - algorithm = 'pca', - ), - - 'lda': dict( - preprocessor = ('face-crop-eyes', 'base'), - extractor = 'eigenface', - algorithm = 'lda', - ), - - 'plda': dict( - preprocessor = ('face-crop-eyes', 'base'), - extractor = 'linearize', - algorithm = 'pca+plda', - grid = 'demanding' - ), - - 'gabor-graph': dict( - preprocessor = ('inorm-lbp-crop', 'inorm-lbp'), - extractor = 'grid-graph', - algorithm = 'gabor-jet', - ), - - 'lgbphs': dict( - preprocessor = ('tan-triggs-crop', 'tan-triggs'), - extractor = 'lgbphs', - algorithm = 'histogram', - ), - - 'bic': dict( - preprocessor = ('face-crop-eyes', 'base'), - extractor = 'grid-graph', - algorithm = 'bic-jets', - grid = 'demanding' - ), - - 'gmm': dict( - preprocessor = ('tan-triggs-crop', 'tan-triggs'), - extractor = 'dct-blocks', - algorithm = 'gmm', - grid = 'demanding', - script = 'verify_gmm.py' - ), - - 'isv': dict( - preprocessor = ('tan-triggs-crop', 'tan-triggs'), - extractor = 'dct-blocks', - algorithm = 'isv', - grid = 'demanding', - script = 'verify_isv.py' - ), - - 'ivector': dict( - preprocessor = ('tan-triggs-crop', 'tan-triggs'), - extractor = 'dct-blocks', - algorithm = 'ivector-cosine', - grid = 'demanding', - script = 'verify_ivector.py' - ), - - 'lrpca': dict( - preprocessor = ('lrpca', None), - extractor = 'lrpca', - algorithm = 'lrpca' - ), - - 'lda-ir': dict( - preprocessor = ('lda-ir', None), - extractor = 'lda-ir', - algorithm = 'lda-ir' - ) -} - -def _get_executable(script): - executables = bob.extension.find_executable(script, prefixes = [os.path.dirname(sys.argv[0]), 'bin']) - if not len(executables): - raise IOError("Could not find the '%s' executable." 
% script) - executable = executables[0] - assert os.path.isfile(executable) - return executable - - -def main(command_line_parameters = None): - - # Collect command line arguments - args = command_line_arguments(command_line_parameters) - - # Check the database configuration file - has_eyes = args.database != 'atnt' - has_zt_norm = args.database in ('banca', 'mobio-female', 'mobio-image', 'mobio-male', 'multipie', 'scface') - has_eval = args.database in ('banca', 'mobio-female', 'mobio-image', 'mobio-male', 'multipie', 'scface', 'xm2vts') - - if not args.evaluate: - - # execution of the job is requested - for algorithm in args.algorithms: - logger.info("Executing algorithm '%s'", algorithm) - - # get the setup for the desired algorithm - import copy - setup = copy.deepcopy(CONFIGURATIONS[algorithm]) - if 'grid' not in setup: setup['grid'] = 'grid' - if 'script' not in setup or (not args.grid and args.parallel is None): setup['script'] = 'verify.py' - - # select the preprocessor - setup['preprocessor'] = setup['preprocessor'][0 if has_eyes else 1] - if setup['preprocessor'] is None: - logger.warn("Skipping algorithm '%s' since no preprocessor is found that matches the given databases' '%s' configuration", algorithm, args.database) - - - # this is the default sub-directory that is used - sub_directory = os.path.join(args.baseline_directory, algorithm) - - executable = _get_executable(setup['script']) - - # create the command to the faceverify script - command = [ - executable, - '--database', args.database, - '--preprocessor', setup['preprocessor'], - '--extractor', setup['extractor'], - '--algorithm', setup['algorithm'], - '--sub-directory', sub_directory - ] - - # add grid argument, if available - if args.grid: - command += ['--grid', setup['grid'], '--stop-on-failure'] - - if args.parallel is not None: - command += ['--grid', 'bob.bio.base.grid.Grid("local", number_of_parallel_processes=%d)' % args.parallel, '--run-local-scheduler', '--stop-on-failure'] - - # 
compute ZT-norm if the database provides this setup - if has_zt_norm and args.zt_norm: - command += ['--zt-norm'] - - # compute results for both 'dev' and 'eval' group if the database provides these - if has_eval: - command += ['--groups', 'dev', 'eval'] - - # set the directories, if desired - if args.temp_directory is not None: - command += ['--temp-directory', os.path.join(args.temp_directory)] - if args.result_directory is not None: - command += ['--result-directory', os.path.join(args.result_directory)] - - # set the verbosity level - if args.verbose: - command += ['-' + 'v'*args.verbose] - - # add the command line arguments that were specified on command line - if args.parameters: - command += args.parameters[1:] - - # print the command so that it can easily be re-issued - logger.info("Executing command:\n%s", bob.bio.base.tools.command_line(command)) - - # run the command - if not args.dry_run: - subprocess.call(command) - - else: - # call the evaluate script with the desired parameters - - # get the base directory of the results - is_idiap = os.path.isdir("/idiap") - if args.result_directory is None: - args.result_directory = "/idiap/user/%s/%s" % (os.environ["USER"], args.database) if is_idiap else "results" - if not os.path.exists(args.result_directory): - if not args.dry_run: - raise IOError("The result directory '%s' cannot be found. Please specify the --result-directory as it was specified during execution of the algorithms." % args.result_directory) - - # get the result directory of the database - result_dir = os.path.join(args.result_directory, args.baseline_directory) - if not os.path.exists(result_dir): - if not args.dry_run: - raise IOError("The result directory '%s' for the desired experiment cannot be found. Did you already run the experiments?" 
% result_dir) - - # iterate over the algorithms and collect the result files - result_dev = [] - result_eval = [] - result_zt_dev = [] - result_zt_eval = [] - legends = [] - - # evaluate the results - for algorithm in args.algorithms: - if not os.path.exists(os.path.join(result_dir, algorithm)): - logger.warn("Skipping algorithm '%s' since the results cannot be found.", algorithm) - continue - protocols = [d for d in os.listdir(os.path.join(result_dir, algorithm)) if os.path.isdir(os.path.join(result_dir, algorithm, d))] - if not len(protocols): - logger.warn("Skipping algorithm '%s' since the results cannot be found.", algorithm) - continue - if len(protocols) > 1: - # load the default protocol of the database - protocol = bob.bio.base.load_resource(args.database, "database").protocol - if protocol not in protocols: - protocol = protocols[0] - logger.warn("There are several protocols found in directory '%s'. Here, we use protocol '%s'.", os.path.join(result_dir, algorithm), protocols[0]) - else: - protocol = protocols[0] - - nonorm_sub_dir = os.path.join(algorithm, protocol, 'nonorm') - ztnorm_sub_dir = os.path.join(algorithm, protocol, 'ztnorm') - - # collect the resulting files - if os.path.exists(os.path.join(result_dir, nonorm_sub_dir, 'scores-dev')): - result_dev.append(os.path.join(nonorm_sub_dir, 'scores-dev')) - legends.append(algorithm) - - if has_eval and os.path.exists(os.path.join(result_dir, nonorm_sub_dir, 'scores-eval')): - result_eval.append(os.path.join(nonorm_sub_dir, 'scores-eval')) - - if has_zt_norm: - if os.path.exists(os.path.join(result_dir, ztnorm_sub_dir, 'scores-dev')): - result_zt_dev.append(os.path.join(ztnorm_sub_dir, 'scores-dev')) - if has_eval and os.path.exists(os.path.join(result_dir, ztnorm_sub_dir, 'scores-eval')): - result_zt_eval.append(os.path.join(ztnorm_sub_dir, 'scores-eval')) - - # check if we have found some results - if not result_dev and not args.dry_run: - logger.warn("No result files were detected -- skipping 
evaluation.") - return - - executable = _get_executable('evaluate.py') - - # call the evaluate script - base_command = [executable, '--directory', result_dir, '--legends'] + legends - if 'EER' in args.evaluate: - base_command += ['--criterion', 'EER'] - elif 'HTER' in args.evaluate: - base_command += ['--criterion', 'HTER'] - if 'ROC' in args.evaluate: - base_command += ['--roc', 'ROCxxx.pdf'] - if 'DET' in args.evaluate: - base_command += ['--det', 'DETxxx.pdf'] - if 'CMC' in args.evaluate: - base_command += ['--cmc', 'CMCxxx.pdf'] - if 'RR' in args.evaluate: - base_command += ['--rr'] - if args.verbose: - base_command += ['-' + 'v'*args.verbose] - - # first, run the nonorm evaluation - if result_zt_dev: - command = [cmd.replace('xxx','_nonorm') for cmd in base_command] - else: - command = [cmd.replace('xxx','') for cmd in base_command] - command += ['--dev-files'] + result_dev - if result_eval: - command += ['--eval-files'] + result_eval - - logger.info("Executing command (nonorm):\n%s", bob.bio.base.tools.command_line(command)) - if not args.dry_run: - subprocess.call(command) - - # now, also run the ZT norm evaluation, if available - if result_zt_dev: - command = [cmd.replace('xxx','_ztnorm') for cmd in base_command] - command += ['--dev-files'] + result_zt_dev - if result_zt_eval: - command += ['--eval-files'] + result_zt_eval - - logger.info("Executing command (ztnorm):\n%s", bob.bio.base.tools.command_line(command)) - if not args.dry_run: - subprocess.call(command) diff --git a/bob/bio/face/test/test_annotators.py b/bob/bio/face/test/test_annotators.py index 7ba3082087ccdd3951f2bb9b075f6f99c5d79797..d5e153410a70f0a4197f70e17b8af205f858b9f6 100644 --- a/bob/bio/face/test/test_annotators.py +++ b/bob/bio/face/test/test_annotators.py @@ -51,7 +51,13 @@ def test_min_face_size_validator(): not_valid = { 'topleft': (0, 0), - 'bottomright': (28, 32), + 'bottomright': (28, 33), + } + assert not min_face_size_validator(not_valid) + + not_valid = { + 'topleft': (0, 
0), + 'bottomright': (33, 28), } assert not min_face_size_validator(not_valid) diff --git a/bob/bio/face/test/test_scripts.py b/bob/bio/face/test/test_scripts.py index 04bee2a509b96e3e32431c290f4174498874e48e..64e315dda85849233caecdf80bff3f5a7955f7b3 100644 --- a/bob/bio/face/test/test_scripts.py +++ b/bob/bio/face/test/test_scripts.py @@ -1,28 +1,6 @@ import bob.bio.base.test.utils import bob.bio.face -@bob.bio.base.test.utils.grid_available -def test_baselines(): - # test that all of the baselines would execute - from bob.bio.face.script.baselines import available_databases, all_algorithms, main - - with bob.bio.base.test.utils.Quiet(): - for database in available_databases: - parameters = ['-d', database, '--dry-run'] - main(parameters) - parameters.append('--grid') - main(parameters) - parameters.extend(['-e', 'HTER']) - main(parameters) - - for algorithm in all_algorithms: - parameters = ['-a', algorithm, '--dry-run'] - main(parameters) - parameters.append('-g') - main(parameters) - parameters.extend(['-e', 'HTER']) - main(parameters) - def test_display_annotations(): from bob.bio.face.script.display_face_annotations import main diff --git a/conda/meta.yaml b/conda/meta.yaml index ec19f16277d90cb152568c57da6b1ff2188f8618..6c76ca5fb1832848f2b02b735f32019062edf669 100644 --- a/conda/meta.yaml +++ b/conda/meta.yaml @@ -7,7 +7,6 @@ package: build: entry_points: - - baselines.py = bob.bio.face.script.baselines:main - display_face_annotations.py = bob.bio.face.script.display_face_annotations:main number: {{ environ.get('BOB_BUILD_NUMBER', 0) }} run_exports: @@ -56,7 +55,6 @@ test: imports: - {{ name }} commands: - - baselines.py --help - display_face_annotations.py --help - nosetests --with-coverage --cover-package={{ name }} -sv {{ name }} - sphinx-build -aEW {{ project_dir }}/doc {{ project_dir }}/sphinx diff --git a/doc/baselines.rst b/doc/baselines.rst index 3f60912f0fcd1f03b9a2bb3d1163402f37f24924..42858cf2c3631e18bbd5f845f719da11b3513b68 100644 --- 
a/doc/baselines.rst +++ b/doc/baselines.rst @@ -24,42 +24,42 @@ How this is done is explained in more detail in the :ref:`bob.bio.base.installat Running Baseline Experiments ---------------------------- -To run the baseline experiments, you can use the ``baselines.py`` script by just going to the console and typing: +To run the baseline experiments, you can use the ``bob bio baseline`` script by just going to the console and typing: .. code-block:: sh - $ baselines.py + $ bob bio baseline <baseline> <database> This script is a simple wrapper for the ``verify.py`` script that is explained in more detail in :ref:`bob.bio.base.experiments`. -The ``baselines.py --help`` option shows you, which other options you have. +The ``bob bio baseline --help`` option shows you, which other options you have. Here is an almost complete extract: -* ``--database``: The database and protocol you want to use. - By default this is set to the image database *atnt*. -* ``--algorithms``: The recognition algorithms that you want to execute. - By default, only the *eigenface* algorithm is executed. -* ``--all``: Execute all algorithms that are implemented. +* ``<baseline>``: The recognition algorithms that you want to execute. +* ``<database>``: The database and protocol you want to use. * ``--temp-directory``: The directory where temporary files of the experiments are put to. * ``--result-directory``: The directory where resulting score files of the experiments are put to. -* ``--evaluate``: After running the experiments, the resulting score files will be evaluated, and the result is written to console. -* ``--dry-run``: Instead of executing the algorithm (or the evaluation), only print the command that would have been executed. * ``--verbose``: Increase the verbosity level of the script. By default, only the commands that are executed are printed, and the rest of the calculation runs quietly. 
You can increase the verbosity by adding the ``--verbose`` parameter repeatedly (up to three times). -Usually it is a good idea to have at least verbose level 2 (i.e., calling ``baselines.py --verbose --verbose``, or the short version ``baselines.py -vv``). +Usually it is a good idea to have at least verbose level 2 (i.e., calling ``bob bio baseline --verbose --verbose``, or the short version ``bob bio baseline -vv``). + + +You can find the list of readily available baselines using the ``resources.py`` +command: + +.. code-block:: sh + + $ resources.py --types baseline + Running in Parallel ~~~~~~~~~~~~~~~~~~~ To run the experiments in parallel, as usual you can define an SGE grid configuration, or run with parallel threads on the local machine. -For the ``baselines.py`` script, the grid configuration is adapted to each of the algorithms. Hence, to run in the SGE grid, you can simply add the ``--grid`` command line option, without parameters. Similarly, to run the experiments in parallel on the local machine, simply add a ``--parallel <N>`` option, where ``<N>`` specifies the number of parallel jobs you want to execute. -When running the algorithms from the :ref:`bob.bio.gmm <bob.bio.gmm>` package in parallel, the specialized scripts are executed. -This will speed up the training of the UBM (and possible additional steps) tremendously. - The Algorithms -------------- @@ -98,9 +98,6 @@ The algorithms present an (incomplete) set of state-of-the-art face recognition - feature : :py:class:`bob.bio.face.extractor.GridGraph` - algorithm : :py:class:`bob.bio.base.algorithm.BIC` -.. note:: - The ``plda`` algorithm is currently under construction and the setup is not yet useful. - Further algorithms are available, when the :ref:`bob.bio.gmm <bob.bio.gmm>` package is installed: @@ -125,67 +122,94 @@ Further algorithms are available, when the :ref:`bob.bio.gmm <bob.bio.gmm>` pack .. 
note:: The ``ivector`` algorithm needs a lot of training data and fails on small databases such as the `AT&T database`_. - -Additionally, the following algorithms can be executed, when the :ref:`bob.bio.csu <bob.bio.csu>` package is installed. - -* ``lrpca``: In Local Region PCA [PBD11]_, the face is sub-divided into local regions and a PCA is performed for each local region. - - - preprocessor : :py:class:`bob.bio.csu.preprocessor.LRPCA` - - feature : :py:class:`bob.bio.csu.extractor.LRPCA` - - algorithm : :py:class:`bob.bio.csu.algorithm.LRPCA` - -* ``lda-ir``: The LDA-IR (a.k.a. CohortLDA [LBP12]_) extracts color information from images after, and computes a PCA+LDA projection on two color layers. - - - preprocessor : :py:class:`bob.bio.csu.preprocessor.LDAIR` - - feature : :py:class:`bob.bio.csu.extractor.LDAIR` - - algorithm : :py:class:`bob.bio.csu.algorithm.LDAIR` - -.. note:: - The ``lrpca`` and ``lda-ir`` algorithms require hand-labeled eye locations. - Therefore, they can not be run on the default ``atnt`` database. - .. _bob.bio.base.baseline_results: Baseline Results ---------------- -To evaluate the results, a wrapper call to ``evaluate.py`` is produced by the ``baselines.py --evaluate`` command. -Several types of evaluation can be achieved, see :ref:`bob.bio.base.evaluate` for details. -Particularly, here we can enable ROC curves, DET plots, CMC curves and the computation of EER/HTER. -Hence, the complete set of results of the baseline experiments are generated using: +Let's trigger the ``bob bio baseline`` script to run the baselines on the ATnT dataset: .. 
code-block:: sh - $ baselines.py --all -vv --evaluate ROC DET CMC HTER + $ bob bio baseline eigenface atnt -vv -T <TEMP_DIR> -R <RESULT_DIR> + $ bob bio baseline lda atnt -vv -T <TEMP_DIR> -R <RESULT_DIR> + $ bob bio baseline gabor_graph atnt -vv -T <TEMP_DIR> -R <RESULT_DIR> + $ bob bio baseline gmm atnt -vv -T <TEMP_DIR> -R <RESULT_DIR> + $ bob bio baseline isv atnt -vv -T <TEMP_DIR> -R <RESULT_DIR> + $ bob bio baseline plda atnt -vv -T <TEMP_DIR> -R <RESULT_DIR> + $ bob bio baseline bic atnt -vv -T <TEMP_DIR> -R <RESULT_DIR> -If you specified other parameters for the execution of the algorithms, e.g., the ``--directory`` flag, you have to add these options here as well. -If you ran only a sub-set of the available, the missing algorithms will just be skipped. -The resulting files will be ``ROC.pdf``, ``DET.pdf`` and ``CMC.pdf``, and the HTER results are simply written to console. -For the `AT&T database`_ the results should be as follows: +Then, to evaluate the results, in terms of HTER, the script ``bob bio metrics`` should be executed as the following. -.. image:: img/ROC.png - :width: 35% -.. image:: img/DET.png - :width: 27% -.. image:: img/CMC.png - :width: 35% + +.. code-block:: sh + + $ bob bio metrics <RESULT_DIR>/atnt/eigenface/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/lda/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/gabor_graph/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/lgbphs/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/gmm/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/isv/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/plda/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/bic/Default/nonorm/scores-dev --no-evaluation +The aforementioned script will produce in the console the HTERs below for each baseline under the ATnT database: + .. 
table:: The HTER results of the baseline algorithms on the AT&T database +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+ | eigenface | lda | gaborgraph | lgbphs | gmm | isv | plda | bic | +=============+=============+=============+=============+=============+=============+=============+=============+ - | 8.368% | 9.763% | 4.579% | 8.500% | 0.684% | 0.421% | 7.921% | 3.526% | + | 9.0% | 12.8% | 6.0% | 9.0% | 1.0% | 0.1% | 10.8% | 4.0% | +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+ -.. note:: - The results for ``gmm`` and ``isv`` were run with the parallelized scripts. - Though the results obtained with the sequential script should be similar, it might be that they are not identical. -.. note:: - The ``lrpca`` and ``lda-ir`` algorithms require hand-labeled eye positions to run. - Since the AT&T database does not provide eye positions, it is not possible to provide baseline results on AT&T for these two algorithms. +Several types of evaluation can be executed, see ``bob bio --help`` for details. +Particularly, here we can enable ROC curves, DET plots and CMC curves. + +.. 
code-block:: sh + + $ bob bio roc <RESULT_DIR>/atnt/eigenface/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/lda/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/gabor_graph/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/lgbphs/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/gmm/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/isv/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/plda/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/bic/Default/nonorm/scores-dev --no-evaluation \ + -o ROC.pdf + + $ bob bio det <RESULT_DIR>/atnt/eigenface/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/lda/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/gabor_graph/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/lgbphs/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/gmm/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/isv/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/plda/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/bic/Default/nonorm/scores-dev --no-evaluation \ + -o DET.pdf + + $ bob bio cmc <RESULT_DIR>/atnt/eigenface/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/lda/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/gabor_graph/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/lgbphs/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/gmm/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/isv/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/plda/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/bic/Default/nonorm/scores-dev --no-evaluation \ + -o CMC.pdf + + +For the `AT&T database`_ the results should be as follows: + +.. image:: img/ROC.png + :width: 35% +.. image:: img/DET.png + :width: 27% +.. image:: img/CMC.png + :width: 35% + .. 
include:: links.rst diff --git a/doc/extra-intersphinx.txt b/doc/extra-intersphinx.txt index c82f95f8fd7a78b61d0116246d7a6f2ff9a69a65..bf1097ebbeb8c2be2f37cafc1f3f8a6e4b937fd1 100644 --- a/doc/extra-intersphinx.txt +++ b/doc/extra-intersphinx.txt @@ -6,7 +6,6 @@ bob.ip.gabor bob.ip.base bob.bio.gmm bob.bio.video -bob.bio.csu bob.bio.spear bob.db.lfw bob.ip.facedetect diff --git a/doc/img/CMC.png b/doc/img/CMC.png index 040e374242919317a45daa1994f359acdf48dcff..d1bb3a52d2ee2d8445061ff2b9a5d035db211499 100644 Binary files a/doc/img/CMC.png and b/doc/img/CMC.png differ diff --git a/doc/img/DET.png b/doc/img/DET.png index e0732263822c3b480b6c35f32a002a1c42e7cf6a..e6130ce95998753960e5f8d6af13918ca0bd7690 100644 Binary files a/doc/img/DET.png and b/doc/img/DET.png differ diff --git a/doc/img/ROC.png b/doc/img/ROC.png index fbb17b175ad6cc730a75fa895d9d036ce9cc0aac..46ca262a197868a72e54e7bda3688804000722df 100644 Binary files a/doc/img/ROC.png and b/doc/img/ROC.png differ diff --git a/doc/index.rst b/doc/index.rst index 361114f346038cc84fba6ef2377f65e823ac6a9c..7a6cb6865d0044602032e7649b0ff0b228ebce2e 100644 --- a/doc/index.rst +++ b/doc/index.rst @@ -21,7 +21,6 @@ Additionally, a set of baseline algorithms are defined, which integrate well wit * :ref:`bob.bio.gmm <bob.bio.gmm>` defines algorithms based on Gaussian mixture models * :ref:`bob.bio.video <bob.bio.video>` uses face recognition algorithms in video frames -* :ref:`bob.bio.csu <bob.bio.csu>` provides wrapper classes of the `CSU Face Recognition Resources <http://www.cs.colostate.edu/facerec>`_ (only Python 2.7 compatible) For more detailed information about the structure of the ``bob.bio`` packages, please refer to the documentation of :ref:`bob.bio.base <bob.bio.base>`. Particularly, the installation of this and other ``bob.bio`` packages, please read the :ref:`bob.bio.base.installation`. 
diff --git a/doc/links.rst b/doc/links.rst index b055ac96611f9b7a16225ab016cfb5bac080d119..f4b19432bcd96a26a5d5b4cb666698191953ec17 100644 --- a/doc/links.rst +++ b/doc/links.rst @@ -14,7 +14,5 @@ .. _nist: http://www.nist.gov/itl/iad/ig/focs.cfm .. _pypi: http://pypi.python.org .. _sge: http://wiki.idiap.ch/linux/SunGridEngine -.. _csu face recognition resources: http://www.cs.colostate.edu/facerec -.. _xfacereclib.extension.csu: http://pypi.python.org/pypi/xfacereclib.extension.CSU .. _virtualbox: https://www.virtualbox.org .. _hdf5: http://www.hdfgroup.org/HDF5 diff --git a/doc/references.rst b/doc/references.rst index c1cfb2c8065d144dcb8318475cd38b503abbb989..22fb8ffc61e6e09e7706923be6d9501f3eb254d5 100644 --- a/doc/references.rst +++ b/doc/references.rst @@ -9,8 +9,6 @@ References .. [TP91] *M. Turk and A. Pentland*. **Eigenfaces for recognition**. Journal of Cognitive Neuroscience, 3(1):71-86, 1991. .. [ZKC98] *W. Zhao, A. Krishnaswamy, R. Chellappa, D. Swets and J. Weng*. **Discriminant analysis of principal components for face recognition**, pages 73-85. Springer Verlag Berlin, 1998. .. [MWP98] *B. Moghaddam, W. Wahid and A. Pentland*. **Beyond eigenfaces: probabilistic matching for face recognition**. IEEE International Conference on Automatic Face and Gesture Recognition, pages 30-35. 1998. -.. [PBD11] *P.J. Phillips, J.R. Beveridge, B.A. Draper, G. Givens, A.J. O'Toole, D.S. Bolme, J. Dunlop, Y.M. Lui, H. Sahibzada and S. Weimer*. **An introduction to the good, the bad, & the ugly face recognition challenge problem**. Automatic face gesture recognition and workshops (FG 2011), pages 346-353. 2011. -.. [LBP12] *Y.M. Lui, D. Bolme, P.J. Phillips, J.R. Beveridge and B.A. Draper*. **Preliminary studies on the good, the bad, and the ugly face recognition challenge problem**. Computer vision and pattern recognition workshops (CVPRW), pages 9-16. 2012. .. [GHW12] *M. Günther, D. Haufe and R.P. Würtz*. 
**Face recognition with disparity corrected Gabor phase differences**. In Artificial neural networks and machine learning, volume 7552 of Lecture Notes in Computer Science, pages 411-418. 9/2012. .. [ZSG05] *W. Zhang, S. Shan, W. Gao, X. Chen and H. Zhang*. **Local Gabor binary pattern histogram sequence (LGBPHS): a novel non-statistical model for face representation and recognition**. Computer Vision, IEEE International Conference on, 1:786-791, 2005. .. [MM09] *C. McCool, S. Marcel*. **Parts-based face verification using local frequency bands**. In Advances in biometrics, volume 5558 of Lecture Notes in Computer Science. 2009. diff --git a/setup.py b/setup.py index efc501a2ecf85ecbe05ee6bba0ff92af4bc88375..81e0030725ef311e9c0748e02d6a6d412aa975f7 100644 --- a/setup.py +++ b/setup.py @@ -189,6 +189,20 @@ setup( 'histogram = bob.bio.face.config.algorithm.histogram:algorithm', # LGBPHS histograms 'bic-jets = bob.bio.face.config.algorithm.bic_jets:algorithm', # BIC on gabor jets ], + + #baselines + 'bob.bio.baseline':[ + 'eigenface = bob.bio.face.baseline.baseline:eigenface', + 'lda = bob.bio.face.baseline.baseline:lda', + 'plda = bob.bio.face.baseline.baseline:plda', + 'gabor_graph = bob.bio.face.baseline.baseline:gabor_graph', + 'lgbphs = bob.bio.face.baseline.baseline:lgbphs', + 'gmm = bob.bio.face.baseline.baseline:gmm', + 'isv = bob.bio.face.baseline.baseline:isv', + 'ivector = bob.bio.face.baseline.baseline:ivector', + 'bic = bob.bio.face.baseline.baseline:bic', + ], + }, # Classifiers are important if you plan to distribute this package through