diff --git a/bob/bio/face/__init__.py b/bob/bio/face/__init__.py index 4f3858c5fbc180e24a8368de4c55b4e90e73e3c1..d73edf6121f7d88d596fb1292f1bbf97e727d0e2 100644 --- a/bob/bio/face/__init__.py +++ b/bob/bio/face/__init__.py @@ -4,6 +4,7 @@ from . import algorithm from . import script from . import database from . import annotator +from . import baseline from . import test diff --git a/bob/bio/face/baseline/__init__.py b/bob/bio/face/baseline/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..7d14c35ed018a0111acdb6fe21af527af47aeeb0 --- /dev/null +++ b/bob/bio/face/baseline/__init__.py @@ -0,0 +1 @@ +__all__ = [_ for _ in dir() if not _.startswith('_')] diff --git a/bob/bio/face/baseline/baseline.py b/bob/bio/face/baseline/baseline.py new file mode 100644 index 0000000000000000000000000000000000000000..e8a76e53fddb1c4237de035f85f07dbc5707ec28 --- /dev/null +++ b/bob/bio/face/baseline/baseline.py @@ -0,0 +1,55 @@ +#!/usr/bin/env python +# vim: set fileencoding=utf-8 : +# Tiago de Freitas Pereira <tiago.pereira@idiap.ch> + +""" +Defining some face recognition baselines +""" + +from bob.bio.base.baseline import Baseline + +eigenface = Baseline(name="eigenface", + preprocessors={'default': 'face-crop-eyes', 'atnt': 'base'}, + extractor='linearize', + algorithm='pca') + +lda = Baseline(name="lda", + preprocessors={'default': 'face-crop-eyes', 'atnt': 'base'}, + extractor='eigenface', + algorithm='lda') + +plda = Baseline(name="plda", + preprocessors={'default': 'face-crop-eyes', 'atnt': 'base'}, + extractor='linearize', + algorithm='pca+plda') + + +gabor_graph = Baseline(name="gabor_graph", + preprocessors={'default': 'inorm-lbp-crop', 'atnt': 'inorm-lbp'}, + extractor='grid-graph', + algorithm='gabor-jet') + +lgbphs = Baseline(name="lgbphs", + preprocessors={'default': 'tan-triggs-crop', 'atnt': 'tan-triggs'}, + extractor='lgbphs', + algorithm='histogram') + +gmm = Baseline(name="gmm", + preprocessors={'default': 'tan-triggs-crop', 'atnt': 
'tan-triggs'}, + extractor='dct-blocks', + algorithm='gmm') + +isv = Baseline(name="isv", + preprocessors={'default': 'tan-triggs-crop', 'atnt': 'tan-triggs'}, + extractor='dct-blocks', + algorithm='isv') + +ivector = Baseline(name="ivector", + preprocessors={'default': 'tan-triggs-crop', 'atnt': 'tan-triggs'}, + extractor='dct-blocks', + algorithm='ivector-cosine') + +bic = Baseline(name="bic", + preprocessors={'default': 'face-crop-eyes', 'atnt': 'base'}, + extractor='grid-graph', + algorithm='bic-jets') diff --git a/bob/bio/face/script/face.py b/bob/bio/face/script/face.py deleted file mode 100644 index 7006fe8fdf26fa1bdcc3f4b5294f328f55240e9e..0000000000000000000000000000000000000000 --- a/bob/bio/face/script/face.py +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/env python -# vim: set fileencoding=utf-8 : -# Tiago de Freitas Pereira <tiago.pereira@idiap.ch> - -""" -This script runs some face recognition baselines under some face databases - -Run `bob bio face baselines --help` to list all the baselines and databases available - -Examples: - - To run the LDA baseline in the mobio-male database do: - bob bio face baselines --database mobio-male --baseline lda - - To run the GMM baseline in the mobio-male database do: - bob bio face baselines --database mobio-male --baseline gmm - -""" - -from bob.extension.scripts.click_helper import verbosity_option -from bob.extension.scripts.click_helper import ( - verbosity_option, ConfigCommand, ResourceOption) -from click_plugins import with_plugins -import click -import os -import pkg_resources -import bob.bio.base -from bob.bio.base.script.verify import main as verify -def get_available_databases(): - """ - Get all the available databases through the database entry-points - """ - - available_databases = dict() - all_databases = bob.bio.base.resource_keys('database') - for database in all_databases: - try: - database_entry_point = bob.bio.base.load_resource(database, 'database') - - available_databases[database] = dict() - - # 
Checking if the database has data for the ZT normalization - available_databases[database]["has_zt"] = hasattr(database_entry_point, "zobjects") and hasattr(database_entry_point, "tobjects") - available_databases[database]["groups"] = [] - # Searching for database groups - try: - groups = list(database_entry_point.groups()) - for g in ["dev", "eval"]: - available_databases[database]["groups"] += [g] if g in groups else [] - except: - # In case the method groups is not implemented - available_databases[database]["groups"] = ["dev"] - except: - pass - return available_databases - -# LIST OF REGISTERED BASELINES -BASELINES = { - 'eigenface': dict( - preprocessor = 'face-crop-eyes', - extractor = 'linearize', - algorithm = 'pca', - ), - - 'lda': dict( - preprocessor = 'face-crop-eyes', - extractor = 'eigenface', - algorithm = 'lda', - ), - 'plda': dict( - preprocessor = 'face-crop-eyes', - extractor = 'linearize', - algorithm = 'pca+plda', - grid = 'demanding', - ), - 'gabor-graph': dict( - preprocessor = 'inorm-lbp-crop', - extractor = 'grid-graph', - algorithm = 'gabor-jet', - ), - 'lgbphs': dict( - preprocessor = 'tan-triggs-crop', - extractor = 'lgbphs', - algorithm = 'histogram', - ), - 'gmm': dict( - preprocessor = 'tan-triggs-crop', - extractor = 'dct-blocks', - algorithm = 'gmm', - grid = 'demanding', - ), - 'isv': dict( - preprocessor = 'tan-triggs-crop', - extractor = 'dct-blocks', - algorithm = 'isv', - grid = 'demanding', - ), - 'ivector': dict( - preprocessor = 'tan-triggs-crop', - extractor = 'dct-blocks', - algorithm = 'ivector', - grid = 'demanding', - ), - } - - -DATABASES = [d for d in get_available_databases().keys()] - -@with_plugins(pkg_resources.iter_entry_points('bob.bio.cli.bio')) -@click.group() -def face(): - """Entry point to run Face Recognition Algorithms - - Check it out https://www.idiap.ch/software/bob/docs/bob/bob.bio.face/stable/index.html - - """ - pass - - -@face.command(entry_point_group='bob.bio.config', cls=ConfigCommand) 
-@click.option('--baseline', '-b', required=True, cls=ResourceOption, help="Registered baseline", type=click.Choice(BASELINES)) -@click.option('--database', '-d', required=True, cls=ResourceOption, help="Registered database", type=click.Choice(DATABASES)) -@click.option('--temp-directory', '-T', required=False, cls=ResourceOption, help="The directory to write temporary the data of the experiment into. If not specified, the default directory of the verify.py script is used (see verify.py --help).") -@click.option('--result-directory', '-R', required=False, cls=ResourceOption, help="The directory to write the resulting score files of the experiment into. If not specified, the default directories of the verify.py script are used (see verify.py --help).") -@click.option('--grid', '-g', help="Execute the algorithm in the SGE grid.", is_flag=True) -@click.option('--zt-norm', '-z', help="Enable the computation of ZT norms (if the database supports it).", is_flag=True) -def baselines(baseline, database, temp_directory, result_directory, grid, zt_norm, **kwargs): - """ - This script runs some face recognition baselines under some face databases - - Run `bob bio face baselines --help` to list all the baselines and databases available - - Examples: - - To run the LDA baseline in the mobio-male database do: - bob bio face baselines --database mobio-male --baseline lda - - To run the GMM baseline in the mobio-male database do: - bob bio face baselines --database mobio-male --baseline gmm - """ - - # this is the default sub-directory that is used - sub_directory = os.path.join(database, baseline) - database_data = get_available_databases()[database] - parameters = [ - '-p', BASELINES[baseline]["preprocessor"], - '-e', BASELINES[baseline]["extractor"], - '-d', database, - '-a', BASELINES[baseline]["algorithm"], - '-vvv', - '--temp-directory', temp_directory, - '--result-directory', result_directory, - '--sub-directory', sub_directory - ] - - parameters += ['--groups'] + 
database_data["groups"] - - if grid: - if 'grid' in BASELINES[baseline].keys(): - parameters += ['-g', BASELINES[baseline]["grid"]] - else: - parameters += ['-g', 'grid'] - - if zt_norm and 'has_zt' in database_data.keys(): - parameters += ['--zt-norm'] - - verify(parameters) diff --git a/doc/baselines.rst b/doc/baselines.rst index 6df5fdbe599671b36bef2bdfda781d646c22bb17..42858cf2c3631e18bbd5f845f719da11b3513b68 100644 --- a/doc/baselines.rst +++ b/doc/baselines.rst @@ -24,42 +24,42 @@ How this is done is explained in more detail in the :ref:`bob.bio.base.installat Running Baseline Experiments ---------------------------- -To run the baseline experiments, you can use the ``baselines.py`` script by just going to the console and typing: +To run the baseline experiments, you can use the ``bob bio baseline`` script by just going to the console and typing: .. code-block:: sh - $ baselines.py + $ bob bio baseline <baseline> <database> This script is a simple wrapper for the ``verify.py`` script that is explained in more detail in :ref:`bob.bio.base.experiments`. -The ``baselines.py --help`` option shows you, which other options you have. +The ``bob bio baseline --help`` option shows you, which other options you have. Here is an almost complete extract: -* ``--database``: The database and protocol you want to use. - By default this is set to the image database *atnt*. -* ``--algorithms``: The recognition algorithms that you want to execute. - By default, only the *eigenface* algorithm is executed. -* ``--all``: Execute all algorithms that are implemented. +* ``<baseline>``: The recognition algorithms that you want to execute. +* ``<database>``: The database and protocol you want to use. * ``--temp-directory``: The directory where temporary files of the experiments are put to. * ``--result-directory``: The directory where resulting score files of the experiments are put to. 
-* ``--evaluate``: After running the experiments, the resulting score files will be evaluated, and the result is written to console. -* ``--dry-run``: Instead of executing the algorithm (or the evaluation), only print the command that would have been executed. * ``--verbose``: Increase the verbosity level of the script. By default, only the commands that are executed are printed, and the rest of the calculation runs quietly. You can increase the verbosity by adding the ``--verbose`` parameter repeatedly (up to three times). -Usually it is a good idea to have at least verbose level 2 (i.e., calling ``baselines.py --verbose --verbose``, or the short version ``baselines.py -vv``). +Usually it is a good idea to have at least verbose level 2 (i.e., calling ``bob bio baseline --verbose --verbose``, or the short version ``bob bio baseline -vv``). + + +You can find the list of readily available baselines using the ``resources.py`` +command: + +.. code-block:: sh + + $ resources.py --types baseline + Running in Parallel ~~~~~~~~~~~~~~~~~~~ To run the experiments in parallel, as usual you can define an SGE grid configuration, or run with parallel threads on the local machine. -For the ``baselines.py`` script, the grid configuration is adapted to each of the algorithms. Hence, to run in the SGE grid, you can simply add the ``--grid`` command line option, without parameters. Similarly, to run the experiments in parallel on the local machine, simply add a ``--parallel <N>`` option, where ``<N>`` specifies the number of parallel jobs you want to execute. -When running the algorithms from the :ref:`bob.bio.gmm <bob.bio.gmm>` package in parallel, the specialized scripts are executed. -This will speed up the training of the UBM (and possible additional steps) tremendously. 
- The Algorithms -------------- @@ -98,9 +98,6 @@ The algorithms present an (incomplete) set of state-of-the-art face recognition - feature : :py:class:`bob.bio.face.extractor.GridGraph` - algorithm : :py:class:`bob.bio.base.algorithm.BIC` -.. note:: - The ``plda`` algorithm is currently under construction and the setup is not yet useful. - Further algorithms are available, when the :ref:`bob.bio.gmm <bob.bio.gmm>` package is installed: @@ -130,43 +127,89 @@ Further algorithms are available, when the :ref:`bob.bio.gmm <bob.bio.gmm>` pack Baseline Results ---------------- -To evaluate the results, a wrapper call to ``evaluate.py`` is produced by the ``baselines.py --evaluate`` command. -Several types of evaluation can be achieved, see :ref:`bob.bio.base.evaluate` for details. -Particularly, here we can enable ROC curves, DET plots, CMC curves and the computation of EER/HTER. -Hence, the complete set of results of the baseline experiments are generated using: +Let's trigger the ``bob bio baseline`` script to run the baselines on the ATnT dataset: .. code-block:: sh - $ baselines.py --all -vv --evaluate ROC DET CMC HTER + $ bob bio baseline eigenface atnt -vv -T <TEMP_DIR> -R <RESULT_DIR> + $ bob bio baseline lda atnt -vv -T <TEMP_DIR> -R <RESULT_DIR> + $ bob bio baseline gabor_graph atnt -vv -T <TEMP_DIR> -R <RESULT_DIR> + $ bob bio baseline gmm atnt -vv -T <TEMP_DIR> -R <RESULT_DIR> + $ bob bio baseline isv atnt -vv -T <TEMP_DIR> -R <RESULT_DIR> + $ bob bio baseline plda atnt -vv -T <TEMP_DIR> -R <RESULT_DIR> + $ bob bio baseline bic atnt -vv -T <TEMP_DIR> -R <RESULT_DIR> -If you specified other parameters for the execution of the algorithms, e.g., the ``--directory`` flag, you have to add these options here as well. -If you ran only a sub-set of the available, the missing algorithms will just be skipped. -The resulting files will be ``ROC.pdf``, ``DET.pdf`` and ``CMC.pdf``, and the HTER results are simply written to console. 
-For the `AT&T database`_ the results should be as follows: +Then, to evaluate the results, in terms of HTER, the script ``bob bio metrics`` should be executed as the following. -.. image:: img/ROC.png - :width: 35% -.. image:: img/DET.png - :width: 27% -.. image:: img/CMC.png - :width: 35% +.. code-block:: sh + + $ bob bio metrics <RESULT_DIR>/atnt/eigenface/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/lda/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/gabor_graph/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/lgbphs/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/gmm/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/isv/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/plda/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/bic/Default/nonorm/scores-dev --no-evaluation + + +The aforementioned script will produce in the console the HTERs below for each baseline under the ATnT database: .. table:: The HTER results of the baseline algorithms on the AT&T database +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+ | eigenface | lda | gaborgraph | lgbphs | gmm | isv | plda | bic | +=============+=============+=============+=============+=============+=============+=============+=============+ - | 8.368% | 9.763% | 4.579% | 8.500% | 0.684% | 0.421% | 7.921% | 3.526% | + | 9.0% | 12.8% | 6.0% | 9.0% | 1.0% | 0.1% | 10.8% | 4.0% | +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+ -.. note:: - The results for ``gmm`` and ``isv`` were run with the parallelized scripts. - Though the results obtained with the sequential script should be similar, it might be that they are not identical. -.. note:: - The ``lrpca`` and ``lda-ir`` algorithms require hand-labeled eye positions to run. - Since the AT&T database does not provide eye positions, it is not possible to provide baseline results on AT&T for these two algorithms. 
+Several types of evaluation can be executed, see ``bob bio --help`` for details. +Particularly, here we can enable ROC curves, DET plots and CMC curves. + +.. code-block:: sh + + $ bob bio roc <RESULT_DIR>/atnt/eigenface/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/lda/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/gabor_graph/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/lgbphs/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/gmm/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/isv/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/plda/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/bic/Default/nonorm/scores-dev --no-evaluation \ + -o ROC.pdf + + $ bob bio det <RESULT_DIR>/atnt/eigenface/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/lda/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/gabor_graph/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/lgbphs/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/gmm/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/isv/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/plda/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/bic/Default/nonorm/scores-dev --no-evaluation \ + -o DET.pdf + + $ bob bio cmc <RESULT_DIR>/atnt/eigenface/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/lda/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/gabor_graph/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/lgbphs/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/gmm/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/isv/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/plda/Default/nonorm/scores-dev \ + <RESULT_DIR>/atnt/bic/Default/nonorm/scores-dev --no-evaluation \ + -o CMC.pdf + + +For the `AT&T database`_ the results should be as follows: + +.. image:: img/ROC.png + :width: 35% +.. image:: img/DET.png + :width: 27% +.. image:: img/CMC.png + :width: 35% + .. 
include:: links.rst diff --git a/doc/img/CMC.png b/doc/img/CMC.png index 040e374242919317a45daa1994f359acdf48dcff..d1bb3a52d2ee2d8445061ff2b9a5d035db211499 100644 Binary files a/doc/img/CMC.png and b/doc/img/CMC.png differ diff --git a/doc/img/DET.png b/doc/img/DET.png index e0732263822c3b480b6c35f32a002a1c42e7cf6a..e6130ce95998753960e5f8d6af13918ca0bd7690 100644 Binary files a/doc/img/DET.png and b/doc/img/DET.png differ diff --git a/doc/img/ROC.png b/doc/img/ROC.png index fbb17b175ad6cc730a75fa895d9d036ce9cc0aac..46ca262a197868a72e54e7bda3688804000722df 100644 Binary files a/doc/img/ROC.png and b/doc/img/ROC.png differ diff --git a/setup.py b/setup.py index 60375166dd3210e031b4e4cfe494957574cb8f85..c5c65897887811ea492320705955fdda881d88a6 100644 --- a/setup.py +++ b/setup.py @@ -189,10 +189,19 @@ setup( 'bic-jets = bob.bio.face.config.algorithm.bic_jets:algorithm', # BIC on gabor jets ], - # main entry for bob bio cli - 'bob.bio.cli': [ - 'face = bob.bio.face.script.face:face', - ], + #baselines + 'bob.bio.baseline':[ + 'eigenface = bob.bio.face.baseline.baseline:eigenface', + 'lda = bob.bio.face.baseline.baseline:lda', + 'plda = bob.bio.face.baseline.baseline:plda', + 'gabor_graph = bob.bio.face.baseline.baseline:gabor_graph', + 'lgbphs = bob.bio.face.baseline.baseline:lgbphs', + 'gmm = bob.bio.face.baseline.baseline:gmm', + 'isv = bob.bio.face.baseline.baseline:isv', + 'ivector = bob.bio.face.baseline.baseline:ivector', + 'bic = bob.bio.face.baseline.baseline:bic', + ], + }, # Classifiers are important if you plan to distribute this package through