Commit 1145e469 authored by Manuel Günther

First running version of bob.bio.csu
*~
*.swp
*.pyc
bin
eggs
parts
.installed.cfg
.mr.developer.cfg
*.egg-info
src
develop-eggs
sphinx
dist
include README.rst bootstrap-buildout.py buildout.cfg COPYING version.txt requirements.txt
recursive-include doc *.py *.rst
.. vim: set fileencoding=utf-8 :
.. Manuel Guenther <manuel.guenther@idiap.ch>
.. Fri Sep 19 12:51:09 CEST 2014
.. image:: http://img.shields.io/badge/docs-stable-yellow.png
:target: http://pythonhosted.org/xfacereclib.extension.CSU/index.html
.. image:: http://img.shields.io/badge/docs-latest-orange.png
:target: https://www.idiap.ch/software/bob/docs/latest/bioidiap/xfacereclib.extension.CSU/master/index.html
.. image:: https://img.shields.io/badge/github-master-0000c0.png
:target: https://github.com/bioidiap/xfacereclib.extension.CSU/tree/master
.. image:: http://img.shields.io/pypi/v/xfacereclib.extension.CSU.png
:target: https://pypi.python.org/pypi/xfacereclib.extension.CSU
.. image:: http://img.shields.io/pypi/dm/xfacereclib.extension.CSU.png
:target: https://pypi.python.org/pypi/xfacereclib.extension.CSU
.. image:: https://img.shields.io/badge/original-software-a000a0.png
:target: http://www.cs.colostate.edu/facerec
===================================================================
FaceRecLib Wrapper classes for the CSU Face Recognition Resources
===================================================================
This satellite package to the FaceRecLib_ provides wrapper classes for the CSU face recognition resources, which can be downloaded from http://www.cs.colostate.edu/facerec.
Two algorithms are provided by the CSU toolkit (and also by this satellite package): the local region PCA (LRPCA) and the LDA-IR (also known as CohortLDA).
For more information about the LRPCA and the LDA-IR algorithm, please refer to the documentation on http://www.cs.colostate.edu/facerec/.
For further information about the FaceRecLib_, please read `its Documentation <http://pythonhosted.org/facereclib/index.html>`_.
For information on how to use this package in a face recognition experiment, please see http://pypi.python.org/pypi/xfacereclib.paper.BeFIT2012.
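For example, the LDA-IR algorithm of this package can be instantiated in a configuration file as follows (a minimal sketch mirroring the configuration files shipped with this package):

.. code-block:: python

  import facerec2010
  import bob.bio.csu

  algorithm = bob.bio.csu.algorithm.LDAIR(
    REGION_ARGS = facerec2010.baseline.lda.CohortLDA_REGIONS,
    REGION_KEYWORDS = facerec2010.baseline.lda.CohortLDA_KEYWORDS
  )
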
Installation Instructions
-------------------------
The current package is just a set of wrapper classes for the CSU facerec2010 module, which is contained in the `CSU Face Recognition Resources <http://www.cs.colostate.edu/facerec>`_, from which you need to download the Baseline 2011 Algorithms.
Please make sure that you have read the installation instructions in the Documentation_ of this package on how to patch the original source code to work with our algorithms before going on with this package.
.. note::
Since the original CSU resources are not Python3 compatible, this package only supports Python2.
For external dependencies of the CSU resources, please read their `README <http://www.cs.colostate.edu/facerec/algorithms/README.pdf>`__.
The FaceRecLib_ and parts of this package rely on Bob_, an open-source signal-processing and machine learning toolbox.
For Bob_ to be able to work properly, some dependent packages are required to be installed.
Please make sure that you have read the `Dependencies <https://github.com/idiap/bob/wiki/Dependencies>`_ for your operating system.
Documentation
-------------
For further documentation on this package, please read the `Stable Version <http://pythonhosted.org/xfacereclib.extension.CSU/index.html>`_ or the `Latest Version <https://www.idiap.ch/software/bob/docs/latest/bioidiap/xfacereclib.extension.CSU/master/index.html>`_ of the documentation.
For a list of tutorials on packages of Bob_, or information on submitting issues, asking questions and starting discussions, please visit its website.
.. _bob: https://www.idiap.ch/software/bob
.. _facereclib: http://pypi.python.org/pypi/facereclib
#see http://peak.telecommunity.com/DevCenter/setuptools#namespace-packages
__import__('pkg_resources').declare_namespace(__name__)
#see http://peak.telecommunity.com/DevCenter/setuptools#namespace-packages
__import__('pkg_resources').declare_namespace(__name__)
from . import preprocessor
from . import extractor
from . import algorithm
from . import utils
from . import test
def get_config():
"""Returns a string containing the configuration information.
"""
import bob.extension
return bob.extension.get_config(__name__)
# gets sphinx autodoc done right - don't remove it
__all__ = [_ for _ in dir() if not _.startswith('_')]
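The utils module imported above is not part of this commit excerpt; its save_pickle and load_pickle helpers, which the algorithm wrappers below rely on, presumably wrap the standard pickle module roughly as in the following hypothetical sketch (not the actual implementation):

import pickle

def save_pickle(data, filename):
  """Hypothetical sketch: writes the given object to the given file using the pickle module."""
  with open(filename, 'wb') as f:
    pickle.dump(data, f)

def load_pickle(filename):
  """Hypothetical sketch: reads an object back from the given pickle file."""
  with open(filename, 'rb') as f:
    return pickle.load(f)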
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Manuel Guenther <Manuel.Guenther@idiap.ch>
# @date: Mon Oct 29 09:27:59 CET 2012
#
# Copyright (C) 2011-2012 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import facerec2010
import bob.bio.base
from .. import utils
class LDAIR (bob.bio.base.algorithm.Algorithm):
"""This class defines a wrapper for the facerec2010.baseline.lda.LRLDA class to be used as a face recognition :py:class:`facereclib.tools.Tool` in the :ref:`FaceRecLib <facereclib>`."""
def __init__(
self,
REGION_ARGS,
REGION_KEYWORDS,
multiple_model_scoring = 'max', # by default, take the maximum score over several model features
multiple_probe_scoring = 'max'  # by default, take the maximum score over several probe features
):
"""Constructor Documentation:
REGION_ARGS
The region arguments as taken from facerec2010.baseline.lda.CohortLDA_REGIONS
REGION_KEYWORDS
The region keywords as taken from facerec2010.baseline.lda.CohortLDA_KEYWORDS
multiple_model_scoring
The scoring strategy if models are enrolled from several images, see bob.bio.base.algorithm.Algorithm for more information.
multiple_probe_scoring
The scoring strategy if a score is computed from several probe images, see bob.bio.base.algorithm.Algorithm for more information.
"""
bob.bio.base.algorithm.Algorithm.__init__(self, multiple_model_scoring=multiple_model_scoring, multiple_probe_scoring=multiple_probe_scoring, **REGION_KEYWORDS)
self.ldair = facerec2010.baseline.lda.LRLDA(REGION_ARGS, **REGION_KEYWORDS)
self.use_cohort = 'cohort_adjust' not in REGION_ARGS[0] or REGION_ARGS[0]['cohort_adjust']
def _check_feature(self, feature):
"""Checks that the features are of the desired data type."""
assert isinstance(feature, facerec2010.baseline.common.FaceRecord)
assert hasattr(feature, "features")
def load_projector(self, projector_file):
"""This function loads the Projector from the given projector file.
This is only required when the cohort adjustment is enabled.
"""
# To avoid re-training the Projector, we load the Extractor file instead.
# This is only required when the cohort adjustment is enabled, otherwise the default parametrization of LDA-IR should be sufficient.
# Be careful, THIS IS A HACK and it might not work in all circumstances!
if self.use_cohort:
extractor_file = projector_file.replace("Projector", "Extractor")
self.ldair = utils.load_pickle(extractor_file)
def enroll(self, enroll_features):
"""Enrolls a model from features from several images by simply storing all given features."""
[self._check_feature(f) for f in enroll_features]
# just store all features (should be of type FaceRecord)
# since the given features are already in the desired format, there is nothing to do.
return enroll_features
def write_model(self, model, model_file):
"""Saves the enrolled model to file using the pickle module."""
# just dump the model to .pkl file
utils.save_pickle(model, model_file)
def read_model(self, model_file):
"""Loads an enrolled model from file using the pickle module."""
# just read the model from .pkl file
return utils.load_pickle(model_file)
# probe and model are identically stored in a .pkl file
read_probe = read_model
def score(self, model, probe):
"""Compute the score for the given model (a list of FaceRecords) and a probe (a FaceRecord)"""
if isinstance(model, list):
# compute score fusion strategy with several model features (which is implemented in the base class)
return self.score_for_multiple_models(model, probe)
else:
self._check_feature(model)
self._check_feature(probe)
return self.ldair.similarityMatrix([probe], [model])[0,0]
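A rough usage sketch of the LDAIR wrapper defined above: enroll_features and probe_feature are placeholders for facerec2010.baseline.common.FaceRecord objects produced by the LDA-IR extractor of this package (not shown here), so the snippet assumes they are already available:

import facerec2010
import bob.bio.csu

algorithm = bob.bio.csu.algorithm.LDAIR(
  REGION_ARGS = facerec2010.baseline.lda.CohortLDA_REGIONS,
  REGION_KEYWORDS = facerec2010.baseline.lda.CohortLDA_KEYWORDS
)

# 'enroll_features' (a list) and 'probe_feature' are placeholders for FaceRecord
# objects produced by the LDA-IR extractor
model = algorithm.enroll(enroll_features)        # enrollment simply stores the features
algorithm.write_model(model, "Model.pkl")        # models are pickled to disk
model = algorithm.read_model("Model.pkl")
score = algorithm.score(model, probe_feature)    # fuses the scores over all enrolled features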
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Manuel Guenther <Manuel.Guenther@idiap.ch>
# @date: Mon Oct 29 09:27:59 CET 2012
#
# Copyright (C) 2011-2012 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import facerec2010
import bob.bio.base
import numpy
from .. import utils
class LRPCA (bob.bio.base.algorithm.Algorithm):
"""This class defines a wrapper for the facerec2010.baseline.lrpca.LRPCA class to be used as a face recognition :py:class:`facereclib.tools.Tool` in the :ref:`FaceRecLib <facereclib>`."""
def __init__(
self,
TUNING,
multiple_model_scoring = 'max', # by default, take the maximum score over several model features
multiple_probe_scoring = 'max'  # by default, take the maximum score over several probe features
):
"""Constructor Documentation:
TUNING
The tuning for the LRPCA algorithm as taken from the facerec2010.baseline.lrpca.GBU_TUNING
multiple_model_scoring
The scoring strategy if models are enrolled from several images, see bob.bio.base.algorithm.Algorithm for more information.
multiple_probe_scoring
The scoring strategy if a score is computed from several probe images, see bob.bio.base.algorithm.Algorithm for more information.
"""
bob.bio.base.algorithm.Algorithm.__init__(self, multiple_model_scoring=multiple_model_scoring, multiple_probe_scoring=multiple_probe_scoring, **TUNING)
# initialize LRPCA (not sure if this is really required)
self.lrpca = facerec2010.baseline.lrpca.LRPCA(**TUNING)
def _check_feature(self, feature):
"""Assures that the feature is of the desired type"""
assert isinstance(feature, numpy.ndarray)
assert feature.ndim == 1
assert feature.dtype == numpy.float64
def _check_model(self, model):
"""Assures that the model is of the desired type"""
assert isinstance(model, facerec2010.baseline.pca.FaceRecord)
assert hasattr(model, "feature")
def enroll(self, enroll_features):
"""Enrolls a model from features from several images by simply storing all given features."""
# no rule to enroll features in the LRPCA setup, so we just store all features
# create model Face records
model_records = []
for feature in enroll_features:
model_record = facerec2010.baseline.pca.FaceRecord(None,None,None)
model_record.feature = feature[:]
model_records.append(model_record)
return model_records
def write_model(self, model, model_file):
"""Saves the enrolled model to file using the pickle module."""
# just dump the model to .pkl file
utils.save_pickle(model, model_file)
def read_model(self, model_file):
"""Loads an enrolled model from file using the pickle module."""
# just read the model from .pkl file
return utils.load_pickle(model_file)
def score(self, model, probe):
"""Computes the score for the given model (a list of FaceRecords) and a probe feature (a numpy.ndarray)"""
if isinstance(model, list):
# compute score fusion strategy with several model features (which is implemented in the base class)
return self.score_for_multiple_models(model, probe)
else:
self._check_model(model)
self._check_feature(probe)
# compute score for one model and one probe
probe_record = facerec2010.baseline.pca.FaceRecord(None,None,None)
probe_record.feature = probe
return self.lrpca.similarityMatrix([probe_record], [model])[0,0]
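A rough usage sketch of the LRPCA wrapper defined above; the random vectors below merely stand in for real LRPCA features (1D float64 arrays whose actual length depends on the tuning), so the resulting score is meaningless:

import numpy
import facerec2010
import bob.bio.csu

algorithm = bob.bio.csu.algorithm.LRPCA(
  TUNING = facerec2010.baseline.lrpca.GBU_TUNING
)

# stand-ins for real LRPCA features extracted from several images of one client
enroll_features = [numpy.random.rand(100) for _ in range(3)]
probe_feature = numpy.random.rand(100)

model = algorithm.enroll(enroll_features)      # wraps each feature in a FaceRecord
score = algorithm.score(model, probe_feature)  # fuses the per-feature scores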
from .LRPCA import LRPCA
from .LDAIR import LDAIR
import facerec2010
import bob.bio.csu
algorithm = bob.bio.csu.algorithm.LDAIR(
REGION_ARGS = facerec2010.baseline.lda.CohortLDA_REGIONS,
REGION_KEYWORDS = facerec2010.baseline.lda.CohortLDA_KEYWORDS
)
import facerec2010
import bob.bio.csu
algorithm = bob.bio.csu.algorithm.LRPCA(
TUNING = facerec2010.baseline.lrpca.GBU_TUNING
)
import facerec2010
import bob.bio.csu
extractor = bob.bio.csu.extractor.LDAIR(
REGION_ARGS = facerec2010.baseline.lda.CohortLDA_REGIONS,
REGION_KEYWORDS = facerec2010.baseline.lda.CohortLDA_KEYWORDS
)
import facerec2010
import bob.bio.csu
extractor = bob.bio.csu.extractor.LRPCA(
TUNING = facerec2010.baseline.lrpca.GBU_TUNING
)
import facerec2010
import bob.bio.csu
preprocessor = bob.bio.csu.preprocessor.LDAIR(
REGION_ARGS = facerec2010.baseline.lda.CohortLDA_REGIONS,
REGION_KEYWORDS = facerec2010.baseline.lda.CohortLDA_KEYWORDS
)