Commit 8e12771b authored by Tiago de Freitas Pereira, committed by Amir MOHAMMADI

[sphinx] Clean up documentation and prepare the floor for Manuel's students

parent 20807738
@@ -5,6 +5,7 @@ from bob.pipelines import wrap
from bob.bio.face.helpers import face_crop_solver
import numpy as np
def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
"""
Computes the default cropped positions for the FaceCropper used with Facenet-like
@@ -32,32 +33,51 @@ def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
TOP_LEFT_POS = (0, 0)
BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS}
cropped_positions = {"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS}
elif annotation_type == "eyes-center":
RIGHT_EYE_POS = (
round(2 / 7 * CROPPED_IMAGE_HEIGHT),
round(1 / 3 * CROPPED_IMAGE_WIDTH),
)
LEFT_EYE_POS = (
round(2 / 7 * CROPPED_IMAGE_HEIGHT),
round(2 / 3 * CROPPED_IMAGE_WIDTH),
)
cropped_positions = {"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS}
elif annotation_type == "left-profile":
EYE_POS = (
round(2 / 7 * CROPPED_IMAGE_HEIGHT),
round(3 / 8 * CROPPED_IMAGE_WIDTH),
)
MOUTH_POS = (
round(5 / 7 * CROPPED_IMAGE_HEIGHT),
round(3 / 8 * CROPPED_IMAGE_WIDTH),
)
cropped_positions = {"leye": EYE_POS, "mouth": MOUTH_POS}
elif annotation_type == "right-profile":
EYE_POS = (
round(2 / 7 * CROPPED_IMAGE_HEIGHT),
round(5 / 8 * CROPPED_IMAGE_WIDTH),
)
MOUTH_POS = (
round(5 / 7 * CROPPED_IMAGE_HEIGHT),
round(5 / 8 * CROPPED_IMAGE_WIDTH),
)
cropped_positions = {"reye": EYE_POS, "mouth": MOUTH_POS}
else:
cropped_positions = None
return cropped_positions
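# A quick sanity check of the helper above (a sketch; the values follow
# directly from the round() formulas): for a 160x160 crop with
# "eyes-center" annotations, the eyes land at 2/7 of the height and at
# 1/3 and 2/3 of the width.
#
#   >>> embedding_transformer_default_cropping((160, 160), "eyes-center")
#   {'leye': (46, 107), 'reye': (46, 53)}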
def legacy_default_cropping(cropped_image_size, annotation_type):
"""
Computes the default cropped positions for the FaceCropper used with legacy extractors,
@@ -85,33 +105,41 @@ def legacy_default_cropping(cropped_image_size, annotation_type):
TOP_LEFT_POS = (0, 0)
BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS}
cropped_positions = {"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS}
elif annotation_type == "eyes-center":
RIGHT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 - 1)
LEFT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 * 3)
cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS}
cropped_positions = {"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS}
elif annotation_type == "left-profile":
# Main reference https://gitlab.idiap.ch/bob/bob.chapter.FRICE/-/blob/master/bob/chapter/FRICE/script/pose.py
EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 7 * 3 - 2)
MOUTH_POS = (CROPPED_IMAGE_HEIGHT // 3 * 2, CROPPED_IMAGE_WIDTH // 7 * 3 - 2)
cropped_positions = {"leye": EYE_POS, "mouth": MOUTH_POS}
elif annotation_type == "right-profile":
# Main reference https://gitlab.idiap.ch/bob/bob.chapter.FRICE/-/blob/master/bob/chapter/FRICE/script/pose.py
EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 7 * 4 + 2)
MOUTH_POS = (CROPPED_IMAGE_HEIGHT // 3 * 2, CROPPED_IMAGE_WIDTH // 7 * 4 + 2)
cropped_positions = {"reye": EYE_POS, "mouth": MOUTH_POS}
else:
cropped_positions = None
return cropped_positions
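# A similar sketch for the legacy cropper: with the usual 80x64 legacy
# crop size and "eyes-center" annotations, the integer divisions above
# give
#
#   >>> legacy_default_cropping((80, 64), "eyes-center")
#   {'leye': (16, 48), 'reye': (16, 15)}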
def embedding_transformer(
cropped_image_size,
embedding,
annotation_type,
cropped_positions,
fixed_positions=None,
color_channel="rgb",
):
"""
Creates a pipeline composed of a FaceCropper and an Embedding extractor.
This transformer is suited for Facenet-based architectures.
@@ -121,13 +149,15 @@ def embedding_transformer(cropped_image_size, embedding, annotation_type, croppe
"""
face_cropper = face_crop_solver(
cropped_image_size,
color_channel=color_channel,
cropped_positions=cropped_positions,
fixed_positions=fixed_positions,
)
transform_extra_arguments = (
None if cropped_positions is None else (("annotations", "annotations"),)
)
transformer = make_pipeline(
wrap(
@@ -140,7 +170,10 @@ def embedding_transformer(cropped_image_size, embedding, annotation_type, croppe
return transformer
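# Usage sketch (names are illustrative, not part of this module): any
# scikit-learn compatible transformer that maps cropped faces to feature
# vectors can be passed as `embedding`:
#
#   positions = embedding_transformer_default_cropping((160, 160), "eyes-center")
#   transformer = embedding_transformer(
#       (160, 160),
#       my_embedding,  # hypothetical embedding transformer instance
#       "eyes-center",
#       positions,
#   )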
def embedding_transformer_160x160(
embedding, annotation_type, fixed_positions, color_channel="rgb"
):
"""
Creates a pipeline composed of a FaceCropper and an Embedding extractor.
This transformer is suited for Facenet-based architectures.
@@ -149,12 +182,23 @@ def embedding_transformer_160x160(embedding, annotation_type, fixed_positions, c
This will resize images to :math:`160 \times 160`
"""
cropped_positions = embedding_transformer_default_cropping(
(160, 160), annotation_type
)
return embedding_transformer(
(160, 160),
embedding,
annotation_type,
cropped_positions,
fixed_positions,
color_channel=color_channel,
)
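# The 112x112 and 224x224 variants below follow the same pattern,
# differing only in the crop size and in the hard-coded eye positions.
# A sketch:
#
#   transformer = embedding_transformer_160x160(
#       my_embedding,  # hypothetical embedding transformer instance
#       "eyes-center",
#       None,  # no fixed_positions
#   )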
def embedding_transformer_112x112(
embedding, annotation_type, fixed_positions, color_channel="rgb"
):
"""
Creates a pipeline composed of a FaceCropper and an Embedding extractor.
This transformer is suited for Facenet-based architectures.
@@ -166,12 +210,52 @@ def embedding_transformer_112x112(embedding, annotation_type, fixed_positions, c
cropped_image_size = (112, 112)
if annotation_type == "eyes-center":
# Hard coding eye positions for backward consistency
cropped_positions = {"leye": (32, 77), "reye": (32, 34)}
else:
# Will use default
cropped_positions = embedding_transformer_default_cropping(
cropped_image_size, annotation_type
)
return embedding_transformer(
cropped_image_size,
embedding,
annotation_type,
cropped_positions,
fixed_positions,
color_channel=color_channel,
)
def embedding_transformer_224x224(
embedding, annotation_type, fixed_positions, color_channel="rgb"
):
"""
Creates a pipeline composed of a FaceCropper and an Embedding extractor.
This transformer is suited for Facenet-based architectures.
.. warning::
This will resize images to :math:`224 \times 224`
"""
cropped_image_size = (224, 224)
if annotation_type == "eyes-center":
# Hard coding eye positions for backward consistency
cropped_positions = {"leye": (65, 150), "reye": (65, 77)}
else:
# Will use default
cropped_positions = embedding_transformer_default_cropping(
cropped_image_size, annotation_type
)
return embedding_transformer(
cropped_image_size,
embedding,
annotation_type,
cropped_positions,
fixed_positions,
color_channel=color_channel,
)
def crop_80x64(annotation_type, fixed_positions=None, color_channel="gray"):
@@ -212,15 +296,16 @@ def crop_80x64(annotation_type, fixed_positions=None, color_channel="gray"):
cropped_positions = legacy_default_cropping(cropped_image_size, annotation_type)
face_cropper = face_crop_solver(
cropped_image_size,
color_channel=color_channel,
cropped_positions=cropped_positions,
fixed_positions=fixed_positions,
dtype=dtype,
)
transform_extra_arguments = (
None if cropped_positions is None else (("annotations", "annotations"),)
)
return face_cropper, transform_extra_arguments
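# Usage sketch: the returned pieces plug into a pipeline the same way
# embedding_transformer wires its cropper above (the pipeline shape here
# is an assumption based on that function):
#
#   face_cropper, extra_args = crop_80x64("eyes-center")
#   pipeline = make_pipeline(
#       wrap(["sample"], face_cropper, transform_extra_arguments=extra_args),
#       # ... extractor / algorithm stages ...
#   )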
@@ -93,7 +93,7 @@ class FaceCrop(Base):
your database easily. If you are sure about your input, you can set this flag to
``True``.
annotator : `bob.bio.base.annotator.Annotator`
If provided, the annotator will be used if the required annotations are
missing.
@@ -28,8 +28,8 @@ from bob.pipelines.sample import SampleBatch
class HistogramEqualization(Base):
"""Crops the face (if desired) and performs histogram equalization to photometrically enhance the image.
Parameters
----------
face_cropper : str or :py:class:`bob.bio.face.preprocessor.FaceCrop` or :py:class:`bob.bio.face.preprocessor.FaceDetect` or ``None``
The face image cropper that should be applied to the image.
@@ -7,7 +7,7 @@
=================
This package provides several face annotators (using RGB images) that you can
use to annotate biometric databases. See :any:`bob.bio.base.annotator` for
a guide on the general usage of this feature.
.. warning::
@@ -8,60 +8,51 @@
Executing Baseline Algorithms
=============================
In this section we introduce the baselines available in this package.
To execute one of them on one of the available databases, just run the following command (a concrete example follows the note below)::

$ bob bio pipelines vanilla-biometrics [DATABASE_NAME] [BASELINE]

.. note::
  Both `[DATABASE_NAME]` and `[BASELINE]` can be either python resources or
  python files.

  Please, refer to :ref:`bob.bio.base <bob.bio.base>` for more information.
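For instance, assuming the AT&T database and the FaceNet baseline below are registered as resources (the names are illustrative; list the registered ones with ``resources.py --types p``), the command would look like::

$ bob bio pipelines vanilla-biometrics atnt facenet-sanderberg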
Baselines available
-------------------

The algorithms below contain all the face recognition baselines available.
They are split into two groups: before and after the deep learning era.


Before the deep learning era
~~~~~~~~~~~~~~~~~~~~~~~~~~~~

* ``eigenface``: The eigenface algorithm as proposed by [TP91]_. It uses the pixels as raw data and applies a *Principal Component Analysis* (PCA) on it.

* ``lda``: The LDA algorithm applies a *Linear Discriminant Analysis* (LDA); here we use the combined PCA+LDA approach [ZKC98]_.

* ``gabor_graph``: This method extracts grid graphs of Gabor jets from the images and computes a Gabor phase based similarity [GHW12]_.

* ``lgbphs``: Local Gabor binary pattern histogram sequence (LGBPHS) implemented in [ZSG05]_.


Deep learning baselines
~~~~~~~~~~~~~~~~~~~~~~~

* ``facenet-sanderberg``: FaceNet trained by `David Sandberg <https://github.com/davidsandberg/facenet>`_

* ``inception-resnetv2-msceleb``: Inception Resnet v2 model trained using the MSCeleb dataset in the context of the work published by [TFP18]_

* ``inception-resnetv1-msceleb``: Inception Resnet v1 model trained using the MSCeleb dataset in the context of the work published by [TFP18]_

* ``inception-resnetv2-casiawebface``: Inception Resnet v2 model trained using the CASIA-WebFace dataset in the context of the work published by [TFP18]_

* ``inception-resnetv1-casiawebface``: Inception Resnet v1 model trained using the CASIA-WebFace dataset in the context of the work published by [TFP18]_

* ``arcface-insightface``: Arcface model from `Insightface <https://github.com/deepinsight/insightface>`_
@@ -14,15 +14,11 @@ Databases
bob.bio.face.database.ARFaceBioDatabase
bob.bio.face.database.AtntBioDatabase
bob.bio.face.database.MobioBioDatabase
bob.bio.face.database.CaspealBioDatabase
bob.bio.face.database.ReplayBioDatabase
bob.bio.face.database.ReplayMobileBioDatabase
bob.bio.face.database.GBUBioDatabase
bob.bio.face.database.LFWBioDatabase
bob.bio.face.database.MultipieBioDatabase
bob.bio.face.database.XM2VTSBioDatabase
bob.bio.face.database.FRGCBioDatabase
bob.bio.face.database.SCFaceBioDatabase
bob.bio.face.database.FargoBioDatabase
@@ -4,9 +4,9 @@
.. _bob.bio.face:
=====================================
Open Source Face Recognition Library
=====================================
This package provides open source tools to run comparable and reproducible face recognition experiments.
@@ -15,32 +15,30 @@ This includes:
* Preprocessors to detect, align and photometrically enhance face images
* Feature extractors that extract features from facial images
* Facial image databases including their protocols.
* Scripts that train CNNs for face recognition.
Get Started
===========
The easiest way to get started is by simply comparing two faces::
$ bob bio compare-samples -p facenet-sanderberg me.png not_me.png
.. warning::
No face detection is carried out with this command.
Check out all the face recognition algorithms available by doing::
$ resources.py --types p
Get Started, serious
====================
For detailed information on how this package is structured and how
to run experiments with it, please refer to the documentation of :ref:`bob.bio.base <bob.bio.base>`,
and get to know the vanilla biometrics pipeline and how to integrate both algorithms and database protocols with it.
Users Guide
@@ -50,7 +48,7 @@ Users Guide
:maxdepth: 2
baselines
leaderboard/leaderboard
references
annotators
.. vim: set fileencoding=utf-8 :
.. _bob.bio.face.leaderboard.arface:
==============
ARFACE Dataset
==============
.. todo::
Present benchmarks
Probably for Manuel's students
.. vim: set fileencoding=utf-8 :
.. _bob.bio.face.leaderboard.banca:
=============
Banca Dataset
=============
.. todo::
Present benchmarks
Probably for Manuel's students
.. vim: set fileencoding=utf-8 :
.. _bob.bio.face.leaderboard.gbu:
===========
GBU Dataset
===========
.. todo::
Present benchmarks
Probably for Manuel's students
.. vim: set fileencoding=utf-8 :
.. _bob.bio.face.leaderboard.ijbc:
=============
IJB-C Dataset
=============
.. todo::
Present benchmarks
Probably for Manuel's students
.. vim: set fileencoding=utf-8 :
.. _bob.bio.face.leaderboard:

===========
Leaderboard
===========

In the following pages we present a face recognition leaderboard on some popular datasets.
Datasets
--------
.. toctree::
:maxdepth: 2
mobio
lfw
meds
morph
ijbc
uccs
multipie
arface
xm2vts
gbu
banca
.. vim: set fileencoding=utf-8 :
.. _bob.bio.face.leaderboard.lfw:
===========
LFW Dataset
===========
.. todo::
Present benchmarks
Probably for Manuel's students
.. vim: set fileencoding=utf-8 :
.. _bob.bio.face.leaderboard.meds:
============
MEDS Dataset
============
.. todo::
Present benchmarks
Probably for Manuel's students
.. vim: set fileencoding=utf-8 :
.. _bob.bio.face.leaderboard.mobio:
=============
Mobio Dataset
=============
.. todo::
Benchmarks on Mobio Database
Probably for Manuel's students
.. vim: set fileencoding=utf-8 :
.. _bob.bio.face.leaderboard.morph:
=============
Morph Dataset
=============
.. todo::
Present benchmarks
Probably for Manuel's students