"""Performs a principal component analysis (PCA) on the given data.
This algorithm computes a PCA projection (:py:class:`bob.learn.linear.PCATrainer`) on the given training images, and projects the images into face space.
In contrast to :py:class:`bob.bio.base.algorithm.PCA`, here the eigenfaces themselves are used as features, i.e., more advanced face recognition algorithms can be applied on top of them.
**Parameters:**
subspace_dimension : int or float
If specified as ``int``, defines the number of eigenvectors used in the PCA projection matrix.
If specified as ``float`` (between 0 and 1), the number of eigenvectors is calculated such that the given percentage of variance is kept.
kwargs : ``key=value`` pairs
A list of keyword arguments directly passed to the :py:class:`bob.bio.base.extractor.Extractor` base class constructor.
"""
def __init__(self, subspace_dimension):
# We have to register that this function will need a training step
...
...
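For illustration, a minimal usage sketch follows; it is not part of the library code. The image sizes, number of images, and file name are made up, and only the documented ``subspace_dimension`` and ``train`` interfaces are assumed.
import numpy

extractor_by_count = Eigenface(subspace_dimension = 5)       # keep exactly 5 eigenfaces
extractor_by_variance = Eigenface(subspace_dimension = 0.95)  # keep 95% of the variance

# training images must be 2D float64 arrays (see _check_data below)
training_images = [numpy.random.rand(64, 64) for _ in range(20)]
extractor_by_count.train(training_images, "eigenface.hdf5")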
@@ -22,17 +35,29 @@ class Eigenface (Extractor):
def _check_data(self, data):
"""Checks that the given data are appropriate."""
assert isinstance(data, numpy.ndarray)
assert data.ndim == 2
assert data.dtype == numpy.float64
def train(self, image_list, extractor_file):
"""Trains the eigenface extractor using the given list of training images"""
[self._check_data(image) for image in image_list]
def train(self, training_images, extractor_file):
"""Generates the PCA covariance matrix and writes it into the given extractor_file.
Beforehand, all images are turned into a 1D pixel vector.
**Parameters:**
training_images : [2D :py:class:`numpy.ndarray`]
A list of 2D training images to train the PCA projection matrix with.
extractor_file : str
A writable file, into which the PCA projection matrix (as a :py:class:`bob.learn.linear.Machine`) will be written.
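To make the flattening and PCA step concrete, here is a plain-``numpy`` sketch of the underlying computation; it is not the library implementation (which uses :py:class:`bob.learn.linear.PCATrainer`, see the class docstring), just an illustration of the math.
import numpy

def pca_sketch(training_images, variance_kept = 0.95):
    # turn each 2D image into a 1D pixel vector and stack them into a data matrix
    data = numpy.vstack([image.flatten() for image in training_images])
    mean = data.mean(axis = 0)
    centered = data - mean
    # the right singular vectors of the centered data are the eigenvectors of the covariance matrix
    _, singular_values, eigenvectors = numpy.linalg.svd(centered, full_matrices = False)
    variances = singular_values ** 2
    # keep enough eigenvectors to retain the requested fraction of the total variance
    kept = int(numpy.searchsorted(numpy.cumsum(variances) / variances.sum(), variance_kept)) + 1
    return mean, eigenvectors[:kept]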
"""Crops the face according to the given annotations"""
"""Crops the face according to the given annotations.
This class is designed to perform a geometric normalization of the face based on the eye locations, using :py:class:`bob.ip.base.FaceEyesNorm`.
Usually, when executing the :py:meth:`crop_face` function, the image and the eye locations have to be specified.
The given image will be transformed such that the eye locations are placed at specific positions in the resulting image.
These locations, as well as the size of the cropped image, need to be specified in the constructor of this class, as ``cropped_positions`` and ``cropped_image_size``.
Some image databases do not provide eye locations, but rather bounding boxes.
This is not a problem at all.
Simply define the coordinates where you want your ``cropped_positions`` to be in the cropped image, and use the same keys in the dictionary that is given as ``annotations`` to the :py:meth:`crop_face` function.
.. note::
These locations can even be outside of the cropped image boundary, i.e., when the crop should be smaller than the annotated bounding boxes.
Sometimes, databases provide pre-cropped faces, where the eyes are located at (almost) the same position in all images.
Usually, this pre-cropping does not conform to the cropping that you want (e.g., the image resolution is wrong, or too much background is included).
However, the database does not provide eye locations (since they are almost identical for all images).
In that case, you can specify ``fixed_positions`` in the constructor; these will be used inside the :py:meth:`crop_face` function instead of the ``annotations``, which are then ignored.
Sometimes, part of the face crop lies outside of the original image boundaries.
Usually, these pixels are simply left black, resulting in sharp edges in the image.
However, some feature extractors are sensitive to these sharp edges.
In this case, you can set ``mask_sigma`` to copy pixels from the valid border of the image and add random noise (see :py:func:`bob.ip.base.extrapolate_mask`).
**Parameters:**
cropped_image_size : (int, int)
The size of the resulting cropped images.
cropped_positions : dict
The coordinates in the cropped image where the annotated points should be placed.
This parameter is a dictionary with usually two elements, e.g., ``{'reye':(RIGHT_EYE_Y, RIGHT_EYE_X) , 'leye':(LEFT_EYE_Y, LEFT_EYE_X)}``.
However, other keys, such as ``{'topleft' : ..., 'bottomright' : ...}``, are also supported, as long as the same keys are present in the ``annotations`` passed to the :py:meth:`__call__` function.
fixed_positions : dict or None
If specified, ignore the annotations from the database and use these fixed positions throughout.
mask_sigma : float or None
Fill the area outside of image boundaries with random pixels from the border, by adding noise to the pixel values.
To disable extrapolation, set this value to ``None``.
To disable adding random noise, set it to a negative value or 0.
mask_neighbors : int
The number of neighbors used during mask extrapolation.
See :py:func:`bob.ip.base.extrapolate_mask` for details.
mask_seed : int or None
The random seed to apply for mask extrapolation.
.. warning::
When run in parallel, the same random seed will be applied to all parallel processes.
Hence, results of parallel execution will differ from the results in serial execution.
kwargs
Remaining keyword parameters passed to the :py:class:`Base` constructor, such as ``color_channel`` or ``dtype``.
"""
def __init__(
self,
...
...
@@ -36,38 +92,6 @@ class FaceCrop (Base):
mask_seed=None,  # The seed for generating random values during extrapolation
**kwargs  # parameters to be written in the __str__ method
):
"""Parameters of the constructor of this preprocessor:
cropped_image_size : (int, int)
The size of the resulting cropped images.
cropped_positions : dict
The coordinates in the cropped image, where the annotated points should be put to.
This parameter is a dictionary with usually two elements, e.g., ``{'reye':(RIGHT_EYE_Y, RIGHT_EYE_X) , 'leye':(LEFT_EYE_Y, LEFT_EYE_X)}``.
However, also other parameters, such as ``{'topleft' : ..., 'bottomright' : ...}`` are supported, as long as the ``annotations`` in the :py:meth:`__call__` function are present.
fixed_positions : dict or None
If specified, ignore the annotations from the database and use these fixed positions throughout.
mask_sigma : float or None
Fill the area outside of image boundaries with random pixels from the border, by adding noise to the pixel values.
To disable extrapolation, set this value to None.
To disable adding random noise, set it to a negative value or 0.
mask_neighbors : int
The number of neighbors used during mask extrapolation.
See :py:func:`bob.ip.base.extrapolate_mask` for details.
mask_seed : int or None
The random seed to apply for mask extrapolation.
.. warning::
When run in parallel, the same random seed will be applied to all parallel processes.
Hence, results of parallel execution will differ from the results in serial execution.
kwargs
Remaining keyword parameters passed to the :py:class:`Base` constructor, such as ``color_channel`` or ``dtype``.
"""
Base.__init__(self, **kwargs)
...
...
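For illustration, a minimal usage sketch with made-up coordinates; the values are not recommendations, and only the parameters documented in the class docstring above are assumed.
# Crop to 80x64 pixels, placing the right/left eye at the given (y, x) positions.
cropper = FaceCrop(
    cropped_image_size = (80, 64),
    cropped_positions = {'reye': (16, 15), 'leye': (16, 48)},
)
# face = cropper(image, annotations = {'reye': (ry, rx), 'leye': (ly, lx)})

# For databases with pre-cropped faces and no annotations, fixed positions can be used instead:
fixed_cropper = FaceCrop(
    cropped_image_size = (80, 64),
    cropped_positions = {'reye': (16, 15), 'leye': (16, 48)},
    fixed_positions = {'reye': (30, 40), 'leye': (30, 120)},
)
# face = fixed_cropper(image)   # annotations can be omitted (None)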
@@ -102,7 +126,24 @@ class FaceCrop (Base):
def crop_face(self, image, annotations=None):
"""Executes the face cropping on the given image and returns the cropped version of it"""
"""crop_face(image, annotations = None) -> face
Executes the face cropping on the given image and returns the cropped version of it.
**Parameters:**
image : 2D :py:class:`numpy.ndarray`
The face image to be processed.
annotations : dict or ``None``
The annotations that fit to the given image.
``None`` is only accepted when ``fixed_positions`` were specified in the constructor.
**Returns:**
face : 2D :py:class:`numpy.ndarray` (float)
The cropped face.
"""
if self.fixed_positions is not None:
annotations = self.fixed_positions
if annotations is None:
...
...
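As mentioned in the class docstring, annotation keys are not restricted to eye locations. The following hedged sketch uses bounding-box style keys; the coordinates are illustrative only.
# The same keys must appear in cropped_positions and in the annotations.
bbox_cropper = FaceCrop(
    cropped_image_size = (128, 128),
    cropped_positions = {'topleft': (0, 0), 'bottomright': (128, 128)},
)
# face = bbox_cropper(image, annotations = {'topleft': (ty, tx), 'bottomright': (by, bx)})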
@@ -135,7 +176,27 @@ class FaceCrop (Base):
def __call__(self, image, annotations=None):
"""Aligns the given image according to the given annotations."""
"""__call__(image, annotations = None) -> face
Aligns the given image according to the given annotations.
First, the desired color channel is extracted from the given image.
Afterward, the face is cropped according to the given ``annotations`` (or to ``fixed_positions``, see :py:meth:`crop_face`).
Finally, the resulting face is converted to the desired data type.
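The processing chain described above can be summarized by the following sketch; the helper method names ``color_channel`` and ``data_type`` are assumed to come from the :py:class:`Base` class and may differ in your version.
def call_sketch(preprocessor, image, annotations = None):
    # 1. extract the desired color channel (method name assumed from the Base class)
    image = preprocessor.color_channel(image)
    # 2. geometric normalization according to annotations or fixed_positions
    face = preprocessor.crop_face(image, annotations)
    # 3. convert the result to the configured data type (method name assumed from the Base class)
    return preprocessor.data_type(face)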
"""Performs a face detection (and facial landmark localization) in the given image and crops the face.
This class is designed to perform a geometric normalization of the face based on the detected face.
Face detection is performed using :ref:`bob.ip.facedetect <bob.ip.facedetect>`.
Particularly, the function :py:func:`bob.ip.facedetect.detect_single_face` is executed, which will *always* return *exactly one* bounding box, even if the image contains more than one face, or no face at all.
The speed of the face detector can be regulated using the ``cascade``, ``distance``, ``scale_base``, and ``lowest_scale`` parameters.
The number of overlapping detected bounding boxes that should be joined can be selected by ``detection_overlap``.
Please see the documentation of :ref:`bob.ip.facedetect <bob.ip.facedetect>` for more details about these parameters.
Additionally, facial landmarks can be detected using :ref:`bob.ip.flandmark <bob.ip.flandmark>`.
If enabled with ``use_flandmark = True`` in the constructor, an attempt is made to localize the facial landmarks inside the detected facial area.
If landmarks are found, these are used to geometrically normalize the face.
Otherwise, the eye locations are estimated based on the bounding box.
This estimation is also applied when ``use_flandmark = False``.
The face cropping itself is done by the given ``face_cropper``.
This cropper can either be an instance of :py:class:`FaceCrop` (or any other class that provides a similar ``crop_face`` function), or it can be the resource name of a face cropper, such as ``'face-crop-eyes'``.
**Parameters:**
face_cropper : :py:class:`bob.bio.face.preprocessor.FaceCrop` or str
The face cropper to be used to crop the detected face.
Might be an instance of a :py:class:`FaceCrop` or the name of a face cropper resource.
cascade : str or ``None``
The file name where a face detector cascade can be found.
If ``None``, the default cascade for frontal faces :py:func:`bob.ip.facedetect.default_cascade` is used.
use_flandmark : bool
If selected, :py:class:`bob.ip.flandmark.Flandmark` is used to detect the eye locations.
Otherwise, the eye locations are estimated based on the detected bounding box.
detection_overlap : float
See :py:func:`bob.ip.facedetect.detect_single_face`.
distance : int
See the Sampling section in the :ref:`Users Guide of bob.ip.facedetect <bob.ip.facedetect>`.
scale_base : float
See the Sampling section in the :ref:`Users Guide of bob.ip.facedetect <bob.ip.facedetect>`.
lowest_scale : float
See the Sampling section in the :ref:`Users Guide of bob.ip.facedetect <bob.ip.facedetect>`.
kwargs
Remaining keyword parameters passed to the :py:class:`Base` constructor, such as ``color_channel`` or ``dtype``.
"""
def __init__(
self,
...
...
@@ -25,12 +72,8 @@ class FaceDetect (Base):
distance=2,
scale_base=math.pow(2.,-1./16.),
lowest_scale=0.125,
mask_sigma=None,  # The sigma for random values in areas outside the image
mask_neighbors=5,  # The number of neighbors to consider while extrapolating
mask_seed=None,  # The seed for generating random values during extrapolation
**kwargs
):
"""Performs a face detection in the given image (ignoring any annotations)."""
# call base class constructors
Base.__init__(self, **kwargs)
...
...
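A hypothetical usage sketch combining the two preprocessors documented in this section; parameter values are illustrative only.
eye_cropper = FaceCrop(
    cropped_image_size = (80, 64),
    cropped_positions = {'reye': (16, 15), 'leye': (16, 48)},
)
detector = FaceDetect(face_cropper = eye_cropper, use_flandmark = True)
# face = detector(image)   # the face is detected automatically; no annotations are required

# alternatively, a registered face cropper resource can be referenced by name (see the class docstring):
# detector = FaceDetect(face_cropper = 'face-crop-eyes')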
@@ -60,6 +103,7 @@ class FaceDetect (Base):
def _landmarks(self, image, bounding_box):
"""Try to detect the landmarks in the given bounding box, and return the eye locations."""
# get the landmarks in the face
if self.flandmark is not None:
# use the flandmark detector
...
...
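When no landmarks are found, the eye locations are estimated from the bounding box. The heuristic below only illustrates that idea; the proportions are made up, and the actual rule used by the library may differ.
def eyes_from_bounding_box_sketch(topleft, size):
    # topleft = (y, x) of the bounding box, size = (height, width); proportions are illustrative
    top, left = topleft
    height, width = size
    return {
        'reye': (top + 0.35 * height, left + 0.30 * width),
        'leye': (top + 0.35 * height, left + 0.70 * width),
    }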
@@ -86,6 +130,23 @@ class FaceDetect (Base):
def crop_face(self, image, annotations=None):
"""crop_face(image, annotations = None) -> face
Detects the face (and facial landmarks), and uses the ``face_cropper`` given in the constructor to crop the face.
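A rough sketch of that two-step flow, assuming the :py:func:`bob.ip.facedetect.detect_single_face` interface and bounding-box attributes (exact return values and attribute names may differ between versions), and a cropper configured with ``topleft``/``bottomright`` positions.
import bob.ip.facedetect

def detect_and_crop_sketch(image, face_cropper):
    # detect exactly one bounding box (even if several or no faces are present)
    bounding_box, quality = bob.ip.facedetect.detect_single_face(image)
    # hand the bounding box corners to the cropper as annotations (attribute names assumed)
    annotations = {'topleft': bounding_box.topleft, 'bottomright': bounding_box.bottomright}
    return face_cropper.crop_face(image, annotations)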