Commit 0855d2c1 authored by Manuel Günther

Merge branch 'face_detect'

parents bb40842a ed44634a
@@ -25,7 +25,7 @@ install:
   - mkdir PythonFaceEvaluation
   - wget https://www.idiap.ch/software/bob/databases/latest/PythonFaceEvaluation.tar.bz2
   - tar -xjf PythonFaceEvaluation.tar.bz2 -C PythonFaceEvaluation
-  - ./bin/patch_CSU.py .
+  - ./bin/patch_CSU.py PythonFaceEvaluation
   - ./bin/buildout buildout:develop="PythonFaceEvaluation ." buildout:extensions=bob.buildout buildout:auto-checkout= buildout:debug=false
 script:
   - ./bin/python -c 'import pkg_resources; from bob.bio.csu import get_config; print(get_config())'
...
@@ -41,10 +41,10 @@ class LDAIR (bob.bio.base.algorithm.Algorithm):
   """
   def __init__(
       self,
-      REGION_ARGS,
-      REGION_KEYWORDS,
-      multiple_model_scoring = 'max', # by default, compute the average between several models and the probe
-      multiple_probe_scoring = 'max'  # by default, compute the average between the model and several probes
+      REGION_ARGS = facerec2010.baseline.lda.CohortLDA_REGIONS,
+      REGION_KEYWORDS = facerec2010.baseline.lda.CohortLDA_KEYWORDS,
+      multiple_model_scoring = 'max', # by default, compute the maximum score between several models and the probe
+      multiple_probe_scoring = 'max'  # by default, compute the maximum score between the model and several probes
   ):
     bob.bio.base.algorithm.Algorithm.__init__(self, multiple_model_scoring=multiple_model_scoring, multiple_probe_scoring=multiple_probe_scoring, **REGION_KEYWORDS)
     self.ldair = facerec2010.baseline.lda.LRLDA(REGION_ARGS, **REGION_KEYWORDS)
...
@@ -41,7 +41,7 @@ class LRPCA (bob.bio.base.algorithm.Algorithm):
   def __init__(
       self,
-      TUNING,
+      TUNING = facerec2010.baseline.lrpca.GBU_TUNING,
       multiple_model_scoring = 'max', # by default, compute the average between several models and the probe
       multiple_probe_scoring = 'max'  # by default, compute the average between the model and several probes
   ):
...
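The two hunks above give both CSU algorithm wrappers usable defaults taken straight from facerec2010. A minimal sketch of the effect, assuming `facerec2010` is importable and that the classes are exported as `bob.bio.csu.algorithm.LDAIR`/`LRPCA` (the test hunks below use the analogous `bob.bio.csu.preprocessor` path):

```python
import facerec2010.baseline.lda
import facerec2010.baseline.lrpca
import bob.bio.csu  # assumed to expose the algorithm classes shown above

# before this commit, the configuration dictionaries had to be passed explicitly:
ldair = bob.bio.csu.algorithm.LDAIR(
    REGION_ARGS=facerec2010.baseline.lda.CohortLDA_REGIONS,
    REGION_KEYWORDS=facerec2010.baseline.lda.CohortLDA_KEYWORDS,
)

# with the new defaults, both wrappers construct without arguments,
# scoring with the maximum over multiple models/probes:
ldair = bob.bio.csu.algorithm.LDAIR()
lrpca = bob.bio.csu.algorithm.LRPCA()
```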
@@ -39,7 +39,7 @@ class LDAIR (bob.bio.base.extractor.Extractor):
   REGION_KEYWORDS : dict
     The region keywords as taken from :py:attr:`facerec2010.baseline.lda.CohortLDA_KEYWORDS`.
   """
-  def __init__(self, REGION_ARGS, REGION_KEYWORDS):
+  def __init__(self, REGION_ARGS = facerec2010.baseline.lda.CohortLDA_REGIONS, REGION_KEYWORDS = facerec2010.baseline.lda.CohortLDA_KEYWORDS):
     bob.bio.base.extractor.Extractor.__init__(self, requires_training=True, split_training_data_by_client=True, **REGION_KEYWORDS)
     self.ldair = facerec2010.baseline.lda.LRLDA(REGION_ARGS, **REGION_KEYWORDS)
     self.layers = len(REGION_ARGS)
...
@@ -37,7 +37,7 @@ class LRPCA (bob.bio.base.extractor.Extractor):
     The tuning for the LRPCA algorithm as taken from the :py:attr:`facerec2010.baseline.lrpca.GBU_TUNING`.
   """
-  def __init__(self, TUNING):
+  def __init__(self, TUNING = facerec2010.baseline.lrpca.GBU_TUNING):
     bob.bio.base.extractor.Extractor.__init__(self, requires_training=True, split_training_data_by_client=True, **TUNING)
     self.lrpca = facerec2010.baseline.lrpca.LRPCA(**TUNING)
...
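The extractor wrappers receive the same defaults, mirroring the algorithms. A sketch under the same module-path assumption, and assuming `bob.bio.base` stores the training flags as attributes (as recent versions do):

```python
import bob.bio.csu  # assumed to expose the extractor classes shown above

# construct with the facerec2010 defaults (CohortLDA_* and GBU_TUNING):
ldair_extractor = bob.bio.csu.extractor.LDAIR()
lrpca_extractor = bob.bio.csu.extractor.LRPCA()

# both declare in __init__ that they need training data, split by client
assert ldair_extractor.requires_training
assert ldair_extractor.split_training_data_by_client
```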
@@ -21,43 +21,66 @@
 import facerec2010
 import pyvision
 import numpy
 import bob.bio.base
+import bob.bio.face

-class LDAIR (bob.bio.base.preprocessor.Preprocessor):
+class LDAIR (bob.bio.face.preprocessor.FaceCrop):
   """This class defines a wrapper for the :py:class:`facerec2010.baseline.lda.LRLDA` class to be used as an image :py:class:`bob.bio.base.preprocessor.Preprocessor`.

   **Parameters:**

-  REGION_ARGS
+  ``REGION_ARGS`` : []
     The region arguments as taken from :py:attr:`facerec2010.baseline.lda.CohortLDA_REGIONS`.

-  REGION_KEYWORDS
+  ``REGION_KEYWORDS`` : {}
     The region keywords as taken from :py:attr:`facerec2010.baseline.lda.CohortLDA_KEYWORDS`.
+
+  ``face_detector`` : :py:class:`bob.bio.face.preprocessor.FaceDetect` or str
+    The face detector to be used to detect the face.
+    Might be an instance of a :py:class:`FaceDetect` or the name of a face detector resource.
   """

-  def __init__(self, REGION_ARGS, REGION_KEYWORDS):
-    bob.bio.base.preprocessor.Preprocessor.__init__(self, **REGION_KEYWORDS)
+  def __init__(self, REGION_ARGS = facerec2010.baseline.lda.CohortLDA_REGIONS, REGION_KEYWORDS = facerec2010.baseline.lda.CohortLDA_KEYWORDS, face_detector = None):
+    bob.bio.base.preprocessor.Preprocessor.__init__(self, REGION_ARGS=str(REGION_ARGS), REGION_KEYWORDS=str(REGION_KEYWORDS), face_detector=str(face_detector))
     self.ldair = facerec2010.baseline.lda.LRLDA(REGION_ARGS, **REGION_KEYWORDS)
     self.layers = len(REGION_ARGS)
+    self.face_detector = bob.bio.face.preprocessor.utils.load_cropper(face_detector)
+    if self.face_detector is not None:
+      assert isinstance(self.face_detector, bob.bio.face.preprocessor.FaceDetect)
+      # assign ourselves as the face cropper to be used after face detection
+      self.face_detector.cropper = self

-  def __call__(self, image, annotations):
-    """Preprocesses the image using the LDA-IR preprocessor :py:meth:`facerec2010.baseline.lda.LRLDA.preprocess`.
+  def _numpy_image(self, image):
+    """Converts the given image into a numpy color image in Bob format"""
+    np_image = image.asMatrix3D()
+    bob_image = numpy.ndarray((np_image.shape[0], np_image.shape[2], np_image.shape[1]), dtype = numpy.uint8)
+    # iterate over color layers
+    for j in range(np_image.shape[0]):
+      bob_image[j,:,:] = np_image[j].transpose()[:,:]
+    return bob_image
+
+  def crop_face(self, image, annotations):
+    """crop_face(image, annotations) -> face
+
+    Executes the face cropping on the given image and returns the cropped version of it.

     **Parameters:**

-    image : :py:class:`pyvision.Image` or :py:class:`numpy.ndarray`
-      The color image that should be preprocessed.
+    ``image`` : 3D :py:class:`numpy.ndarray`
+      The face image to be processed.

-    annotations : dict
+    ``annotations`` : dict
       The eye annotations for the image.
       They need to be specified as ``{'reye' : (re_y, re_x), 'leye' : (le_y, le_x)}``, where right and left is in subject perspective.

     **Returns:**

-    preprocessed : 3D numpy.ndarray
-      The preprocessed color image, in default Bob format.
+    ``face`` : 3D :py:class:`numpy.ndarray`
+      The cropped face.
     """
     # assure that the eye positions are in the set of annotations
     if annotations is None or 'leye' not in annotations or 'reye' not in annotations:
       raise ValueError("The LDA-IR image cropping needs eye positions, but they are not given.")
...
@@ -84,17 +107,38 @@ class LDAIR (bob.bio.base.preprocessor.Preprocessor):
     assert len(tiles) == self.layers
     assert (tiles[0].asMatrix3D() == tiles[1].asMatrix3D()).all()

-    # Additionally, pyvision used images in (x,y)-order.
-    # To be consistent to the (y,x)-order in the facereclib, we have to transpose
-    color_image = tiles[0].asMatrix3D()
-    out_images = numpy.ndarray((color_image.shape[0], color_image.shape[2], color_image.shape[1]), dtype = numpy.uint8)
-    # iterate over color layers
-    for j in range(color_image.shape[0]):
-      out_images[j,:,:] = color_image[j].transpose()[:,:]
-    # WARNING! This contradicts the default way, images are written. Here, we write full color information!
-    return out_images
+    # return the image in the format that Bob knows and understands
+    return self._numpy_image(tiles[0])
+
+  def __call__(self, image, annotations):
+    """Preprocesses the image using the LDA-IR preprocessor :py:meth:`facerec2010.baseline.lda.LRLDA.preprocess`.
+
+    **Parameters:**
+
+    image : :py:class:`pyvision.Image` or :py:class:`numpy.ndarray`
+      The color image that should be preprocessed.
+
+    annotations : dict
+      The eye annotations for the image.
+      They need to be specified as ``{'reye' : (re_y, re_x), 'leye' : (le_y, le_x)}``, where right and left is in subject perspective.
+
+    **Returns:**
+
+    preprocessed : 3D numpy.ndarray
+      The preprocessed color image, in default Bob format.
+    """
+    if self.face_detector is not None:
+      if isinstance(image, pyvision.Image):
+        # the face detector requires numpy arrays
+        image = self._numpy_image(image)
+      # call the face detector with the (transformed) image
+      return self.face_detector.crop_face(image, annotations)
+    return self.crop_face(image, annotations)

   def read_original_data(self, image_file):
...
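A usage sketch for the reworked LDA-IR preprocessor. The `'landmark-detect'` resource name is taken from the new tests below; the input file name and eye positions are hypothetical example values:

```python
import bob.bio.csu

# without a detector, eye annotations are mandatory and cropping is direct
preprocessor = bob.bio.csu.preprocessor.LDAIR()

# with a detector, the image is first converted to a numpy array (if needed),
# the face is localized, and this preprocessor is chained in as the final cropper
detecting = bob.bio.csu.preprocessor.LDAIR(face_detector='landmark-detect')

image = preprocessor.read_original_data('face.png')  # hypothetical input file
annotations = {'reye': (66, 46), 'leye': (66, 82)}   # example eye positions in (y, x)
cropped = preprocessor(image, annotations)           # 3D numpy.ndarray in Bob format
```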
@@ -19,24 +19,49 @@
 import facerec2010
 import pyvision
+import PIL
 import numpy
 import bob.bio.base
+import bob.bio.face

-class LRPCA (bob.bio.base.preprocessor.Preprocessor):
+class LRPCA (bob.bio.face.preprocessor.FaceCrop):
   """This class defines a wrapper for the :py:class:`facerec2010.baseline.lrpca.LRPCA` class to be used as an image :py:class:`bob.bio.base.preprocessor.Preprocessor`.

   **Parameters:**

-  TUNING : dict
+  ``TUNING`` : {}
     The tuning for the LRPCA algorithm as taken from the :py:attr:`facerec2010.baseline.lrpca.GBU_TUNING`.
+
+  ``face_detector`` : :py:class:`bob.bio.face.preprocessor.FaceDetect` or str
+    The face detector to be used to detect the face.
+    Might be an instance of a :py:class:`FaceDetect` or the name of a face detector resource.
   """

-  def __init__(self, TUNING):
-    bob.bio.base.preprocessor.Preprocessor.__init__(self, **TUNING)
+  def __init__(self, TUNING = facerec2010.baseline.lrpca.GBU_TUNING, face_detector = None):
+    bob.bio.base.preprocessor.Preprocessor.__init__(self, TUNING=str(TUNING), face_detector=str(face_detector))
     self.lrpca = facerec2010.baseline.lrpca.LRPCA(**TUNING)
+    self.face_detector = bob.bio.face.preprocessor.utils.load_cropper(face_detector)
+    if self.face_detector is not None:
+      assert isinstance(self.face_detector, bob.bio.face.preprocessor.FaceDetect)
+      # assign ourselves as the face cropper to be used after face detection
+      self.face_detector.cropper = self

-  def __call__(self, image, annotations):
-    """__call__(image, annotations) -> preprocessed
+  def _py_image(self, image):
+    """Converts the given image to a pyvision image."""
+    pil_image = PIL.Image.new("L", (image.shape[1], image.shape[0]))
+    # TODO: Test if there is any faster method to convert the image type
+    for y in range(image.shape[0]):
+      for x in range(image.shape[1]):
+        # copy image content (re-order [y,x] to (x,y))
+        pil_image.putpixel((x, y), image[y, x])
+    # convert to pyvision image
+    py_image = pyvision.Image(pil_image)
+    return py_image
+
+  def crop_face(self, image, annotations):
+    """crop_face(image, annotations) -> preprocessed

     Preprocesses the image using the :py:meth:`facerec2010.baseline.lrpca.LRPCA.preprocess` function.
...
@@ -55,6 +80,8 @@ class LRPCA (bob.bio.base.preprocessor.Preprocessor):
       The preprocessed image, in default Bob format.
     """
     assert isinstance(image, (pyvision.Image, numpy.ndarray))
+    if isinstance(image, numpy.ndarray):
+      image = self._py_image(image)

     # assure that the eye positions are in the set of annotations
     if annotations is None or 'leye' not in annotations or 'reye' not in annotations:
...
@@ -74,6 +101,35 @@ class LRPCA (bob.bio.base.preprocessor.Preprocessor):
     return tile.asMatrix2D().transpose()

+  def __call__(self, image, annotations):
+    """__call__(image, annotations) -> preprocessed
+
+    Preprocesses the image using the :py:meth:`facerec2010.baseline.lrpca.LRPCA.preprocess` function.
+
+    **Parameters:**
+
+    image : :py:class:`pyvision.Image` or :py:class:`numpy.ndarray`
+      The gray level or color image that should be preprocessed.
+
+    annotations : dict
+      The eye annotations for the image.
+      They need to be specified as ``{'reye' : (re_y, re_x), 'leye' : (le_y, le_x)}``, where right and left is in subject perspective.
+
+    **Returns:**
+
+    preprocessed : numpy.ndarray
+      The preprocessed image, in default Bob format.
+    """
+    if self.face_detector is not None:
+      if isinstance(image, pyvision.Image):
+        # the face detector requires numpy arrays
+        image = image.asMatrix2D().transpose().astype(numpy.float64)
+      # call the face detector with the (transformed) image
+      return self.face_detector.crop_face(image, annotations)
+    return self.crop_face(image, annotations)

   def read_original_data(self, image_file):
     """read_original_data(image_file) -> image
...
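The `_py_image` helper above copies pixel by pixel, and its TODO asks for a faster conversion. A hedged sketch of one alternative, assuming `image` is a 2D uint8 numpy array in Bob's (y, x) layout and that the installed PIL provides `Image.fromarray` (available in Pillow and late PIL releases):

```python
import numpy
import PIL.Image
import pyvision

def _py_image_fast(image):
    """Vectorized variant of _py_image: wraps the numpy array via
    PIL.Image.fromarray instead of calling putpixel per pixel."""
    # fromarray maps arr[y, x] to pixel (x, y), which is exactly the
    # re-ordering the putpixel loop performs, so no transpose is needed
    pil_image = PIL.Image.fromarray(image.astype(numpy.uint8), mode="L")
    return pyvision.Image(pil_image)
```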
@@ -28,6 +28,8 @@ regenerate_refs = False
 import bob.bio.base
 import bob.bio.face
 import facerec2010
+import bob.io.base.test_utils
+import shutil

 from bob.bio.face.test.test_extractors import _compare
...
@@ -52,6 +52,19 @@ def test_lrpca():
   # for some reason, LR-PCA produces slightly different outputs on some machines
   _compare(preprocessor(image, annotation), reference, preprocessor.write_data, preprocessor.read_data, atol=1., rtol=1e-2)

+def test_lrpca_detect():
+  # create preprocessor including face detector
+  preprocessor = bob.bio.csu.preprocessor.LRPCA(face_detector='landmark-detect')
+  # read input
+  image, annotation = _image(preprocessor), _annotation()
+  # execute face cropper
+  reference = pkg_resources.resource_filename('bob.bio.csu.test', 'data/lrpca_detected.hdf5')
+  # for some reason, LR-PCA produces slightly different outputs on some machines
+  _compare(preprocessor(image, annotation), reference, preprocessor.write_data, preprocessor.read_data, atol=1., rtol=1e-2)
+

 def test_ldair():
   # load resource
...
@@ -65,3 +78,15 @@ def test_ldair():
   # execute face cropper
   reference = pkg_resources.resource_filename('bob.bio.csu.test', 'data/ldair_preprocessed.hdf5')
   _compare(preprocessor(image, annotation), reference, preprocessor.write_data, preprocessor.read_data, atol=1.)
+
+def test_ldair_detect():
+  # create preprocessor including face detector
+  preprocessor = bob.bio.csu.preprocessor.LDAIR(face_detector='landmark-detect')
+  # read input
+  image, annotation = _image(preprocessor), _annotation()
+  # execute face cropper
+  reference = pkg_resources.resource_filename('bob.bio.csu.test', 'data/ldair_detected.hdf5')
+  # for some reason, LDA-IR produces slightly different outputs on some machines
+  _compare(preprocessor(image, annotation), reference, preprocessor.write_data, preprocessor.read_data, atol=1., rtol=1e-2)
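Both new tests follow the existing regression pattern: preprocess a sample image and compare against a stored HDF5 reference within a tolerance. A sketch of the same check outside the test harness, assuming a sample image is at hand (the real tests obtain theirs via the `_image()`/`_annotation()` helpers, and compare through `_compare`):

```python
import numpy
import pkg_resources
import bob.bio.csu

preprocessor = bob.bio.csu.preprocessor.LRPCA(face_detector='landmark-detect')

image = preprocessor.read_original_data('face.png')  # hypothetical input file
annotation = {'reye': (66, 46), 'leye': (66, 82)}    # example eye positions in (y, x)

preprocessed = preprocessor(image, annotation)
reference = preprocessor.read_data(
    pkg_resources.resource_filename('bob.bio.csu.test', 'data/lrpca_detected.hdf5'))

# same tolerances as the tests: LR-PCA output varies slightly across machines
assert numpy.allclose(preprocessed, reference, atol=1., rtol=1e-2)
```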
@@ -37,7 +37,8 @@
 develop = src/bob.extension
           src/bob.learn.boosting
           src/bob.ip.facedetect
           src/bob.ip.flandmark
-          ${buildout:csu-dir}/PythonFaceEvaluation
+          src/bob.bio.face
+          ${buildout:csu-dir}
           .
 csu-dir = [PATH_TO_YOUR_CSU_COPY]
...
@@ -71,6 +72,7 @@
 bob.bio.base = git https://github.com/bioidiap/bob.bio.base
 bob.learn.boosting = git https://github.com/bioidiap/bob.learn.boosting
 bob.ip.facedetect = git https://github.com/bioidiap/bob.ip.facedetect
 bob.ip.flandmark = git https://github.com/bioidiap/bob.ip.flandmark
+bob.bio.face = git https://github.com/bioidiap/bob.bio.face

 [scripts]
 recipe = bob.buildout:scripts
...
@@ -34,9 +34,9 @@ def main():
     raise ValueError("The given directory '%s' does not exist." % sys.argv[1])

-  base_dir = os.path.join(sys.argv[1], 'PythonFaceEvaluation')
+  base_dir = sys.argv[1]
   if not os.path.isdir(base_dir):
-    raise IOError("The given directory '%s' does not contain a 'PythonFaceEvaluation' subdirectory. Please specify the base directory of the PythonFaceEvaluation toolkit." % sys.argv[1])
+    raise IOError("The given directory '%s' does not exist. Please specify the directory of the PythonFaceEvaluation toolkit." % sys.argv[1])

   if os.path.isfile(os.path.join(base_dir, 'setup.py')):
     raise IOError("The given directory '%s' already seems to be patched." % sys.argv[1])
...