Commit 97ef6a7c authored by Manuel Günther's avatar Manuel Günther
Browse files

First version including image preprocessors

parents
*~
*.swp
*.pyc
bin
eggs
parts
.installed.cfg
.mr.developer.cfg
*.egg-info
src
develop-eggs
sphinx
dist
This diff is collapsed.
include README.rst bootstrap-buildout.py buildout.cfg COPYING version.txt
recursive-include doc *.py *.rst
Example buildout environment
============================
This simple example demonstrates how to wrap Bob-based scripts in buildout
environments. This may be useful for homework assignments, tests, or as a way to
distribute code to reproduce your publication. In summary, if you need to give
out code to others, we recommend you do it following this template so your code
can be tested, documented and run in an orderly fashion.
Installation
------------
.. note::
To follow these instructions locally you will need a local copy of this
package. For that, you can use the github tarball API to download the package::
$ wget --no-check-certificate https://github.com/idiap/bob.project.example/tarball/master -O- | tar xz
$ mv idiap-bob.project* bob.project.example
Documentation and Further Information
-------------------------------------
Please refer to the latest Bob user guide, accessible from the `Bob website
<http://idiap.github.com/bob/>`_, for how to create your own packages based on
this example. In particular, the section entitled `Organize Your Work in
Satellite Packages <http://www.idiap.ch/software/bob/docs/releases/last/sphinx/html/OrganizeYourCode.html>`_
contains details on how to setup, build and roll out your code.
#see http://peak.telecommunity.com/DevCenter/setuptools#namespace-packages
__import__('pkg_resources').declare_namespace(__name__)
#see http://peak.telecommunity.com/DevCenter/setuptools#namespace-packages
__import__('pkg_resources').declare_namespace(__name__)
from . import preprocessor
from . import algorithm
from . import test
def get_config():
  """Return a string that describes the build configuration of this package."""
  from bob.extension import get_config as _base_get_config
  return _base_get_config(__name__)


# gets sphinx autodoc done right - don't remove it
__all__ = [name for name in dir() if not name.startswith('_')]
# Configuration file: base image preprocessor.
# Converts input images to gray scale and casts the pixel data to 64-bit
# floats; no face cropping or alignment is performed.
import bob.bio.face
import numpy

preprocessor = bob.bio.face.preprocessor.Base(
  color_channel = 'gray',  # extract the gray channel (RGB input is converted)
  dtype = numpy.float64    # data type of the resulting image
)
#!/usr/bin/env python
# Configuration file: face cropping preprocessor based on annotated eye positions.
import bob.bio.face

# Cropping: the cropped image is 80 pixels high with a 5:4 height-to-width ratio.
# Floor division (`//`) is used so that the size and positions stay integers on
# Python 3 as well; with plain `/` they silently become floats (the original
# code relied on Python-2 integer division).  The resulting values are
# unchanged: width 64, right eye (16, 15), left eye (16, 48).
CROPPED_IMAGE_HEIGHT = 80
CROPPED_IMAGE_WIDTH = CROPPED_IMAGE_HEIGHT * 4 // 5

# eye positions for frontal images, in (y, x) order
RIGHT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 - 1)
LEFT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 * 3)

# define the preprocessor: crop the face such that the annotated eyes are
# placed at the fixed positions defined above
preprocessor = bob.bio.face.preprocessor.FaceCrop(
  cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
  cropped_positions = {'leye' : LEFT_EYE_POS, 'reye' : RIGHT_EYE_POS}
)
#!/usr/bin/env python
# Configuration file: face detection preprocessor.
# Detects the face in the image (ignoring annotations) and crops it using the
# 'face-crop-eyes' cropper (presumably a resource name registered elsewhere in
# the package -- TODO confirm).
import bob.bio.face

preprocessor = bob.bio.face.preprocessor.FaceDetect(
  face_cropper = 'face-crop-eyes'
)
# Configuration file: histogram equalization preprocessor.
# Crops the face with the 'face-crop-eyes' cropper (presumably a registered
# resource name -- TODO confirm) and applies histogram equalization.
import bob.bio.face

preprocessor = bob.bio.face.preprocessor.HistogramEqualization(
  face_cropper = 'face-crop-eyes'
)
# Configuration file: I-Norm LBP preprocessor.
# Crops the face with the 'face-crop-eyes' cropper (presumably a registered
# resource name -- TODO confirm) and applies the INormLBP preprocessing.
import bob.bio.face

preprocessor = bob.bio.face.preprocessor.INormLBP(
  face_cropper = 'face-crop-eyes'
)
# Configuration file: self-quotient image preprocessor.
# Crops the face with the 'face-crop-eyes' cropper (presumably a registered
# resource name -- TODO confirm) and applies the self-quotient-image algorithm.
import bob.bio.face

preprocessor = bob.bio.face.preprocessor.SelfQuotientImage(
  face_cropper = 'face-crop-eyes'
)
# Configuration file: Tan & Triggs preprocessor.
# Crops the face with the 'face-crop-eyes' cropper (presumably a registered
# resource name -- TODO confirm) and applies Tan-Triggs illumination
# normalization.
import bob.bio.face

preprocessor = bob.bio.face.preprocessor.TanTriggs(
  face_cropper = 'face-crop-eyes'
)
import numpy
import bob.io.image
import bob.ip.color
from bob.bio.base.preprocessor import Preprocessor
class Base (Preprocessor):
  """Performs color space adaptations and data type corrections for the given image."""

  def __init__(self, dtype = None, color_channel = 'gray'):
    """Parameters of the constructor of this preprocessor:

    dtype : :py:class:`numpy.dtype` or convertible or ``None``
      The data type that the resulting image will have.

    color_channel : one of ``('gray', 'red', 'green', 'blue')`` or ``None``
      The specific color channel, which should be extracted from the image.
    """
    Preprocessor.__init__(self, dtype=str(dtype), color_channel=color_channel)
    self.channel = color_channel
    self.dtype = dtype

  def color_channel(self, image):
    """Returns the desired channel of the given image.

    Currently, gray, red, green and blue channels are supported.  2D (gray
    level) images are returned unchanged (for the 'gray' channel only); for
    3D (color) images, the requested channel is extracted, or the image is
    converted to gray scale.
    """
    if self.channel is None:
      return image

    if image.ndim == 2:
      if self.channel != 'gray':
        # bug fix: the original referenced the undefined name `channel`,
        # which raised a NameError instead of this ValueError
        raise ValueError("There is no rule to extract a " + self.channel + " image from a gray level image!")
      return image

    if self.channel == 'gray':
      return bob.ip.color.rgb_to_gray(image)
    if self.channel == 'red':
      return image[0,:,:]
    if self.channel == 'green':
      return image[1,:,:]
    if self.channel == 'blue':
      return image[2,:,:]

    # bug fix: the original passed self.channel as a second ValueError argument
    # instead of interpolating it into the message
    raise ValueError("The image channel '%s' is not known or not yet implemented" % self.channel)

  def data_type(self, image):
    """Casts the image to the data type given in the constructor, if any."""
    if self.dtype is not None:
      image = image.astype(self.dtype)
    return image

  def __call__(self, image, annotations = None):
    """Just perform color channel extraction and data type conversion; the annotations are ignored."""
    assert isinstance(image, numpy.ndarray) and image.ndim in (2,3)
    # convert to the desired color channel
    image = self.color_channel(image)
    # convert to the desired data type
    return self.data_type(image)
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Manuel Guenther <Manuel.Guenther@idiap.ch>
# @date: Thu May 24 10:41:42 CEST 2012
#
# Copyright (C) 2011-2012 Idiap Research Institute, Martigny, Switzerland
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, version 3 of the License.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import bob.ip.base
import numpy
from .Base import Base
from bob.bio.base.preprocessor import Preprocessor
class FaceCrop (Base):
  """Crops the face according to the given annotations."""

  def __init__(
      self,
      cropped_image_size,       # resolution of the cropped image, in order (HEIGHT,WIDTH)
      cropped_positions,        # dictionary of the cropped positions, usually: {'reye':(Y,X), 'leye':(Y,X)}
      fixed_positions = None,   # dictionary of FIXED positions in the original image; overrides annotations
      mask_sigma = None,        # the sigma for random values in areas outside the image
      mask_neighbors = 5,       # the number of neighbors to consider while extrapolating
      mask_seed = None,         # the seed for generating random values during extrapolation
      **kwargs                  # parameters to be written in the __str__ method
  ):
    """Parameters of the constructor of this preprocessor:

    cropped_image_size : (int, int)
      The size of the resulting cropped images.

    cropped_positions : dict
      The coordinates in the cropped image, where the annotated points should be put to.
      This parameter is a dictionary with usually two elements, e.g.,
      ``{'reye':(RIGHT_EYE_Y, RIGHT_EYE_X), 'leye':(LEFT_EYE_Y, LEFT_EYE_X)}``.
      However, also other parameters, such as ``{'topleft' : ..., 'bottomright' : ...}``
      are supported, as long as the ``annotations`` in the :py:meth:`__call__` function
      are present.

    fixed_positions : dict or None
      If specified, ignore the annotations from the database and use these fixed
      positions throughout.

    mask_sigma : float or None
      Fill the area outside of image boundaries with random pixels from the border,
      by adding noise to the pixel values.  To disable extrapolation, set this value
      to ``None``.  To disable adding random noise, set it to a negative value or 0.

    mask_neighbors : int
      The number of neighbors used during mask extrapolation.
      See :py:func:`bob.ip.base.extrapolate_mask` for details.

    mask_seed : int or None
      The random seed to apply for mask extrapolation.

      .. warning::
        When run in parallel, the same random seed will be applied to all parallel
        processes.  Hence, results of parallel execution will differ from the
        results in serial execution.

    kwargs
      Remaining keyword parameters passed to the :py:class:`Base` constructor,
      such as ``color_channel`` or ``dtype``.
    """
    Base.__init__(self, **kwargs)
    # call base class constructor (registers the parameters for the __str__ method)
    Preprocessor.__init__(
      self,
      cropped_image_size = cropped_image_size,
      cropped_positions = cropped_positions,
      fixed_positions = fixed_positions,
      mask_sigma = mask_sigma,
      mask_neighbors = mask_neighbors,
      mask_seed = mask_seed
    )

    # check parameters: exactly two annotation points define the cropping geometry
    assert len(cropped_positions) == 2
    if fixed_positions:
      assert len(fixed_positions) == 2

    # copy parameters
    self.cropped_image_size = cropped_image_size
    self.cropped_positions = cropped_positions
    # the two annotation keys in sorted order, e.g. ['leye', 'reye']
    self.cropped_keys = sorted(cropped_positions.keys())
    self.fixed_positions = fixed_positions
    self.mask_sigma = mask_sigma
    self.mask_neighbors = mask_neighbors
    # NOTE(review): `bob.core` is not imported in this module; presumably it
    # becomes available as a side effect of importing bob.ip.base -- confirm,
    # or add an explicit `import bob.core` to the module imports.
    self.mask_rng = bob.core.random.mt19937(mask_seed) if mask_seed is not None else bob.core.random.mt19937()

    # create objects required for face cropping
    self.cropper = bob.ip.base.FaceEyesNorm(crop_size=cropped_image_size, right_eye=cropped_positions[self.cropped_keys[0]], left_eye=cropped_positions[self.cropped_keys[1]])
    # fix: `numpy.bool` was only a deprecated alias of the builtin `bool`
    # (removed in NumPy 1.24); using the builtin yields the identical dtype
    self.cropped_mask = numpy.ndarray(cropped_image_size, bool)

  def crop_face(self, image, annotations = None):
    """Executes the face cropping on the given image and returns the cropped version of it.

    Raises a ValueError when neither ``annotations`` nor the constructor's
    ``fixed_positions`` provide the expected annotation points.
    """
    if self.fixed_positions is not None:
      annotations = self.fixed_positions
    if annotations is None:
      raise ValueError("Cannot perform image cropping since annotations are not given, and no fixed annotations are specified.")

    assert isinstance(annotations, dict)
    if not all(k in annotations for k in self.cropped_keys):
      raise ValueError("At least one of the expected annotations '%s' are not given in '%s'." % (self.cropped_keys, annotations.keys()))

    # create output
    mask = numpy.ones(image.shape, dtype=bool)  # fix: numpy.bool -> bool (see __init__)
    cropped_image = numpy.zeros(self.cropped_image_size)
    self.cropped_mask[:] = False

    # perform the cropping
    self.cropper(
      image,              # input image
      mask,               # full input mask
      cropped_image,      # cropped image
      self.cropped_mask,  # cropped mask
      right_eye = annotations[self.cropped_keys[0]],  # position of first annotation, usually right eye
      left_eye = annotations[self.cropped_keys[1]]    # position of second annotation, usually left eye
    )

    if self.mask_sigma is not None:
      # extrapolate the mask so that pixels outside of the original image
      # region are filled with border pixels
      bob.ip.base.extrapolate_mask(self.cropped_mask, cropped_image, self.mask_sigma, self.mask_neighbors, self.mask_rng)

    return cropped_image

  def __call__(self, image, annotations = None):
    """Aligns the given image according to the given annotations."""
    # convert to the desired color channel
    image = self.color_channel(image)
    # crop face
    image = self.crop_face(image, annotations)
    # convert data type
    return self.data_type(image)
import math
import numpy
import bob.ip.facedetect
import bob.ip.flandmark
import bob.ip.base
import numpy
from .Base import Base
from .utils import load_cropper_only
from bob.bio.base.preprocessor import Preprocessor
class FaceDetect (Base):
  """Detects the face in the given image (ignoring any annotations), localizes
  the eyes, and applies the given face cropper to the detected region."""

  def __init__(
      self,
      face_cropper,                        # the cropper (or registered resource name) applied to the detected face
      cascade = None,                      # file name of an HDF5 detector cascade; None -> default cascade
      use_flandmark = False,               # if True, localize eye landmarks with bob.ip.flandmark
      detection_overlap = 0.2,             # passed to bob.ip.facedetect.detect_single_face
      distance = 2,                        # passed to bob.ip.facedetect.Sampler
      scale_base = math.pow(2., -1./16.),  # passed to bob.ip.facedetect.Sampler
      lowest_scale = 0.125,                # passed to bob.ip.facedetect.Sampler
      mask_sigma = None,                   # NOTE(review): accepted but never used or forwarded here
      mask_neighbors = 5,                  # NOTE(review): accepted but never used or forwarded here
      mask_seed = None,                    # NOTE(review): accepted but never used or forwarded here
      **kwargs                             # passed to the Base constructor (color_channel, dtype)
  ):
    """Performs a face detection in the given image (ignoring any annotations)."""
    # call base class constructors
    Base.__init__(self, **kwargs)
    Preprocessor.__init__(
      self,
      face_cropper = face_cropper,
      cascade = cascade,
      use_flandmark = use_flandmark,
      detection_overlap = detection_overlap,
      distance = distance,
      scale_base = scale_base,
      lowest_scale = lowest_scale
    )

    self.sampler = bob.ip.facedetect.Sampler(scale_factor=scale_base, lowest_scale=lowest_scale, distance=distance)
    if cascade is None:
      self.cascade = bob.ip.facedetect.default_cascade()
    else:
      # NOTE(review): `bob.io.base` is not imported in this module; presumably
      # it is made available by the bob.ip.facedetect import -- confirm, or add
      # an explicit `import bob.io.base`.
      self.cascade = bob.ip.facedetect.Cascade(bob.io.base.HDF5File(cascade))
    self.detection_overlap = detection_overlap
    self.flandmark = bob.ip.flandmark.Flandmark() if use_flandmark else None
    # quality of the latest detection; updated by crop_face
    self.quality = None

    self.cropper = load_cropper_only(face_cropper)

  def _landmarks(self, image, bounding_box):
    """Returns the eye landmarks inside the detected bounding box, either from
    the flandmark localizer or estimated from the bounding box geometry."""
    if self.flandmark is not None:
      # use the flandmark detector, which expects a uint8 gray level image
      uint8_image = image.astype(numpy.uint8)
      # make the bounding box square shape by extending the horizontal position by 2 pixels times width/20
      bb = bob.ip.facedetect.BoundingBox(topleft = (bounding_box.top_f, bounding_box.left_f - bounding_box.size[1] / 10.), size = bounding_box.size)
      # clip the bounding box to the image boundary
      top = max(bb.top, 0)
      left = max(bb.left, 0)
      bottom = min(bb.bottom, image.shape[0])
      right = min(bb.right, image.shape[1])
      landmarks = self.flandmark.locate(uint8_image, top, left, bottom-top, right-left)

      if landmarks is not None and len(landmarks):
        # average the two detected landmarks per eye to obtain the eye centers
        return {
          'reye' : ((landmarks[1][0] + landmarks[5][0])/2., (landmarks[1][1] + landmarks[5][1])/2.),
          'leye' : ((landmarks[2][0] + landmarks[6][0])/2., (landmarks[2][1] + landmarks[6][1])/2.)
        }
      else:
        # bug fix: the original called `utils.warn`, but no `utils` name is in
        # scope in this module (NameError); use the standard library instead
        import warnings
        warnings.warn("Could not detect landmarks -- using estimated landmarks")

    # estimate the eye positions from the default locations in the bounding box
    return bob.ip.facedetect.expected_eye_positions(bounding_box)

  def crop_face(self, image, annotations=None):
    """Detects the face and returns the cropped face region; given annotations are ignored."""
    # detect the single best face; also remember the detection quality
    bounding_box, self.quality = bob.ip.facedetect.detect_single_face(image, self.cascade, self.sampler, self.detection_overlap)
    # get the eye landmarks
    annotations = self._landmarks(image, bounding_box)
    # apply face cropping
    return self.cropper.crop_face(image, annotations)

  def __call__(self, image, annotations=None):
    """Detects and crops the face in the given image; annotations are ignored."""
    # convert to the desired color channel
    image = self.color_channel(image)
    # detect face and crop it
    image = self.crop_face(image)
    # convert data type
    return self.data_type(image)
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment