#!/usr/bin/env python
# vim: set fileencoding=utf-8 :

import facerec2010
import pyvision
import PIL
import numpy
import bob.bio.base
import bob.bio.face

class LRPCA (bob.bio.face.preprocessor.FaceCrop):
  """This class defines a wrapper for the :py:class:`facerec2010.baseline.lda.LRPCA` class to be used as an image :py:class:`bob.bio.base.preprocessor.Preprocessor`.

  **Parameters:**

  ``TUNING`` : dict
    The tuning for the LRPCA algorithm, by default taken from :py:attr:`facerec2010.baseline.lrpca.GBU_TUNING`.

  ``face_detector`` : :py:class:`bob.bio.face.preprocessor.FaceDetect` or str
    The face detector used to locate the face in the image.
    Might be an instance of :py:class:`bob.bio.face.preprocessor.FaceDetect` or the name of a face detector resource.
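
  **Example:**

  A minimal construction sketch; ``my_detector`` is only a placeholder for a :py:class:`bob.bio.face.preprocessor.FaceDetect` instance (or the name of a face detector resource):

  .. code-block:: python

     # use the default GBU tuning and rely on hand-labeled eye positions
     preprocessor = LRPCA()
     # or let a face detector locate the face before cropping
     preprocessor = LRPCA(face_detector = my_detector)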
  """

  def __init__(self, TUNING = facerec2010.baseline.lrpca.GBU_TUNING, face_detector = None):
    bob.bio.base.preprocessor.Preprocessor.__init__(self, TUNING=str(TUNING), face_detector=str(face_detector), read_original_data=self.read_original_data)
    self.lrpca = facerec2010.baseline.lrpca.LRPCA(**TUNING)
    self.face_detector = bob.bio.face.preprocessor.utils.load_cropper(face_detector)

    if self.face_detector is not None:
      assert isinstance(self.face_detector, bob.bio.face.preprocessor.FaceDetect)
      # assign ourselves to be the face cropper that is used after face detection
      self.face_detector.cropper = self

  def _py_image(self, image):
    """Converts the given image to pyvision images."""
    pil_image = PIL.Image.new("L",(image.shape[1],image.shape[0]))
    # TODO: Test if there is any faster method to convert the image type
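    # (PIL.Image.fromarray(image.astype(numpy.uint8)) might be such a faster
    #  alternative, but it is left untested here.)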
    for y in range(image.shape[0]):
      for x in range(image.shape[1]):
        # copy image content (re-order [y,x] to (x,y))
        pil_image.putpixel((x,y),image[y,x])

    # convert to pyvision image
    py_image = pyvision.Image(pil_image)
    return py_image


  def crop_face(self, image, annotations):
    """__call__(image, annotations) -> preprocessed
    Preprocesses the image using the :py:meth:`facerec2010.baseline.lrpca.LRPCA.preprocess` function.

    **Parameters:**

    image : :py:class:`pyvision.Image` or :py:class:`numpy.ndarray`
      The gray level or color image that should be preprocessed.

    annotations : dict
      The eye annotations for the image.
      They need to be specified as ``{'reye' : (re_y, re_x), 'leye' : (le_y, le_x)}``, where right and left is in subject perspective.

    **Returns:**

    preprocessed : numpy.ndarray
      The preprocessed image, in default Bob format.
    """
    assert isinstance(image, (pyvision.Image, numpy.ndarray))
    if isinstance(image, numpy.ndarray):
      image = self._py_image(image)

    # make sure that the eye positions are part of the annotations
    if annotations is None or 'leye' not in annotations or 'reye' not in annotations:
      raise ValueError("The LRPCA image cropping needs eye positions, but they are not given.")

    # Warning! Left and right eye are mixed up here!
    # The lrpca preprocess expects left_eye_x < right_eye_x
    tile = self.lrpca.preprocess(
        image,
        rect=None,
        leye = pyvision.Point(annotations['reye'][1], annotations['reye'][0]),
        reye = pyvision.Point(annotations['leye'][1], annotations['leye'][0])
    )

    # pyvision uses images in (x,y)-order.
    # To be consistent with the (y,x)-order in Bob, we have to transpose
    return tile.asMatrix2D().transpose()


  def __call__(self, image, annotations):
    """__call__(image, annotations) -> preprocessed
    Preprocesses the image using the :py:meth:`facerec2010.baseline.lrpca.LRPCA.preprocess` function.

    **Parameters:**

    image : :py:class:`pyvision.Image` or :py:class:`numpy.ndarray`
      The gray level or color image that should be preprocessed.

    annotations : dict
      The eye annotations for the image.
      They need to be specified as ``{'reye' : (re_y, re_x), 'leye' : (le_y, le_x)}``, where right and left is in subject perspective.

    **Returns:**

    preprocessed : numpy.ndarray
      The preprocessed image, in default Bob format.
    """
    if self.face_detector is not None:
      if isinstance(image, pyvision.Image):
        # the face detector requires numpy arrays
        image = image.asMatrix2D().transpose().astype(numpy.float64)
      # call the face detector with the (transformed) image
      return self.face_detector.crop_face(image, annotations)

    return self.crop_face(image, annotations)



  def read_original_data(self, image_file):
    """read_original_data(image_file) -> image

    Reads the original images using functionality from pyvision.

    **Parameters:**

    image_file : str
      The image file to be read; it can contain a gray level or a color image.

    **Returns:**

    image : :py:class:`pyvision.Image`
      The image read from file.
    """
    # we use pyvision to read the images. Hence, we don't have to struggle with conversion here
    return pyvision.Image(str(image_file))
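

# Usage sketch: this is how the preprocessor is typically driven when eye
# annotations are available up front; the file name and the eye coordinates
# below are placeholders only.
if __name__ == '__main__':
  preprocessor = LRPCA()
  image = preprocessor.read_original_data('face.png')
  # eye centers are given as (y, x), with right and left in subject perspective
  cropped = preprocessor(image, {'reye': (180, 140), 'leye': (180, 220)})
  print(cropped.shape)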