diff --git a/bob/bio/face/__init__.py b/bob/bio/face/__init__.py
index 6438579be1eceed2d158323614af4445e8d60374..e672118fa85a1fe83c486efa4e2119cc327c00c5 100644
--- a/bob/bio/face/__init__.py
+++ b/bob/bio/face/__init__.py
@@ -1,4 +1,5 @@
 from . import preprocessor
+from . import extractor
 from . import algorithm
 
 from . import test
diff --git a/bob/bio/face/config/extractor/__init__.py b/bob/bio/face/config/extractor/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/bob/bio/face/config/extractor/dct_blocks.py b/bob/bio/face/config/extractor/dct_blocks.py
new file mode 100644
index 0000000000000000000000000000000000000000..87fc18fa206e16c1b3309098c954b0ce0688f7e8
--- /dev/null
+++ b/bob/bio/face/config/extractor/dct_blocks.py
@@ -0,0 +1,9 @@
+#!/usr/bin/env python
+
+import bob.bio.face
+
+extractor = bob.bio.face.extractor.DCTBlocks(
+    block_size = 12,
+    block_overlap = 11,
+    number_of_dct_coefficients = 45
+)
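+
+# Usage sketch (illustrative only, mirroring the unit tests): once the
+# 'dct-blocks' entry point is registered in setup.py, this configuration can be
+# loaded by name and applied to a preprocessed (cropped, 2D float64) face image:
+#
+#   import bob.bio.base
+#   dct = bob.bio.base.load_resource("dct-blocks", "extractor")
+#   feature = dct(cropped_image)   # one row of DCT coefficients per block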
diff --git a/bob/bio/face/config/extractor/eigenface.py b/bob/bio/face/config/extractor/eigenface.py
new file mode 100644
index 0000000000000000000000000000000000000000..7fae79ed1297536894ec5948bb08fa9176ed3c91
--- /dev/null
+++ b/bob/bio/face/config/extractor/eigenface.py
@@ -0,0 +1,8 @@
+#!/usr/bin/env python
+
+import bob.bio.face
+
+# compute eigenfaces using the training database
+extractor = bob.bio.face.extractor.Eigenface(
+    subspace_dimension = 100
+)
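+
+# Note: in contrast to the other extractors, Eigenface requires training
+# (requires_training = True); the bob.bio.base tool chain trains the PCA
+# projection matrix on the training set before the extractor is applied.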
diff --git a/bob/bio/face/config/extractor/grid_graph.py b/bob/bio/face/config/extractor/grid_graph.py
new file mode 100644
index 0000000000000000000000000000000000000000..02b93c0a7b0f8331fc4495b6453c007450850240
--- /dev/null
+++ b/bob/bio/face/config/extractor/grid_graph.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python
+
+import bob.bio.base
+import bob.bio.face
+import math
+
+# load the face cropping parameters
+cropper = bob.bio.base.load_resource("face-crop-eyes", "preprocessor")
+
+extractor = bob.bio.face.extractor.GridGraph(
+    # Gabor parameters
+    gabor_sigma = math.sqrt(2.) * math.pi,
+
+    # what kind of information to extract
+    normalize_gabor_jets = True,
+
+    # setup of the fixed grid
+    node_distance = (4, 4),
+    first_node = (6, 6),
+    image_resolution = cropper.cropped_image_size
+)
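+
+# Alternative setup (sketch only, not used by this configuration): instead of
+# the fixed grid above, the graph nodes can be aligned to the eye positions of
+# the face cropper, e.g.:
+#
+#   extractor = bob.bio.face.extractor.GridGraph(
+#       gabor_sigma = math.sqrt(2.) * math.pi,
+#       eyes = cropper.cropped_positions,
+#       nodes_between_eyes = 4, nodes_along_eyes = 2,
+#       nodes_above_eyes = 3, nodes_below_eyes = 7
+#   )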
diff --git a/bob/bio/face/config/extractor/lgbphs.py b/bob/bio/face/config/extractor/lgbphs.py
new file mode 100644
index 0000000000000000000000000000000000000000..f4f6e9f2595d951dd86807a779463b90d96483c8
--- /dev/null
+++ b/bob/bio/face/config/extractor/lgbphs.py
@@ -0,0 +1,17 @@
+#!/usr/bin/env python
+
+import bob.bio.face
+import math
+
+# feature extraction
+extractor = bob.bio.face.extractor.LGBPHS(
+    # block setup
+    block_size = 10,
+    block_overlap = 4,
+    # Gabor parameters
+    gabor_sigma = math.sqrt(2.) * math.pi,
+    # LBP setup (we use the defaults)
+
+    # histogram setup
+    sparse_histogram = True
+)
diff --git a/bob/bio/face/extractor/DCTBlocks.py b/bob/bio/face/extractor/DCTBlocks.py
new file mode 100644
index 0000000000000000000000000000000000000000..c57ed7d417f95f5ac313281ce286931a256ba816
--- /dev/null
+++ b/bob/bio/face/extractor/DCTBlocks.py
@@ -0,0 +1,57 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Laurent El Shafey <Laurent.El-Shafey@idiap.ch>
+
+"""Features for face recognition"""
+
+import bob.ip.base
+import numpy
+
+from bob.bio.base.extractor import Extractor
+
+class DCTBlocks (Extractor):
+  """Extracts DCT blocks from the given image"""
+
+  def __init__(
+      self,
+      block_size = 12,    # one or two parameters for block size
+      block_overlap = 11, # one or two parameters for block overlap
+      number_of_dct_coefficients = 45,
+      normalize_blocks = True,
+      normalize_dcts = True,
+      auto_reduce_coefficients = False
+  ):
+
+    # call base class constructor
+    Extractor.__init__(
+        self,
+        block_size = block_size,
+        block_overlap = block_overlap,
+        number_of_dct_coefficients = number_of_dct_coefficients,
+        normalize_blocks = normalize_blocks,
+        normalize_dcts = normalize_dcts,
+        auto_reduce_coefficients = auto_reduce_coefficients
+    )
+
+    # block parameters
+    block_size = block_size if isinstance(block_size, (tuple, list)) else (block_size, block_size)
+    block_overlap = block_overlap if isinstance(block_overlap, (tuple, list)) else (block_overlap, block_overlap)
+
+    if block_size[0] < block_overlap[0] or block_size[1] < block_overlap[1]:
+      raise ValueError("The overlap '%s' is bigger than the block size '%s'. This won't work. Please check your setup!"%(block_overlap, block_size))
+    if block_size[0] * block_size[1] <= number_of_dct_coefficients:
+      if auto_reduce_coefficients:
+        number_of_dct_coefficients = block_size[0] * block_size[1] - 1
+      else:
+        raise ValueError("You selected more coefficients %d than your blocks have %d. This won't work. Please check your setup!"%(number_of_dct_coefficients, block_size[0] * block_size[1]))
+
+    self.dct_features = bob.ip.base.DCTFeatures(number_of_dct_coefficients, block_size, block_overlap, normalize_blocks, normalize_dcts)
+
+  def __call__(self, image):
+    """Computes and returns DCT blocks for the given input image"""
+    assert isinstance(image, numpy.ndarray)
+    assert image.ndim == 2
+    assert image.dtype == numpy.float64
+
+    # Computes DCT features
+    return self.dct_features(image)
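+
+# Note (based on the unit tests): the returned feature is a 2D float64 array
+# with one row per block; with the default block normalization the first (DC)
+# coefficient is dropped, so each row holds number_of_dct_coefficients - 1 values.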
diff --git a/bob/bio/face/extractor/Eigenface.py b/bob/bio/face/extractor/Eigenface.py
new file mode 100644
index 0000000000000000000000000000000000000000..ad8bb884b1df7b4e3f516151978bdd3389394ab4
--- /dev/null
+++ b/bob/bio/face/extractor/Eigenface.py
@@ -0,0 +1,54 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Manuel Guenther <Manuel.Guenther@idiap.ch>
+
+import numpy
+
+import bob.learn.linear
+import bob.io.base
+
+from bob.bio.base.extractor import Extractor
+
+import logging
+logger = logging.getLogger("bob.bio.face")
+
+class Eigenface (Extractor):
+  """Extracts grid graphs from the images"""
+
+  def __init__(self, subspace_dimension):
+    # We have to register that this function will need a training step
+    Extractor.__init__(self, requires_training = True, subspace_dimension = subspace_dimension)
+    self.subspace_dimension = subspace_dimension
+
+
+  def _check_data(self, data):
+    assert isinstance(data, numpy.ndarray)
+    assert data.ndim == 2
+    assert data.dtype == numpy.float64
+
+
+  def train(self, image_list, extractor_file):
+    """Trains the eigenface extractor using the given list of training images"""
+    [self._check_data(image) for image in image_list]
+
+    # Initializes an array for the data
+    data = numpy.vstack([image.flatten() for image in image_list])
+
+    logger.info("  -> Training LinearMachine using PCA (SVD)")
+    t = bob.learn.linear.PCATrainer()
+    self.machine, __eig_vals = t.train(data)
+    # Machine: get shape, then resize
+    self.machine.resize(self.machine.shape[0], self.subspace_dimension)
+    self.machine.save(bob.io.base.HDF5File(extractor_file, "w"))
+
+
+  def load(self, extractor_file):
+    # read PCA projector
+    self.machine = bob.learn.linear.Machine(bob.io.base.HDF5File(extractor_file))
+
+
+  def __call__(self, image):
+    """Projects the data using the stored covariance matrix"""
+    self._check_data(image)
+    # Projects the data
+    return self.machine(image.flatten())
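+
+# Usage sketch (illustrative only, mirroring test_extractors.py; the file name
+# is hypothetical). Within the bob.bio.base tool chain training happens
+# automatically, but the extractor can also be exercised by hand:
+#
+#   extractor = Eigenface(subspace_dimension = 5)
+#   extractor.train(list_of_2d_float64_images, "eigenface_extractor.hdf5")
+#   extractor.load("eigenface_extractor.hdf5")
+#   feature = extractor(image)   # 1D array of length subspace_dimension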
diff --git a/bob/bio/face/extractor/GridGraph.py b/bob/bio/face/extractor/GridGraph.py
new file mode 100644
index 0000000000000000000000000000000000000000..783521cdc4c3bce0bcb060087d5a8a3325bf88af
--- /dev/null
+++ b/bob/bio/face/extractor/GridGraph.py
@@ -0,0 +1,136 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Manuel Guenther <Manuel.Guenther@idiap.ch>
+
+import bob.ip.gabor
+import bob.io.base
+
+import numpy
+import math
+from bob.bio.base.extractor import Extractor
+
+class GridGraph (Extractor):
+  """Extracts grid graphs from the images"""
+
+  def __init__(
+      self,
+      # Gabor parameters
+      gabor_directions = 8,
+      gabor_scales = 5,
+      gabor_sigma = 2. * math.pi,
+      gabor_maximum_frequency = math.pi / 2.,
+      gabor_frequency_step = math.sqrt(.5),
+      gabor_power_of_k = 0,
+      gabor_dc_free = True,
+
+      # what kind of information to extract
+      normalize_gabor_jets = True,
+
+      # setup of the aligned grid
+      eyes = None, # if set, the grid setup will be aligned to the eye positions {'leye' : LEFT_EYE_POS, 'reye' : RIGHT_EYE_POS},
+      nodes_between_eyes = 4,
+      nodes_along_eyes = 2,
+      nodes_above_eyes = 3,
+      nodes_below_eyes = 7,
+
+      # setup of static grid
+      node_distance = None,    # one or two integral values
+      image_resolution = None, # always two integral values
+      first_node = None,       # one or two integral values, or None -> automatically determined
+  ):
+
+    # call base class constructor
+    Extractor.__init__(
+        self,
+
+        gabor_directions = gabor_directions,
+        gabor_scales = gabor_scales,
+        gabor_sigma = gabor_sigma,
+        gabor_maximum_frequency = gabor_maximum_frequency,
+        gabor_frequency_step = gabor_frequency_step,
+        gabor_power_of_k = gabor_power_of_k,
+        gabor_dc_free = gabor_dc_free,
+        normalize_gabor_jets = normalize_gabor_jets,
+        eyes = eyes,
+        nodes_between_eyes = nodes_between_eyes,
+        nodes_along_eyes = nodes_along_eyes,
+        nodes_above_eyes = nodes_above_eyes,
+        nodes_below_eyes = nodes_below_eyes,
+        node_distance = node_distance,
+        image_resolution = image_resolution,
+        first_node = first_node
+    )
+
+    # create Gabor wavelet transform class
+    self.gwt = bob.ip.gabor.Transform(
+        number_of_scales = gabor_scales,
+        number_of_directions = gabor_directions,
+        sigma = gabor_sigma,
+        k_max = gabor_maximum_frequency,
+        k_fac = gabor_frequency_step,
+        power_of_k = gabor_power_of_k,
+        dc_free = gabor_dc_free
+    )
+
+    # create graph extractor
+    if eyes is not None:
+      self.graph = bob.ip.gabor.Graph(
+          righteye = [int(e) for e in eyes['reye']],
+          lefteye = [int(e) for e in eyes['leye']],
+          between = int(nodes_between_eyes),
+          along = int(nodes_along_eyes),
+          above = int(nodes_above_eyes),
+          below = int(nodes_below_eyes)
+      )
+    else:
+      if node_distance is None or image_resolution is None:
+        raise ValueError("Please specify either 'eyes' or the grid parameters 'first_node', 'last_node', and 'node_distance'!")
+      if isinstance(node_distance, (int, float)):
+         node_distance = (int(node_distance), int(node_distance))
+      if first_node is None:
+        first_node = [0,0]
+        for i in (0,1):
+          offset = int((image_resolution[i] - int(image_resolution[i]/node_distance[i])*node_distance[i]) / 2)
+          if offset < node_distance[i]//2: # This is not tested, but should ALWAYS be the case.
+            offset += node_distance[i]//2
+          first_node[i] = offset
+      last_node = tuple([int(image_resolution[i] - max(first_node[i],1)) for i in (0,1)])
+
+      # take the specified nodes
+      self.graph = bob.ip.gabor.Graph(
+          first = first_node,
+          last = last_node,
+          step = node_distance
+      )
+
+    self.normalize_jets = normalize_gabor_jets
+    self.trafo_image = None
+
+  def __call__(self, image):
+    assert image.ndim == 2
+    assert isinstance(image, numpy.ndarray)
+    assert image.dtype == numpy.float64
+
+    if self.trafo_image is None or self.trafo_image.shape[1:3] != image.shape:
+      # create trafo image
+      self.trafo_image = numpy.ndarray((self.gwt.number_of_wavelets, image.shape[0], image.shape[1]), numpy.complex128)
+
+    # perform Gabor wavelet transform
+    self.gwt.transform(image, self.trafo_image)
+
+    # extract face graph
+    jets = self.graph.extract(self.trafo_image)
+
+    # normalize the Gabor jets of the graph only
+    if self.normalize_jets:
+      [j.normalize() for j in jets]
+
+    # return the extracted face graph
+    return jets
+
+  def write_feature(self, feature, feature_file):
+    feature_file = feature_file if isinstance(feature_file, bob.io.base.HDF5File) else bob.io.base.HDF5File(feature_file, 'w')
+    bob.ip.gabor.save_jets(feature, feature_file)
+
+  def read_feature(self, feature_file):
+    return bob.ip.gabor.load_jets(bob.io.base.HDF5File(feature_file))
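+
+# Note (sketch only): the extracted feature is not a numpy array but a list of
+# bob.ip.gabor.Jet objects, which is why the default HDF5 I/O of the base class
+# is overridden with bob.ip.gabor.save_jets / load_jets above. A round trip
+# would look like (file name illustrative):
+#
+#   jets = extractor(image)
+#   extractor.write_feature(jets, "graph.hdf5")
+#   jets = extractor.read_feature("graph.hdf5")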
diff --git a/bob/bio/face/extractor/LGBPHS.py b/bob/bio/face/extractor/LGBPHS.py
new file mode 100644
index 0000000000000000000000000000000000000000..35d60bda08fe50bdbee13802b5687e804de527b5
--- /dev/null
+++ b/bob/bio/face/extractor/LGBPHS.py
@@ -0,0 +1,195 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Manuel Guenther <Manuel.Guenther@idiap.ch>
+
+import bob.ip.gabor
+import bob.ip.base
+
+import numpy
+import math
+
+from bob.bio.base.extractor import Extractor
+
+class LGBPHS (Extractor):
+  """Extractor for local Gabor binary pattern histogram sequences"""
+
+  def __init__(
+      self,
+      # Block setup
+      block_size,    # one or two parameters for block size
+      block_overlap = 0, # one or two parameters for block overlap
+      # Gabor parameters
+      gabor_directions = 8,
+      gabor_scales = 5,
+      gabor_sigma = 2. * math.pi,
+      gabor_maximum_frequency = math.pi / 2.,
+      gabor_frequency_step = math.sqrt(.5),
+      gabor_power_of_k = 0,
+      gabor_dc_free = True,
+      use_gabor_phases = False,
+      # LBP parameters
+      lbp_radius = 2,
+      lbp_neighbor_count = 8,
+      lbp_uniform = True,
+      lbp_circular = True,
+      lbp_rotation_invariant = False,
+      lbp_compare_to_average = False,
+      lbp_add_average = False,
+      # histogram options
+      sparse_histogram = False,
+      split_histogram = None
+  ):
+    """Initializes the local Gabor binary pattern histogram sequence tool chain with the given file selector object"""
+
+    # call base class constructor
+    Extractor.__init__(
+        self,
+
+        block_size = block_size,
+        block_overlap = block_overlap,
+        gabor_directions = gabor_directions,
+        gabor_scales = gabor_scales,
+        gabor_sigma = gabor_sigma,
+        gabor_maximum_frequency = gabor_maximum_frequency,
+        gabor_frequency_step = gabor_frequency_step,
+        gabor_power_of_k = gabor_power_of_k,
+        gabor_dc_free = gabor_dc_free,
+        use_gabor_phases = use_gabor_phases,
+        lbp_radius = lbp_radius,
+        lbp_neighbor_count = lbp_neighbor_count,
+        lbp_uniform = lbp_uniform,
+        lbp_circular = lbp_circular,
+        lbp_rotation_invariant = lbp_rotation_invariant,
+        lbp_compare_to_average = lbp_compare_to_average,
+        lbp_add_average = lbp_add_average,
+        sparse_histogram = sparse_histogram,
+        split_histogram = split_histogram
+    )
+
+    # block parameters
+    self.block_size = block_size if isinstance(block_size, (tuple, list)) else (block_size, block_size)
+    self.block_overlap = block_overlap if isinstance(block_overlap, (tuple, list)) else (block_overlap, block_overlap)
+    if self.block_size[0] < self.block_overlap[0] or self.block_size[1] < self.block_overlap[1]:
+      raise ValueError("The overlap is bigger than the block size. This won't work. Please check your setup!")
+
+    # Gabor wavelet transform class
+    self.gwt = bob.ip.gabor.Transform(
+        number_of_scales = gabor_scales,
+        number_of_directions = gabor_directions,
+        sigma = gabor_sigma,
+        k_max = gabor_maximum_frequency,
+        k_fac = gabor_frequency_step,
+        power_of_k = gabor_power_of_k,
+        dc_free = gabor_dc_free
+    )
+    self.trafo_image = None
+    self.use_phases = use_gabor_phases
+
+    self.lbp = bob.ip.base.LBP(
+        neighbors = lbp_neighbor_count,
+        radius = float(lbp_radius),
+        circular = lbp_circular,
+        to_average = lbp_compare_to_average,
+        add_average_bit = lbp_add_average,
+        uniform = lbp_uniform,
+        rotation_invariant = lbp_rotation_invariant,
+        border_handling = 'wrap'
+    )
+
+    self.split = split_histogram
+    self.sparse = sparse_histogram
+    if self.sparse and self.split:
+      raise ValueError("Sparse histograms cannot be split! Check your setup!")
+
+
+  def _fill(self, lgbphs_array, lgbphs_blocks, j):
+    """Copies the given array into the given blocks"""
+    # fill array in the desired shape
+    if self.split is None:
+      start = j * self.n_bins * self.n_blocks
+      for b in range(self.n_blocks):
+        lgbphs_array[start + b * self.n_bins : start + (b+1) * self.n_bins] = lgbphs_blocks[b][:]
+    elif self.split == 'blocks':
+      for b in range(self.n_blocks):
+        lgbphs_array[b, j * self.n_bins : (j+1) * self.n_bins] = lgbphs_blocks[b][:]
+    elif self.split == 'wavelets':
+      for b in range(self.n_blocks):
+        lgbphs_array[j, b * self.n_bins : (b+1) * self.n_bins] = lgbphs_blocks[b][:]
+    elif self.split == 'both':
+      for b in range(self.n_blocks):
+        lgbphs_array[j * self.n_blocks + b, 0 : self.n_bins] = lgbphs_blocks[b][:]
+
+  def _sparsify(self, array):
+    """This function generates a sparse histogram from a non-sparse one."""
+    if not self.sparse:
+      return array
+    if len(array.shape) == 2 and array.shape[0] == 2:
+      # already sparse
+      return array
+    assert len(array.shape) == 1
+    indices = []
+    values = []
+    for i in range(array.shape[0]):
+      if array[i] != 0.:
+        indices.append(i)
+        values.append(array[i])
+    return numpy.array([indices, values], dtype = numpy.float64)
+
+
+  def __call__(self, image):
+    """Extracts the local Gabor binary pattern histogram sequence from the given image"""
+    assert image.ndim == 2
+    assert isinstance(image, numpy.ndarray)
+    assert image.dtype == numpy.float64
+
+    # perform GWT on image
+    if self.trafo_image is None or self.trafo_image.shape[1:3] != image.shape:
+      # create trafo image
+      self.trafo_image = numpy.ndarray((self.gwt.number_of_wavelets, image.shape[0], image.shape[1]), numpy.complex128)
+
+    # perform Gabor wavelet transform
+    self.gwt.transform(image, self.trafo_image)
+
+    jet_length = self.gwt.number_of_wavelets * (2 if self.use_phases else 1)
+
+    lgbphs_array = None
+    # iterate through the layers of the trafo image
+    for j in range(self.gwt.number_of_wavelets):
+      # compute absolute part of complex response
+      abs_image = numpy.abs(self.trafo_image[j])
+      # Computes LBP histograms
+      abs_blocks = bob.ip.base.lbphs(abs_image, self.lbp, self.block_size, self.block_overlap)
+
+      # remember the number of blocks and the number of bins per block
+      self.n_bins = abs_blocks.shape[1]
+      self.n_blocks = abs_blocks.shape[0]
+
+      if self.split is None:
+        shape = (self.n_blocks * self.n_bins * jet_length,)
+      elif self.split == 'blocks':
+        shape = (self.n_blocks, self.n_bins * jet_length)
+      elif self.split == 'wavelets':
+        shape = (jet_length, self.n_bins * self.n_blocks)
+      elif self.split == 'both':
+        shape = (jet_length * self.n_blocks, self.n_bins)
+      else:
+        raise ValueError("The split parameter must be one of ['blocks', 'wavelets', 'both'] or None")
+
+      # create new array if not done yet
+      if lgbphs_array is None:
+        lgbphs_array = numpy.ndarray(shape, 'float64')
+
+      # fill the array with the absolute values of the Gabor wavelet transform
+      self._fill(lgbphs_array, abs_blocks, j)
+
+      if self.use_phases:
+        # compute phase part of complex response
+        phase_image = numpy.angle(self.trafo_image[j])
+        # Computes LBP histograms
+        phase_blocks = bob.ip.base.lbphs(phase_image, self.lbp, self.block_size, self.block_overlap)
+        # fill the array with the phases at the end of the blocks
+        self._fill(lgbphs_array, phase_blocks, j + self.gwt.number_of_wavelets)
+
+
+    # return the concatenated list of all histograms
+    return self._sparsify(lgbphs_array)
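+
+# Note (based on the code above and on test_extractors.py): the shape of the
+# returned histogram depends on the options. With the defaults
+# (split_histogram = None, sparse_histogram = False) a single 1D concatenated
+# histogram is returned; with sparse_histogram = True the result is a 2D array
+# with the non-zero bin indices in the first row and their values in the second.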
diff --git a/bob/bio/face/extractor/__init__.py b/bob/bio/face/extractor/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee10aebc0ff20b0d8f083c59c7bc178a9cf6fae6
--- /dev/null
+++ b/bob/bio/face/extractor/__init__.py
@@ -0,0 +1,4 @@
+from .DCTBlocks import DCTBlocks
+from .GridGraph import GridGraph
+from .LGBPHS import LGBPHS
+from .Eigenface import Eigenface
diff --git a/bob/bio/face/test/__init__.py b/bob/bio/face/test/__init__.py
index 77d1ede44a3aed0ce4bfde00a6288787622a17c7..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391 100644
--- a/bob/bio/face/test/__init__.py
+++ b/bob/bio/face/test/__init__.py
@@ -1 +0,0 @@
-from . import dummy
diff --git a/bob/bio/face/test/data/dct_blocks.hdf5 b/bob/bio/face/test/data/dct_blocks.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..eab2357c2f409fe5998c4fb9cc039a6d4588015a
Binary files /dev/null and b/bob/bio/face/test/data/dct_blocks.hdf5 differ
diff --git a/bob/bio/face/test/data/eigenface_extractor.hdf5 b/bob/bio/face/test/data/eigenface_extractor.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..6ad82a14fb2c12831e8be5c04636c0a42bf27946
Binary files /dev/null and b/bob/bio/face/test/data/eigenface_extractor.hdf5 differ
diff --git a/bob/bio/face/test/data/eigenface_feature.hdf5 b/bob/bio/face/test/data/eigenface_feature.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..a870fb331ebe483db4a9ca5da8da339fadfbafeb
Binary files /dev/null and b/bob/bio/face/test/data/eigenface_feature.hdf5 differ
diff --git a/bob/bio/face/test/data/graph_regular.hdf5 b/bob/bio/face/test/data/graph_regular.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..671b7f734c00ba4efe5105c3a47ff2a1f5c6dc63
Binary files /dev/null and b/bob/bio/face/test/data/graph_regular.hdf5 differ
diff --git a/bob/bio/face/test/data/lgbphs_sparse.hdf5 b/bob/bio/face/test/data/lgbphs_sparse.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..f29fdc3a5cfc324de9f8f591ff4141c2c6708661
Binary files /dev/null and b/bob/bio/face/test/data/lgbphs_sparse.hdf5 differ
diff --git a/bob/bio/face/test/data/lgbphs_with_phase.hdf5 b/bob/bio/face/test/data/lgbphs_with_phase.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..81501170561de7bf41f2fd4524fccf381182871c
Binary files /dev/null and b/bob/bio/face/test/data/lgbphs_with_phase.hdf5 differ
diff --git a/bob/bio/face/test/test_extractors.py b/bob/bio/face/test/test_extractors.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1dfc0762f34f152a17ee360ef7fa880f0859180
--- /dev/null
+++ b/bob/bio/face/test/test_extractors.py
@@ -0,0 +1,210 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# @author: Manuel Guenther <Manuel.Guenther@idiap.ch>
+# @date: Thu May 24 10:41:42 CEST 2012
+#
+# Copyright (C) 2011-2012 Idiap Research Institute, Martigny, Switzerland
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, version 3 of the License.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program.  If not, see <http://www.gnu.org/licenses/>.
+
+import bob.bio.base
+import bob.bio.face
+
+import unittest
+import os
+import shutil
+import numpy
+import math
+from nose.plugins.skip import SkipTest
+
+import bob.io.base.test_utils
+import bob.ip.gabor
+from bob.bio.base.test import utils
+
+import pkg_resources
+
+regenerate_refs = False
+
+def _compare(data, reference, write_function = bob.bio.base.save, read_function = bob.bio.base.load):
+  # write reference?
+  if regenerate_refs:
+    write_function(data, reference)
+
+  # compare reference
+  reference = read_function(reference)
+  assert numpy.allclose(data, reference, atol=1e-5)
+
+def _data():
+  return bob.bio.base.load(pkg_resources.resource_filename('bob.bio.face.test', 'data/cropped.hdf5'))
+
+
+def test_dct_blocks():
+  # read input
+  data = _data()
+  dct = bob.bio.base.load_resource('dct-blocks', 'extractor')
+  assert isinstance(dct, bob.bio.face.extractor.DCTBlocks)
+  assert isinstance(dct, bob.bio.base.extractor.Extractor)
+  assert not dct.requires_training
+
+  # generate smaller extractor, using mixed tuple and int input for the block size and overlap
+  dct = bob.bio.face.extractor.DCTBlocks(8, (0,0), 15)
+
+  # extract features
+  feature = dct(data)
+  assert feature.ndim == 2
+  # the feature dimension is one lower than the requested number of DCT coefficients, since the first (DC) coefficient is dropped when blocks are normalized (the default)
+  assert feature.shape == (80, 14)
+  reference = pkg_resources.resource_filename('bob.bio.face.test', 'data/dct_blocks.hdf5')
+  _compare(feature, reference, dct.write_feature, dct.read_feature)
+
+
+def test_graphs():
+  data = _data()
+  graph = bob.bio.base.load_resource('grid-graph', 'extractor')
+  assert isinstance(graph, bob.bio.face.extractor.GridGraph)
+  assert isinstance(graph, bob.bio.base.extractor.Extractor)
+  assert not graph.requires_training
+
+  # generate a smaller extractor with a fixed, regular grid; the node distance is given as a single integer and the first node is determined automatically
+  graph = bob.bio.face.extractor.GridGraph(node_distance = 24, image_resolution = data.shape)
+
+  # extract features
+  feature = graph(data)
+
+  reference = pkg_resources.resource_filename('bob.bio.face.test', 'data/graph_regular.hdf5')
+  # write reference?
+  if regenerate_refs:
+    graph.write_feature(feature, reference)
+
+  # compare reference
+  reference = graph.read_feature(reference)
+  assert len(reference) == len(feature)
+  assert all(isinstance(f, bob.ip.gabor.Jet) for f in feature)
+  assert all(numpy.allclose(r.jet, f.jet) for r,f in zip(reference, feature))
+
+
+  # get reference face graph extractor
+  cropper = bob.bio.base.load_resource('face-crop-eyes', 'preprocessor')
+  eyes = cropper.cropped_positions
+  # generate aligned graph extractor
+  graph = bob.bio.face.extractor.GridGraph(
+    # setup of the aligned grid
+    eyes = eyes,
+    nodes_between_eyes = 4,
+    nodes_along_eyes = 2,
+    nodes_above_eyes = 2,
+    nodes_below_eyes = 7
+  )
+
+  nodes = graph.graph.nodes
+  assert len(nodes) == 100
+  assert numpy.allclose(nodes[22], eyes['reye'])
+  assert numpy.allclose(nodes[27], eyes['leye'])
+
+  assert nodes[0] < eyes['reye']
+  assert nodes[-1] > eyes['leye']
+
+
+def test_lgbphs():
+  data = _data()
+  lgbphs = bob.bio.base.load_resource('lgbphs', 'extractor')
+  assert isinstance(lgbphs, bob.bio.face.extractor.LGBPHS)
+  assert isinstance(lgbphs, bob.bio.base.extractor.Extractor)
+  assert not lgbphs.requires_training
+
+  # in this test, we use a smaller setup of the LGBPHS features
+  lgbphs = bob.bio.face.extractor.LGBPHS(
+      block_size = 8,
+      block_overlap = 0,
+      gabor_directions = 4,
+      gabor_scales = 2,
+      gabor_sigma = math.sqrt(2.) * math.pi,
+      sparse_histogram = True
+  )
+
+  # extract feature
+  feature = lgbphs(data)
+  assert feature.ndim == 2
+
+  reference = pkg_resources.resource_filename('bob.bio.face.test', 'data/lgbphs_sparse.hdf5')
+  _compare(feature, reference, lgbphs.write_feature, lgbphs.read_feature)
+
+  # generate new non-sparse extractor including Gabor phases
+  lgbphs = bob.bio.face.extractor.LGBPHS(
+      block_size = 8,
+      block_overlap = 0,
+      gabor_directions = 4,
+      gabor_scales = 2,
+      gabor_sigma = math.sqrt(2.) * math.pi,
+      use_gabor_phases = True
+  )
+  feature = lgbphs(data)
+  assert feature.ndim == 1
+
+  reference = pkg_resources.resource_filename('bob.bio.face.test', 'data/lgbphs_with_phase.hdf5')
+  _compare(feature, reference, lgbphs.write_feature, lgbphs.read_feature)
+
+
+def test_eigenface():
+  temp_file = bob.io.base.test_utils.temporary_filename()
+  data = _data()
+  eigen1 = bob.bio.base.load_resource('eigenface', 'extractor')
+  assert isinstance(eigen1, bob.bio.face.extractor.Eigenface)
+  assert isinstance(eigen1, bob.bio.base.extractor.Extractor)
+  assert eigen1.requires_training
+
+  # create extractor with a smaller number of kept eigenfaces
+  train_data = utils.random_training_set(data.shape, 400, 0., 255.)
+  eigen2 = bob.bio.face.extractor.Eigenface(subspace_dimension = 5)
+  reference = pkg_resources.resource_filename('bob.bio.face.test', 'data/eigenface_extractor.hdf5')
+  try:
+    # train the projector
+    eigen2.train(train_data, temp_file)
+
+    assert os.path.exists(temp_file)
+
+    if regenerate_refs: shutil.copy(temp_file, reference)
+
+    # check projection matrix
+    eigen1.load(reference)
+    eigen2.load(temp_file)
+
+    assert eigen1.machine.shape == eigen2.machine.shape
+    for i in range(5):
+      assert (numpy.abs(eigen1.machine.weights[:,i] - eigen2.machine.weights[:,i]) < 1e-5).all() or (numpy.abs(eigen1.machine.weights[:,i] + eigen2.machine.weights[:,i]) < 1e-5).all()
+
+  finally:
+    if os.path.exists(temp_file): os.remove(temp_file)
+
+  # now, we can execute the extractor and check that the feature is still identical
+  feature = eigen1(data)
+  assert feature.ndim == 1
+  reference = pkg_resources.resource_filename('bob.bio.face.test', 'data/eigenface_feature.hdf5')
+  _compare(feature, reference, eigen1.write_feature, eigen1.read_feature)
+
+
+"""
+  def test05_sift_key_points(self):
+    # check if VLSIFT is available
+    import bob.ip.base
+    if not hasattr(bob.ip.base, "VLSIFT"):
+      raise SkipTest("VLSIFT is not part of bob.ip.base; maybe SIFT headers aren't installed in your system?")
+
+    # we need the preprocessor tool to actually read the data
+    preprocessor = facereclib.preprocessing.Keypoints()
+    data = preprocessor.read_data(self.input_dir('key_points.hdf5'))
+    # now, we extract features from it
+    extractor = self.config('sift')
+    feature = self.execute(extractor, data, 'sift.hdf5', epsilon=1e-4)
+    self.assertEqual(len(feature.shape), 1)
+
+
+"""
diff --git a/setup.py b/setup.py
index ada220b05f075855fb430f7afbeccd886669535a..6ec602bbdcf7f8d2c58be227582960dbed31ca6d 100644
--- a/setup.py
+++ b/setup.py
@@ -120,6 +120,10 @@ setup(
       ],
 
       'bob.bio.extractor': [
+        'dct-blocks        = bob.bio.face.config.extractor.dct_blocks:extractor', # DCT blocks
+        'grid-graph        = bob.bio.face.config.extractor.grid_graph:extractor', # Grid graph
+        'lgbphs            = bob.bio.face.config.extractor.lgbphs:extractor', # LGBPHS
+        'eigenface         = bob.bio.face.config.extractor.eigenface:extractor', # Eigenface
       ],
 
       'bob.bio.algorithm': [