diff --git a/bob/bio/face/algorithm/__init__.py b/bob/bio/face/algorithm/__init__.py
index 63b1837764b38cf36b379c803ca210820c4ec9d1..ae17f3c05fd46ab2d70b5ced7560ec967704b9a7 100644
--- a/bob/bio/face/algorithm/__init__.py
+++ b/bob/bio/face/algorithm/__init__.py
@@ -1,2 +1,5 @@
 from .GaborJet import GaborJet
 from .LGBPHS import LGBPHS
+
+# gets sphinx autodoc done right - don't remove it
+__all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/bob/bio/face/config/extractor/eigenface.py b/bob/bio/face/config/extractor/eigenface.py
index 7fae79ed1297536894ec5948bb08fa9176ed3c91..f0f38a77964c32092b4a575ff46bd592138126b4 100644
--- a/bob/bio/face/config/extractor/eigenface.py
+++ b/bob/bio/face/config/extractor/eigenface.py
@@ -4,5 +4,5 @@ import bob.bio.face
 
 # compute eigenfaces using the training database
 extractor = bob.bio.face.extractor.Eigenface(
-    subspace_dimension = 100
+    subspace_dimension = .95
 )
diff --git a/bob/bio/face/config/extractor/grid_graph.py b/bob/bio/face/config/extractor/grid_graph.py
index 02b93c0a7b0f8331fc4495b6453c007450850240..f5741a01a7cae96966950592748c94c655478edc 100644
--- a/bob/bio/face/config/extractor/grid_graph.py
+++ b/bob/bio/face/config/extractor/grid_graph.py
@@ -4,9 +4,6 @@ import bob.bio.base
 import bob.bio.face
 import math
 
-# load the face cropping parameters
-cropper = bob.bio.base.load_resource("face-crop-eyes", "preprocessor")
-
 extractor = bob.bio.face.extractor.GridGraph(
     # Gabor parameters
     gabor_sigma = math.sqrt(2.) * math.pi,
@@ -15,7 +12,5 @@ extractor = bob.bio.face.extractor.GridGraph(
     normalize_gabor_jets = True,
 
     # setup of the fixed grid
-    node_distance = (4, 4),
-    first_node = (6, 6),
-    image_resolution = cropper.cropped_image_size
+    node_distance = (8, 8)
 )
diff --git a/bob/bio/face/extractor/Eigenface.py b/bob/bio/face/extractor/Eigenface.py
index ad8bb884b1df7b4e3f516151978bdd3389394ab4..b5464cee431ec34fd1f04f1f452c6a7d023d228e 100644
--- a/bob/bio/face/extractor/Eigenface.py
+++ b/bob/bio/face/extractor/Eigenface.py
@@ -36,7 +36,18 @@ class Eigenface (Extractor):
 
     logger.info("  -> Training LinearMachine using PCA (SVD)")
     t = bob.learn.linear.PCATrainer()
-    self.machine, __eig_vals = t.train(data)
+    self.machine, variances = t.train(data)
+
+    # if subspace_dimension is a float, interpret it as the fraction of variance to keep
+    if isinstance(self.subspace_dimension, float):
+      cumulated = numpy.cumsum(variances) / numpy.sum(variances)
+      for index in range(len(cumulated)):
+        if cumulated[index] > self.subspace_dimension:
+          break
+      # keep as many eigenvectors as needed to reach the desired variance fraction
+      self.subspace_dimension = index
+      logger.info("  -> Keeping %d eigenvectors" % self.subspace_dimension)
+
     # Machine: get shape, then resize
     self.machine.resize(self.machine.shape[0], self.subspace_dimension)
     self.machine.save(bob.io.base.HDF5File(extractor_file, "w"))
diff --git a/bob/bio/face/extractor/GridGraph.py b/bob/bio/face/extractor/GridGraph.py
index 783521cdc4c3bce0bcb060087d5a8a3325bf88af..c8a2a9acccc2b9f4baaab06d85656b9ad7f1f57a 100644
--- a/bob/bio/face/extractor/GridGraph.py
+++ b/bob/bio/face/extractor/GridGraph.py
@@ -35,7 +35,6 @@ class GridGraph (Extractor):
 
       # setup of static grid
       node_distance = None,    # one or two integral values
-      image_resolution = None, # always two integral values
       first_node = None,       # one or two integral values, or None -> automatically determined
   ):
 
@@ -57,7 +56,6 @@ class GridGraph (Extractor):
         nodes_above_eyes = nodes_above_eyes,
         nodes_below_eyes = nodes_below_eyes,
         node_distance = node_distance,
-        image_resolution = image_resolution,
         first_node = first_node
     )
 
@@ -74,7 +72,7 @@ class GridGraph (Extractor):
 
     # create graph extractor
     if eyes is not None:
-      self.graph = bob.ip.gabor.Graph(
+      self._aligned_graph = bob.ip.gabor.Graph(
           righteye = [int(e) for e in eyes['reye']],
           lefteye = [int(e) for e in eyes['leye']],
           between = int(nodes_between_eyes),
@@ -83,43 +81,63 @@ class GridGraph (Extractor):
           below = int(nodes_below_eyes)
       )
     else:
-      if node_distance is None or image_resolution is None:
-        raise ValueError("Please specify either 'eyes' or the grid parameters 'first_node', 'last_node', and 'node_distance'!")
-      if isinstance(node_distance, (int, float)):
-         node_distance = (int(node_distance), int(node_distance))
-      if first_node is None:
+      if node_distance is None:
+        raise ValueError("Please specify either 'eyes' or the grid parameters 'node_distance' (and 'first_node')!")
+      self._aligned_graph = None
+      self._last_image_resolution = None
+      self.first_node = first_node
+      self.node_distance = node_distance
+      if isinstance(self.node_distance, (int, float)):
+         self.node_distance = (int(self.node_distance), int(self.node_distance))
+
+    self.normalize_jets = normalize_gabor_jets
+    self.trafo_image = None
+
+  def _extractor(self, image):
+    """Creates an extractor based on the given image."""
+
+    if self.trafo_image is None or self.trafo_image.shape[1:3] != image.shape:
+      # create trafo image
+      self.trafo_image = numpy.ndarray((self.gwt.number_of_wavelets, image.shape[0], image.shape[1]), numpy.complex128)
+
+    if self._aligned_graph is not None:
+      return self._aligned_graph
+
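+    # the static grid depends on the image resolution, so only re-create the graph when the resolution changes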
+    if self._last_image_resolution != image.shape:
+      self._last_image_resolution = image.shape
+      if self.first_node is None:
         first_node = [0,0]
         for i in (0,1):
-          offset = int((image_resolution[i] - int(image_resolution[i]/node_distance[i])*node_distance[i]) / 2)
-          if offset < node_distance[i]//2: # This is not tested, but should ALWAYS be the case.
-            offset += node_distance[i]//2
+          offset = int((image.shape[i] - int(image.shape[i]/self.node_distance[i])*self.node_distance[i]) / 2)
+          if offset < self.node_distance[i]//2: # This is not tested, but should ALWAYS be the case.
+            offset += self.node_distance[i]//2
           first_node[i] = offset
-      last_node = tuple([int(image_resolution[i] - max(first_node[i],1)) for i in (0,1)])
+      else:
+        first_node = self.first_node
+      last_node = tuple([int(image.shape[i] - max(first_node[i],1)) for i in (0,1)])
 
       # take the specified nodes
-      self.graph = bob.ip.gabor.Graph(
+      self._graph = bob.ip.gabor.Graph(
           first = first_node,
           last = last_node,
-          step = node_distance
+          step = self.node_distance
       )
 
-    self.normalize_jets = normalize_gabor_jets
-    self.trafo_image = None
+    return self._graph
+
 
   def __call__(self, image):
     assert image.ndim == 2
     assert isinstance(image, numpy.ndarray)
     assert image.dtype == numpy.float64
 
-    if self.trafo_image is None or self.trafo_image.shape[1:3] != image.shape:
-      # create trafo image
-      self.trafo_image = numpy.ndarray((self.gwt.number_of_wavelets, image.shape[0], image.shape[1]), numpy.complex128)
+    extractor = self._extractor(image)
 
     # perform Gabor wavelet transform
     self.gwt.transform(image, self.trafo_image)
 
     # extract face graph
-    jets = self.graph.extract(self.trafo_image)
+    jets = extractor.extract(self.trafo_image)
 
     # normalize the Gabor jets of the graph only
     if self.normalize_jets:
diff --git a/bob/bio/face/extractor/__init__.py b/bob/bio/face/extractor/__init__.py
index ee10aebc0ff20b0d8f083c59c7bc178a9cf6fae6..0226aadf9a7973cbac85f2b677a6757d823ea799 100644
--- a/bob/bio/face/extractor/__init__.py
+++ b/bob/bio/face/extractor/__init__.py
@@ -2,3 +2,6 @@ from .DCTBlocks import DCTBlocks
 from .GridGraph import GridGraph
 from .LGBPHS import LGBPHS
 from .Eigenface import Eigenface
+
+# gets sphinx autodoc done right - don't remove it
+__all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/bob/bio/face/preprocessor/FaceDetect.py b/bob/bio/face/preprocessor/FaceDetect.py
index e833c67ce40a84c1bed732cb6d9fc1decc4b43b3..76e959d625e84effd1d20ea0d2615b21d743776a 100644
--- a/bob/bio/face/preprocessor/FaceDetect.py
+++ b/bob/bio/face/preprocessor/FaceDetect.py
@@ -45,6 +45,8 @@ class FaceDetect (Base):
       lowest_scale = lowest_scale
     )
 
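+    # a face cropper is required to crop the detected face; fail early if none was given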
+    assert face_cropper is not None
+
     self.sampler = bob.ip.facedetect.Sampler(scale_factor=scale_base, lowest_scale=lowest_scale, distance=distance)
     if cascade is None:
       self.cascade = bob.ip.facedetect.default_cascade()
diff --git a/bob/bio/face/preprocessor/__init__.py b/bob/bio/face/preprocessor/__init__.py
index 1b91073782fa539dd520ac6602cb01448eab11b9..c2241a8685383fefe83c818f3b365adcd27096b8 100644
--- a/bob/bio/face/preprocessor/__init__.py
+++ b/bob/bio/face/preprocessor/__init__.py
@@ -6,3 +6,6 @@ from .TanTriggs import TanTriggs
 from .INormLBP import INormLBP
 from .HistogramEqualization import HistogramEqualization
 from .SelfQuotientImage import SelfQuotientImage
+
+# gets sphinx autodoc done right - don't remove it
+__all__ = [_ for _ in dir() if not _.startswith('_')]
diff --git a/bob/bio/face/script/baselines.py b/bob/bio/face/script/baselines.py
index 0cc241b1e59a8b850614df5e89931bb6c03272b1..0358684e47700b9d353bb617bc6ca0f249cb41b7 100755
--- a/bob/bio/face/script/baselines.py
+++ b/bob/bio/face/script/baselines.py
@@ -58,8 +58,9 @@ def command_line_arguments(command_line_parameters):
   parser.add_argument('-d', '--database', choices = available_databases, default = 'atnt', help = 'The database on which the baseline algorithm is executed.')
   # - the database to choose
   parser.add_argument('-b', '--baseline-directory', default = 'baselines', help = 'The sub-directory, where the baseline results are stored.')
-  # - the directory to write
-  parser.add_argument('-f', '--directory', help = 'The directory to write the data of the experiment into. If not specified, the default directories of the verify.py script are used (see ./bin/verify.py --help).')
+  # - the directories to write to
+  parser.add_argument('-T', '--temp-directory', help = 'The directory to write the temporary data of the experiment into. If not specified, the default directory of the verify.py script is used (see ./bin/verify.py --help).')
+  parser.add_argument('-R', '--result-directory', help = 'The directory to write the resulting score files of the experiment into. If not specified, the default directories of the verify.py script are used (see ./bin/verify.py --help).')
 
   # - use the Idiap grid -- option is only useful if you are at Idiap
   parser.add_argument('-g', '--grid', action = 'store_true', help = 'Execute the algorithm in the SGE grid.')
@@ -225,9 +226,11 @@ def main(command_line_parameters = None):
       if has_eval:
         command += ['--groups', 'dev', 'eval']
 
-      # set the directories, if desired; we set both directories to be identical.
-      if args.directory is not None:
-        command += ['--temp-directory', os.path.join(args.directory, args.database), '--result-directory', os.path.join(args.directory, args.database)]
+      # set the directories, if desired
+      if args.temp_directory is not None:
+        command += ['--temp-directory', args.temp_directory]
+      if args.result_directory is not None:
+        command += ['--result-directory', args.result_directory]
 
       # set the verbosity level
       if args.verbose:
@@ -249,17 +252,17 @@ def main(command_line_parameters = None):
 
     # get the base directory of the results
     is_idiap = os.path.isdir("/idiap")
-    if args.directory is None:
-      args.directory = "/idiap/user/%s/%s" % (os.environ["USER"], args.database) if is_idiap else "results"
-    if not os.path.exists(args.directory):
+    if args.result_directory is None:
+      args.result_directory = "/idiap/user/%s/%s" % (os.environ["USER"], args.database) if is_idiap else "results"
+    if not os.path.exists(args.result_directory):
       if not args.dry_run:
-        raise IOError("The result directory '%s' cannot be found. Please specify the --directory as it was specified during execution of the algorithms." % args.directory)
+        raise IOError("The result directory '%s' cannot be found. Please specify the --result-directory as it was specified during execution of the algorithms." % args.result_directory)
 
     # get the result directory of the database
-    result_dir = os.path.join(args.directory, args.baseline_directory)
+    result_dir = os.path.join(args.result_directory, args.baseline_directory)
     if not os.path.exists(result_dir):
       if not args.dry_run:
-        raise IOError("The result directory '%s' for the desired database cannot be found. Did you already run the experiments for this database?" % result_dir)
+        raise IOError("The result directory '%s' for the desired experiment cannot be found. Did you already run the experiments?" % result_dir)
 
     # iterate over the algorithms and collect the result files
     result_dev = []
diff --git a/bob/bio/face/test/test_extractors.py b/bob/bio/face/test/test_extractors.py
index 97fee729e69b38eb216678e5d1b67cacd04753dd..41b657da41233e2005a2829f1b5d9de8a4be253a 100644
--- a/bob/bio/face/test/test_extractors.py
+++ b/bob/bio/face/test/test_extractors.py
@@ -74,7 +74,7 @@ def test_graphs():
   assert not graph.requires_training
 
   # generate smaller extractor, using mixed tuple and int input for the node distance and first location
-  graph = bob.bio.face.extractor.GridGraph(node_distance = 24, image_resolution = data.shape)
+  graph = bob.bio.face.extractor.GridGraph(node_distance = 24)
 
   # extract features
   feature = graph(data)
@@ -104,7 +104,7 @@ def test_graphs():
     nodes_below_eyes = 7
   )
 
-  nodes = graph.graph.nodes
+  nodes = graph._extractor(data).nodes
   assert len(nodes) == 100
   assert numpy.allclose(nodes[22], eyes['reye'])
   assert numpy.allclose(nodes[27], eyes['leye'])
diff --git a/doc/baselines.rst b/doc/baselines.rst
new file mode 100644
index 0000000000000000000000000000000000000000..97fc3a83f611b7119ed30f0452f50204503f16db
--- /dev/null
+++ b/doc/baselines.rst
@@ -0,0 +1,194 @@
+.. vim: set fileencoding=utf-8 :
+.. author: Manuel Günther <manuel.guenther@idiap.ch>
+.. date: Thu Sep 20 11:58:57 CEST 2012
+
+.. _baselines:
+
+=============================
+Executing Baseline Algorithms
+=============================
+
+The first thing you might want to do is to execute one of the baseline face recognition algorithms that are implemented in ``bob.bio``.
+
+Setting up your Database
+------------------------
+
+As mentioned in the documentation of :ref:`bob.bio.base <bob.bio.base>`, the image databases are not included in this package, so you have to download them.
+For example, you can easily download the images of the `AT&T database`_; for links to other usable image databases, please read the :ref:`bob.bio.face.databases` section.
+
+By default, ``bob.bio`` does not know where the images are located.
+Hence, before running experiments you have to specify the image database directories.
+How this is done is explained in more detail in the :ref:`bob.bio.base.installation`.
+
+
+Running Baseline Experiments
+----------------------------
+
+To run the baseline experiments, you can use the ``./bin/baselines.py`` script by just going to the console and typing:
+
+.. code-block:: sh
+
+   $ ./bin/baselines.py
+
+This script is a simple wrapper for the ``./bin/verify.py`` script that is explained in more detail in :ref:`bob.bio.base.experiments`.
+The ``./bin/baselines.py --help`` option shows you which other options are available.
+Here is an almost complete list:
+
+* ``--database``: The database and protocol you want to use.
+  By default this is set to the image database *atnt*.
+* ``--algorithms``: The recognition algorithms that you want to execute.
+  By default, only the *eigenface* algorithm is executed.
+* ``--all``: Execute all algorithms that are implemented.
+* ``--temp-directory``: The directory where temporary files of the experiments are written.
+* ``--result-directory``: The directory where the resulting score files of the experiments are written.
+* ``--evaluate``: After running the experiments, the resulting score files will be evaluated, and the result is written to console.
+* ``--dry-run``: Instead of executing the algorithm (or the evaluation), only print the command that would have been executed.
+* ``--verbose``: Increase the verbosity level of the script.
+  By default, only the commands that are executed are printed, and the rest of the calculation runs quietly.
+  You can increase the verbosity by adding the ``--verbose`` parameter repeatedly (up to three times).
+
+Usually it is a good idea to have at least verbose level 2 (i.e., calling ``./bin/baselines.py --verbose --verbose``, or the short version ``./bin/baselines.py -vv``).
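+
+For example, a single baseline on the default database could be started like this (``temp`` and ``results`` are placeholder directory names; choose your own):
+
+.. code-block:: sh
+
+   $ ./bin/baselines.py --database atnt --algorithms eigenface -vv --temp-directory temp --result-directory results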
+
+Running in Parallel
+~~~~~~~~~~~~~~~~~~~
+
+To run the experiments in parallel, as usual you can define an SGE grid configuration, or run with parallel threads on the local machine.
+For the ``./bin/baselines.py`` script, the grid configuration is adapted to each of the algorithms.
+Hence, to run in the SGE grid, you can simply add the ``--grid`` command line option, without parameters.
+Similarly, to run the experiments in parallel on the local machine, simply add a ``--parallel <N>`` option, where ``<N>`` specifies the number of parallel jobs you want to execute.
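+
+For example, both variants might be invoked as follows:
+
+.. code-block:: sh
+
+   # submit the experiments to the SGE grid
+   $ ./bin/baselines.py --all -vv --grid
+   # or: run with 4 parallel processes on the local machine
+   $ ./bin/baselines.py --all -vv --parallel 4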
+
+When running the algorithms from the :ref:`bob.bio.gmm <bob.bio.gmm>` package in parallel, the specialized scripts are executed.
+This will speed up the training of the UBM (and possibly additional steps) tremendously.
+
+
+The Algorithms
+--------------
+
+The baselines provide an (incomplete) set of state-of-the-art face recognition algorithms. Here is the list of short-cuts:
+
+* ``eigenface``: The eigenface algorithm as proposed by [TP91]_. It uses the pixels as raw data, and applies *Principal Component Analysis* (PCA) to them:
+
+  - preprocessor : :py:class:`bob.bio.face.preprocessor.FaceCrop`
+  - feature : :py:class:`bob.bio.base.extractor.Linearize`
+  - algorithm : :py:class:`bob.bio.base.algorithm.PCA`
+
+* ``lda``: The LDA algorithm applies a *Linear Discriminant Analysis* (LDA), here we use the combined PCA+LDA approach [ZKC+98]_:
+
+  - preprocessor : :py:class:`bob.bio.face.preprocessor.FaceCrop`
+  - feature : :py:class:`bob.bio.face.extractor.Eigenface`
+  - algorithm : :py:class:`bob.bio.base.algorithm.LDA`
+
+* ``gaborgraph``: This method extracts grid graphs of Gabor jets from the images, and computes a Gabor-phase-based similarity [GHW12]_.
+
+  - preprocessor : :py:class:`bob.bio.face.preprocessor.INormLBP`
+  - feature : :py:class:`bob.bio.face.extractor.GridGraph`
+  - algorithm : :py:class:`bob.bio.face.algorithm.GaborJet`
+
+
+* ``lgbphs``: *Local Gabor Binary Pattern Histogram Sequences* (LGBPHS) [ZSG+05]_ are extracted from the images and compared using the histogram intersection measure:
+
+  - preprocessor : :py:class:`bob.bio.face.preprocessor.TanTriggs`
+  - feature : :py:class:`bob.bio.face.extractor.LGBPHS`
+  - algorithm : :py:class:`bob.bio.face.algorithm.LGBPHS`
+
+* ``plda``: *Probabilistic LDA* (PLDA) [Pri07]_ is a probabilistic generative version of the LDA, in its scalable formulation of [ESM+13]_.
+  Here, we also apply it on pixel-based representations of the image, though also other features should be possible.
+
+  - preprocessor : :py:class:`bob.bio.face.preprocessor.FaceCrop`
+  - feature : :py:class:`bob.bio.base.extractor.Linearize`
+  - algorithm : :py:class:`bob.bio.base.algorithm.PLDA`
+
+* ``bic``: In the *Bayesian Intrapersonal/Extrapersonal Classifier* (BIC) [MWP98]_, a Gabor-grid-graph-based similarity vector is classified as either intrapersonal (i.e., both images are from the same person) or extrapersonal, as explained in [GW09]_.
+
+  - preprocessor : :py:class:`bob.bio.face.preprocessor.FaceCrop`
+  - feature : :py:class:`bob.bio.face.extractor.GridGraph`
+  - algorithm : :py:class:`bob.bio.base.algorithm.BIC`
+
+.. note::
+  The ``plda`` algorithm is currently under construction and the setup is not yet useful.
+
+
+Further algorithms are available, when the :ref:`bob.bio.gmm <bob.bio.gmm>` package is installed:
+
+* ``gmm``: *Gaussian Mixture Models* (GMM) [MM09]_ are estimated from *Discrete Cosine Transform* (DCT) block features.
+
+  - preprocessor : :py:class:`bob.bio.face.preprocessor.TanTriggs`
+  - feature : :py:class:`bob.bio.face.extractor.DCTBlocks`
+  - algorithm : :py:class:`bob.bio.gmm.algorithm.GMM`
+
+* ``isv``: As an extension of the GMM algorithm, *Inter-Session Variability* (ISV) modeling [WMM+11]_ is used to learn which variations in the images are introduced by identity changes and which are not.
+
+  - preprocessor : :py:class:`bob.bio.face.preprocessor.TanTriggs`
+  - feature : :py:class:`bob.bio.face.extractor.DCTBlocks`
+  - algorithm : :py:class:`bob.bio.gmm.algorithm.ISV`
+
+* ``ivector``: Another extension of the GMM algorithm is *Total Variability* (TV) modeling [WM12]_ (a.k.a. I-Vector), which tries to learn a subspace in the GMM super-vector space.
+
+  - preprocessor : :py:class:`bob.bio.face.preprocessor.TanTriggs`
+  - feature : :py:class:`bob.bio.face.extractor.DCTBlocks`
+  - algorithm : :py:class:`bob.bio.gmm.algorithm.IVector`
+
+.. note::
+  The ``ivector`` algorithm needs a lot of training data and fails on small databases such as the `AT&T database`_.
+
+
+Additionally, the following algorithms can be executed, when the :ref:`bob.bio.csu <bob.bio.csu>` package is installed.
+
+* ``lrpca``: In Local Region PCA [PBD+11]_, the face is sub-divided into local regions and a PCA is performed for each local region.
+
+  - preprocessor : :py:class:`bob.bio.csu.preprocessor.LRPCA`
+  - feature : :py:class:`bob.bio.csu.extractor.LRPCA`
+  - algorithm : :py:class:`bob.bio.csu.algorithm.LRPCA`
+
+* ``lda_ir``: LDA-IR (a.k.a. CohortLDA [LBP+12]_) extracts color information from the images, and computes a PCA+LDA projection on two color layers.
+
+  - preprocessor : :py:class:`bob.bio.csu.preprocessor.LDAIR`
+  - feature : :py:class:`bob.bio.csu.extractor.LDAIR`
+  - algorithm : :py:class:`bob.bio.csu.algorithm.LDAIR`
+
+.. note::
+   The ``lrpca`` and ``lda_ir`` algorithms require hand-labeled eye locations.
+   Therefore, they cannot be run on the default ``atnt`` database.
+
+.. _baseline_results:
+
+Baseline Results
+----------------
+
+To evaluate the results, a wrapper call to ``./bin/evaluate.py`` is produced by the ``./bin/baselines.py --evaluate`` command.
+Several types of evaluation can be achieved, see :ref:`bob.bio.base.evaluate` for details.
+Particularly, here we can enable ROC curves, DET plots, CMC curves and the computation of EER/HTER.
+Hence, the complete set of results of the baseline experiments is generated using:
+
+.. code-block:: sh
+
+  $ ./bin/baselines.py --all -vv --evaluate ROC DET CMC HTER
+
+If you specified other parameters for the execution of the algorithms, e.g., the ``--result-directory`` flag, you have to add these options here as well.
+If you ran only a sub-set of the available algorithms, the missing ones will just be skipped.
+The resulting files will be ``ROC.pdf``, ``DET.pdf`` and ``CMC.pdf``, and the HTER results are simply written to console.
+
+For the `AT&T database`_ the results should be as follows:
+
+.. image:: img/ROC.png
+  :width: 35%
+.. image:: img/DET.png
+  :width: 27%
+.. image:: img/CMC.png
+  :width: 35%
+
+
+.. table:: The HTER results of the baseline algorithms on the AT&T database
+
+  +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+  |  eigenface  |     lda     |  gaborgraph |    lgbphs   |     gmm     |     isv     |    plda     |     bic     |
+  +=============+=============+=============+=============+=============+=============+=============+=============+
+  |   8.368%    |    9.763%   |   4.579%    |    8.500%   |    1.237%   |    0.053%   |    7.921%   |    3.526%   |
+  +-------------+-------------+-------------+-------------+-------------+-------------+-------------+-------------+
+
+
+.. note::
+  The ``lrpca`` and ``lda_ir`` algorithms require hand-labeled eye positions to run.
+  Since the AT&T database does not provide eye positions, it is not possible to provide baseline results on AT&T for these two algorithms.
+
+.. include:: links.rst
diff --git a/doc/conf.py b/doc/conf.py
index 7d42f3e1c879e526a35932e6185b815a42583670..87fe2b217d1e905d4c76ac5b7c9aec5ae375d5ad 100644
--- a/doc/conf.py
+++ b/doc/conf.py
@@ -86,7 +86,7 @@ release = distribution.version
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
-#exclude_patterns = ['**/links.rst']
+exclude_patterns = ['links.rst', 'references.rst']
 
 # The reST default role (used for this markup: `text`) to use for all documents.
 #default_role = None
@@ -133,12 +133,12 @@ if sphinx.__version__ >= "1.0":
 
 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.
-html_logo = ''
+html_logo = 'img/logo.png'
 
 # The name of an image file (within the static path) to use as favicon of the
 # docs.  This file should be a Windows icon file (.ico) being 16x16 or 32x32
 # pixels large.
-html_favicon = ''
+html_favicon = 'img/favicon.ico'
 
 # Add any paths that contain custom static files (such as style sheets) here,
 # relative to this directory. They are copied after the builtin static files,
@@ -246,7 +246,7 @@ autodoc_default_flags = ['members', 'undoc-members', 'inherited-members', 'show-
 
 # For inter-documentation mapping:
 from bob.extension.utils import link_documentation
-intersphinx_mapping = link_documentation(['python', 'numpy', 'bob.io.base', 'bob.db.verification.utils'])
+intersphinx_mapping = link_documentation(['python', 'numpy', 'bob.bio.gmm', 'bob.bio.csu'])
 
 
 def setup(app):
diff --git a/doc/img/CMC.png b/doc/img/CMC.png
new file mode 100644
index 0000000000000000000000000000000000000000..4adf6a233298de74b96cfeaa2c17d88ae49de7e7
Binary files /dev/null and b/doc/img/CMC.png differ
diff --git a/doc/img/DET.png b/doc/img/DET.png
new file mode 100644
index 0000000000000000000000000000000000000000..f8ef499c16c3c13b8f44073aae17727776b41371
Binary files /dev/null and b/doc/img/DET.png differ
diff --git a/doc/img/ROC.png b/doc/img/ROC.png
new file mode 100644
index 0000000000000000000000000000000000000000..10825c3117f34bc8b78b174f28701e8b691ae902
Binary files /dev/null and b/doc/img/ROC.png differ
diff --git a/doc/img/favicon.ico b/doc/img/favicon.ico
new file mode 100644
index 0000000000000000000000000000000000000000..4cc3264302627d40868261add69eb755856611b6
Binary files /dev/null and b/doc/img/favicon.ico differ
diff --git a/doc/img/logo.png b/doc/img/logo.png
new file mode 100644
index 0000000000000000000000000000000000000000..b9dd573a01019afd1af58a881996930e5212699d
Binary files /dev/null and b/doc/img/logo.png differ
diff --git a/doc/implementation.rst b/doc/implementation.rst
new file mode 100644
index 0000000000000000000000000000000000000000..76d9d4b74bd14353689f6ca6f70eb1f094ee0e8a
--- /dev/null
+++ b/doc/implementation.rst
@@ -0,0 +1,197 @@
+
+======================
+Implementation Details
+======================
+
+Image preprocessing
+-------------------
+
+Image preprocessing is an important stage for face recognition.
+In :ref:`bob.bio.face <bob.bio.face>`, several different algorithms to perform photometric enhancement of facial images are implemented.
+These algorithms rely on facial images, which are aligned according to the eye locations, and scaled to a specific image resolution.
+
+Face cropping
+~~~~~~~~~~~~~
+
+However, in most image databases the faces in the original images are not aligned; instead, the eye locations are labeled by hand.
+Hence, before the photometric enhancement algorithms can be applied, faces must be aligned according to the hand-labeled eye locations.
+This can be achieved using the :py:class:`bob.bio.face.preprocessor.FaceCrop` class.
+It will take the image and the hand-labeled eye locations and crop the face according to some parameters, which can be defined in its constructor.
+
+So, now we have a preprocessor to perform face cropping, and some preprocessors to perform photometric enhancement.
+However, we might want to have a photometric enhancement *on top of* the aligned faces.
+In theory, there are several ways to achieve this:
+
+1. Copy the face alignment code into all photometric enhancement classes.
+
+   As copying code is generally a bad choice, we drop this option.
+
+
+2. Use the face cropping as a base class and derive the photometric enhancement classes from it.
+
+   This option is worth implementing, and this was the way the FaceRecLib_ handled preprocessing.
+   However, it required copying code into the configuration files.
+   This means that, when we want to run on a different image resolution, we need to change all configuration files.
+   Option 2 dropped.
+
+
+3. Provide the face cropper as parameter to the photometric enhancement classes.
+
+   This option has the advantage that the configuration has to be written only once.
+   Also, we might change the face cropper to something else later, without needing to change the photometric enhancement code.
+   Option 3 accepted.
+
+Now, let us have a closer look at how the image preprocessing is implemented.
+Let's take the example of the :py:class:`bob.bio.face.preprocessor.TanTriggs`.
+The constructor takes a ``face_cropper`` as parameter.
+This ``face_cropper`` can be ``None`` when the images are already aligned.
+It can also be a :py:class:`bob.bio.face.preprocessor.FaceCrop` object, which contains the information on how faces are cropped.
+The :py:class:`bob.bio.face.preprocessor.TanTriggs` algorithm will use the ``face_cropper`` to crop the face (by passing the image and the annotations to the :py:meth:`bob.bio.face.preprocessor.FaceCrop.crop_face` function), perform the photometric enhancement on the cropped image, and return the result.
+
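+The following sketch illustrates this composition pattern; the class and the ``enhance`` method are hypothetical stand-ins, not the actual implementation:
+
+.. code-block:: py
+
+   class PhotometricEnhancement:
+
+     def __init__(self, face_cropper = None):
+       # the cropper may be None (images are already aligned) or a face cropper object
+       self.face_cropper = face_cropper
+
+     def enhance(self, image):
+       # placeholder for the actual photometric enhancement (e.g., Tan-Triggs)
+       return image
+
+     def __call__(self, image, annotations = None):
+       # crop the face first (if a cropper was given), then enhance the cropped image
+       if self.face_cropper is not None:
+         image = self.face_cropper.crop_face(image, annotations)
+       return self.enhance(image)
+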
+So far, option 3 has no advantage over option 2, since the parameters for face cropping still have to be specified in the configuration file.
+But now comes the key point: the third way a ``face_cropper`` can be passed to the constructor is as a :ref:`Resource <bob.bio.face.preprocessors>` key, such as ``'face-crop-eyes'``.
+This will load the face cropping configuration from the registered resource, which has to be generated only once.
+So, to generate a TanTriggs preprocessor that performs face cropping, you can create:
+
+.. code-block:: py
+
+   preprocessor = bob.bio.face.preprocessor.TanTriggs(face_cropper = 'face-crop-eyes')
+
+
+Face detection
+~~~~~~~~~~~~~~
+
+Alright.
+Now that you have digested all of that, here comes the next step: face detection.
+Some of the databases provide neither hand-labeled eye locations nor pre-cropped images.
+However, we want to use the same algorithms on those images as well, so we have to detect the face (and the facial landmarks), crop the face and perform a photometric enhancement.
+So, image preprocessing becomes a three-stage algorithm.
+
+We have already seen how to combine the two stages, image alignment and photometric enhancement.
+Fortunately, the same technique can be applied to the :py:class:`bob.bio.face.preprocessor.FaceDetect`.
+The face detector takes a ``face_cropper`` as input, with the same options to select a face cropper, except that we cannot pass ``None``.
+Interestingly, the face detector itself can be used as a ``face_cropper`` inside the photometric enhancement classes.
+Hence, to generate a TanTriggs preprocessor that performs face detection, crops the face and performs photometric enhancement, you can create:
+
+.. code-block:: py
+
+   preprocessor = bob.bio.face.preprocessor.TanTriggs(face_cropper = bob.bio.face.preprocessor.FaceDetect(face_cropper = 'face-crop-eyes', use_flandmark = True) )
+
+Or simply (using the face detector :ref:`Resource <bob.bio.face.preprocessors>`):
+
+.. code-block:: py
+
+   preprocessor = bob.bio.face.preprocessor.TanTriggs(face_cropper = 'landmark-detect')
+
+
+.. _bob.bio.face.resources:
+
+Registered Resources
+--------------------
+
+.. _bob.bio.face.databases:
+
+Databases
+~~~~~~~~~
+
+One important aspect of :ref:`bob.bio.face <bob.bio.face>` is the relatively large list of supported image data sets, including well-defined evaluation protocols.
+All databases rely on the :py:class:`bob.bio.base.database.DatabaseBob` interface, which in turn uses the :ref:`verification_databases`.
+Please check the link above for information on how to obtain the original data of those data sets.
+
+After downloading and extracting the original data of the data sets, it is necessary that the scripts know where the data was installed.
+For this purpose, the ``./bin/verify.py`` script can read a special file, where those directories are stored, see :ref:`bob.bio.base.installation`.
+By default, this file is located in your home directory, but you can specify another file on command line.
+
+The other option is to change the directories directly inside the configuration files.
+Here is the list of files and replacement strings for all databases that are registered as resource, in alphabetical order:
+
+* The AT&T database of faces: ``'atnt'``
+
+  - Images: ``[YOUR_ATNT_DIRECTORY]``
+
+* AR face: ``'arface'``
+
+  - Images: ``[YOUR_ARFACE_DIRECTORY]``
+
+* BANCA (english): ``'banca'``
+
+  - Images: ``[YOUR_BANCA_DIRECTORY]``
+
+* CAS-PEAL: ``'caspeal'``
+
+  - Images: ``[YOUR_CAS-PEAL_DIRECTORY]``
+
+* Face Recognition Grand Challenge v2 (FRGC): ``'frgc'``
+
+  - Complete directory: ``[YOUR_FRGC_DIRECTORY]``
+
+  .. note::
+     Due to implementation details, there will be a warning when the FRGC database resource is loaded.
+     To avoid this warning, you have to modify the FRGC database configuration file.
+
+* The Good, the Bad and the Ugly (GBU): ``'gbu'``
+
+  - Images (taken from MBGC-V1): ``[YOUR_MBGC-V1_DIRECTORY]``
+
+* Labeled Faces in the Wild (LFW): ``'lfw-restricted'``, ``'lfw-unrestricted'``
+
+  - Images (aligned with funneling): ``[YOUR_LFW_FUNNELED_DIRECTORY]``
+
+  .. note::
+     In the :ref:`bob.db.lfw <bob.db.lfw>` database interface, we provide automatically detected eye locations, which were detected on the funneled images.
+     Face cropping using these eye locations will only work with the correct images.
+     However, when using the face detector, all types of images will work.
+
+* MOBIO: ``'mobio-image'``, ``'mobio-male'``, ``'mobio-female'``
+
+  - Images (the .png images): ``[YOUR_MOBIO_IMAGE_DIRECTORY]``
+  - Annotations (eyes): ``[YOUR_MOBIO_ANNOTATION_DIRECTORY]``
+
+* Multi-PIE: ``'multipie'``, ``'multipie-pose'``
+
+  - Images: ``[YOUR_MULTI-PIE_IMAGE_DIRECTORY]``
+  - Annotations: ``[YOUR_MULTI-PIE_ANNOTATION_DIRECTORY]``
+
+* SC face: ``'scface'``
+
+  - Images: ``[YOUR_SC_FACE_DIRECTORY]``
+
+* XM2VTS: ``'xm2vts'``
+
+  - Images: ``[YOUR_XM2VTS_DIRECTORY]``
+
+
+You can use the ``./bin/databases.py`` script to list which data directories are correctly set up.
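+
+For example:
+
+.. code-block:: sh
+
+   $ ./bin/databases.py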
+
+
+.. _bob.bio.face.preprocessors:
+
+Preprocessors
+~~~~~~~~~~~~~
+
+Photometric enhancement algorithms are -- by default -- registered without face cropping, as ``'base'`` (no enhancement), ``'histogram'`` (histogram equalization), ``'tan-triggs'``, ``'self-quotient'`` (self quotient image) and ``'inorm-lbp'``.
+These resources should only be used when the original images are already cropped (such as in the `AT&T database`_).
+
+The default face cropping is performed by aligning the eye locations such that the eyes (in subject perspective) are located at: right eye: ``(16, 15)``, left eye: ``(16, 48)``, and the image is cropped to resolution ``(80, 64)`` pixels.
+This cropper is registered under the resource key ``'face-crop-eyes'``.
+Based on this cropping, each photometric enhancement resource has a counterpart that includes the cropping: ``'histogram-crop'``, ``'tan-triggs-crop'``, ``'self-quotient-crop'`` and ``'inorm-lbp-crop'``.
+
+For face detection, two resources are registered.
+The ``'face-detect'`` resource will detect the face and apply the ``'face-crop-eyes'`` cropping, without detecting the eye locations (fixed locations are taken instead).
+Hence, the in-plane rotation of the face is not corrected by ``'face-detect'``.
+On the other hand, in ``'landmark-detect'``, face detection and landmark localization are performed, and the face is aligned using ``'face-crop-eyes'``.
+Photometric enhancement is only registered as a resource after landmark localization: ``'histogram-landmark'``, ``'tan-triggs-landmark'``, ``'self-quotient-landmark'`` and ``'inorm-lbp-landmark'``.
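+
+Any of these registered preprocessors can be loaded programmatically; here is a sketch, using the same :py:func:`bob.bio.base.load_resource` call that the configuration files use:
+
+.. code-block:: py
+
+   import bob.bio.base
+
+   # load the registered face cropper resource
+   cropper = bob.bio.base.load_resource("face-crop-eyes", "preprocessor")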
+
+
+.. _bob.bio.face.extractors:
+
+Feature extractors
+~~~~~~~~~~~~~~~~~~
+
+Only four types of features are registered as resources here:
+
+* ``'dct-blocks'``: DCT blocks with 12 pixels and full overlap, extracting 35 DCT features per block
+* ``'eigenface'``: Pixel vectors projected to face space, keeping 95% of the variance
+* ``'grid-graph'``: Gabor jets in grid graphs, with 8 pixels distance between nodes
+* ``'lgbphs'``: *Local Gabor Binary Pattern Histogram Sequences* extracted from image blocks
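+
+In a configuration file, these extractors are constructed roughly as follows (a sketch based on the shipped configurations; an integer ``subspace_dimension`` keeps a fixed number of eigenvectors, while a float keeps the given fraction of the variance):
+
+.. code-block:: py
+
+   import bob.bio.face
+
+   # keep enough eigenvectors to retain 95% of the variance
+   extractor = bob.bio.face.extractor.Eigenface(subspace_dimension = .95)
+
+   # a fixed grid of Gabor jets with 8 pixels distance between nodes
+   extractor = bob.bio.face.extractor.GridGraph(node_distance = (8, 8))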
+
+.. include:: links.rst
diff --git a/doc/implemented.rst b/doc/implemented.rst
new file mode 100644
index 0000000000000000000000000000000000000000..5ac328e000bcbc90d6e5ce3d26fd98d6163ac5dc
--- /dev/null
+++ b/doc/implemented.rst
@@ -0,0 +1,58 @@
+.. _bob.bio.face.implemented:
+
+=================================
+Tools implemented in bob.bio.face
+=================================
+
+Summary
+-------
+
+Image Preprocessors
+~~~~~~~~~~~~~~~~~~~
+
+.. autosummary::
+   bob.bio.face.preprocessor.Base
+   bob.bio.face.preprocessor.FaceCrop
+   bob.bio.face.preprocessor.FaceDetect
+
+   bob.bio.face.preprocessor.TanTriggs
+   bob.bio.face.preprocessor.HistogramEqualization
+   bob.bio.face.preprocessor.SelfQuotientImage
+   bob.bio.face.preprocessor.INormLBP
+
+
+
+Image Feature Extractors
+~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autosummary::
+   bob.bio.face.extractor.Eigenface
+   bob.bio.face.extractor.DCTBlocks
+   bob.bio.face.extractor.GridGraph
+   bob.bio.face.extractor.LGBPHS
+
+
+Face Recognition Algorithms
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+.. autosummary::
+   bob.bio.face.algorithm.GaborJet
+   bob.bio.face.algorithm.LGBPHS
+
+
+Preprocessors
+-------------
+
+.. automodule:: bob.bio.face.preprocessor
+
+Extractors
+----------
+
+.. automodule:: bob.bio.face.extractor
+
+Algorithms
+----------
+
+.. automodule:: bob.bio.face.algorithm
+
+.. include:: links.rst
diff --git a/doc/index.rst b/doc/index.rst
index 5c640a718e5cd36d66b0b24748fdc932f345addc..1ab1626863ae62c0181e14aee2ba7ee88453cea7 100644
--- a/doc/index.rst
+++ b/doc/index.rst
@@ -5,16 +5,55 @@
 .. _bob.bio.face:
 
 ===========================================
- Face recognition algorithms and databases
+ Face Recognition Algorithms and Databases
 ===========================================
 
+This package is part of the ``bob.bio`` packages, which provide open source tools to run comparable and reproducible biometric recognition experiments.
+In this package, tools for executing face recognition experiments are provided.
+This includes:
 
-Package Documentation
----------------------
+* Preprocessors to detect, align and photometrically enhance face images
+* Feature extractors that extract features from facial images
+* Recognition algorithms that are specialized on facial features, and
+* Facial image databases including their protocols.
 
-.. automodule:: bob.bio.face
+Additionally, a set of baseline algorithms is defined, which integrates well with the two other ``bob.bio`` packages:
 
-Databases
----------
+* :ref:`bob.bio.gmm <bob.bio.gmm>` defines algorithms based on Gaussian mixture models
+* :ref:`bob.bio.csu <bob.bio.csu>` provides wrapper classes of the `CSU Face Recognition Resources <http://www.cs.colostate.edu/facerec>`_ (only Python 2.7 compatible)
 
-.. automodule:: bob.bio.face.database
+For more detailed information about the structure of the ``bob.bio`` packages, please refer to the documentation of :ref:`bob.bio.base <bob.bio.base>`.
+Particularly, for the installation of this and other ``bob.bio`` packages, please read the :ref:`bob.bio.base.installation`.
+
+In the following, we provide more detailed information about the particularities of this package only.
+
+===========
+Users Guide
+===========
+
+.. toctree::
+   :maxdepth: 2
+
+   baselines
+   implementation
+
+================
+Reference Manual
+================
+
+.. toctree::
+   :maxdepth: 2
+
+   implemented
+   py_api
+
+
+.. include:: references.rst
+
+ToDo-List
+=========
+
+This documentation is still under development.
+Here is a list of things that need to be done:
+
+.. todolist::
diff --git a/doc/links.rst b/doc/links.rst
new file mode 100644
index 0000000000000000000000000000000000000000..524d215f3f0c48b3a6ca5940d610ab7611d30810
--- /dev/null
+++ b/doc/links.rst
@@ -0,0 +1,23 @@
+.. vim: set fileencoding=utf-8 :
+.. author: Manuel Günther <manuel.guenther@idiap.ch>
+.. date: Thu Sep 20 11:58:57 CEST 2012
+
+.. This file contains all links we use for documentation in a centralized place
+
+.. _facereclib: http://pypi.python.org/pypi/facereclib
+.. _idiap: http://www.idiap.ch
+.. _github: http://www.github.com/idiap
+.. _bob: http://www.idiap.ch/software/bob
+.. _github bug reporting system: http://github.com/idiap/facereclib/issues
+.. _idiap at github: http://www.github.com/bioidiap
+.. _at&t database: http://www.cl.cam.ac.uk/research/dtg/attarchive/facedatabase.html
+.. _bob's github page: http://idiap.github.com/bob
+.. _gridtk: http://github.com/idiap/gridtk
+.. _buildout: http://www.buildout.org
+.. _nist: http://www.nist.gov/itl/iad/ig/focs.cfm
+.. _pypi: http://pypi.python.org
+.. _sge: http://wiki.idiap.ch/linux/SunGridEngine
+.. _csu face recognition resources: http://www.cs.colostate.edu/facerec
+.. _xfacereclib.extension.csu: http://pypi.python.org/pypi/xfacereclib.extension.CSU
+.. _virtualbox: https://www.virtualbox.org
+.. _hdf5: http://www.hdfgroup.org/HDF5
diff --git a/doc/references.rst b/doc/references.rst
new file mode 100644
index 0000000000000000000000000000000000000000..c92431062305c25d8a133e18de031d14ab9d8b25
--- /dev/null
+++ b/doc/references.rst
@@ -0,0 +1,33 @@
+.. vim: set fileencoding=utf-8 :
+.. author: Manuel Günther <manuel.guenther@idiap.ch>
+.. date: Thu Sep 20 11:58:57 CEST 2012
+
+==========
+References
+==========
+
+.. [TP91]    *M. Turk and A. Pentland*. **Eigenfaces for recognition**. Journal of Cognitive Neuroscience, 3(1):71-86, 1991.
+.. [ZKC+98]  *W. Zhao, A. Krishnaswamy, R. Chellappa, D. Swets and J. Weng*. **Discriminant analysis of principal components for face recognition**, pages 73-85. Springer Verlag Berlin, 1998.
+.. [GHW12]   *M. Günther, D. Haufe and R.P. Würtz*. **Face recognition with disparity corrected Gabor phase differences**. In Artificial neural networks and machine learning, volume 7552 of Lecture Notes in Computer Science, pages 411-418. 9/2012.
+.. [ZSG+05]  *W. Zhang, S. Shan, W. Gao, X. Chen and H. Zhang*. **Local Gabor binary pattern histogram sequence (LGBPHS): a novel non-statistical model for face representation and recognition**. Computer Vision, IEEE International Conference on, 1:786-791, 2005.
+.. [MM09]    *C. McCool, S. Marcel*. **Parts-based face verification using local frequency bands**. In Advances in biometrics, volume 5558 of Lecture Notes in Computer Science. 2009.
+.. [WMM+12]  *R. Wallace, M. McLaren, C. McCool and S. Marcel*. **Cross-pollination of normalisation techniques from speaker to face authentication using Gaussian mixture models**. IEEE Transactions on Information Forensics and Security, 2012.
+.. [WMM+11]  *R. Wallace, M. McLaren, C. McCool and S. Marcel*. **Inter-session variability modelling and joint factor analysis for face authentication**. International Joint Conference on Biometrics. 2011.
+.. [Pri07]   *S. J. D. Prince*. **Probabilistic linear discriminant analysis for inferences about identity**. Proceedings of the International Conference on Computer Vision. 2007.
+.. [ESM+13]  *L. El Shafey, Chris McCool, Roy Wallace and Sébastien Marcel*. **A scalable formulation of probabilistic linear discriminant analysis: applied to face recognition**. IEEE Transactions on Pattern Analysis and Machine Intelligence, 35(7):1788-1794, 7/2013.
+.. [MWP98]   *B. Moghaddam, W. Wahid and A. Pentland*. **Beyond eigenfaces: probabilistic matching for face recognition**. IEEE International Conference on Automatic Face and Gesture Recognition, pages 30-35. 1998.
+
+.. [WM12]    *R. Wallace and M. McLaren*. **Total variability modelling for face verification**. IET Biometrics, vol.1, no.4, 188-199, 12/2012.
+
+.. [TT10]    *X. Tan and B. Triggs*. **Enhanced local texture feature sets for face recognition under difficult lighting conditions**. IEEE Transactions on Image Processing, 19(6):1635-1650, 2010.
+.. [WLW04]   *H. Wang, S.Z. Li and Y. Wang*. **Face recognition under varying lighting conditions using self quotient image**. In IEEE International Conference on Automatic Face and Gesture Recognition (AFGR), pages 819-824. 2004.
+.. [HRM06]   *G. Heusch, Y. Rodriguez, and S. Marcel*. **Local Binary Patterns as an Image Preprocessing for Face Authentication**. In IEEE International Conference on Automatic Face and Gesture Recognition (AFGR), 2006.
+
+.. [WFK97]   *L. Wiskott, J.-M. Fellous, N. Krüger and C.v.d. Malsburg*. **Face recognition by elastic bunch graph matching**. IEEE Transactions on Pattern Analysis and Machine Intelligence, 19:775-779, 1997.
+.. [ZSQ+09]  *W. Zhang, S. Shan, L. Qing, X. Chen and W. Gao*. **Are Gabor phases really useless for face recognition?** Pattern Analysis & Applications, 12:301-307, 2009.
+.. [GW09]    *M. Günther and R.P. Würtz*. **Face detection and recognition using maximum likelihood classifiers on Gabor graphs**. International Journal of Pattern Recognition and Artificial Intelligence, 23(3):433-461, 2009.
+
+.. [GWM12]   *M. Günther, R. Wallace and S. Marcel*. **An Open Source Framework for Standardized Comparisons of Face Recognition Algorithms**. Computer Vision - ECCV 2012. Workshops and Demonstrations, LNCS, 7585, 547-556, 2012.
+
+.. [PBD+11]  *P.J. Phillips, J.R. Beveridge, B.A. Draper, G. Givens, A.J. O'Toole, D.S. Bolme, J. Dunlop, Y.M. Lui, H. Sahibzada and S. Weimer*. **An introduction to the Good, the Bad, & the Ugly face recognition challenge problem**. Automatic Face Gesture Recognition and Workshops (FG 2011), pages 346-353. 2011.
+.. [LBP+12]  *Y.M. Lui, D.S. Bolme, P.J. Phillips, J.R. Beveridge and B.A. Draper*. **Preliminary studies on the Good, the Bad, and the Ugly face recognition challenge problem**. Computer Vision and Pattern Recognition Workshops (CVPRW), pages 9-16. 2012.