diff --git a/.travis.yml b/.travis.yml
index f43c2e164f1a9e6c8351994c06038143006ad5a3..0cda4dcd1c20b7efca6963102e7d35ee82517113 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -25,7 +25,7 @@ install:
 - mkdir PythonFaceEvaluation
 - wget https://www.idiap.ch/software/bob/databases/latest/PythonFaceEvaluation.tar.bz2
 - tar -xjf PythonFaceEvaluation.tar.bz2 -C PythonFaceEvaluation
-- ./bin/patch_CSU.py .
+- ./bin/patch_CSU.py PythonFaceEvaluation
 - ./bin/buildout buildout:develop="PythonFaceEvaluation ." buildout:extensions=bob.buildout buildout:auto-checkout= buildout:debug=false
 script:
 - ./bin/python -c 'import pkg_resources; from bob.bio.csu import get_config; print(get_config())'
diff --git a/bob/bio/csu/algorithm/LDAIR.py b/bob/bio/csu/algorithm/LDAIR.py
index e51e606f935af324460c5475351a3a0a3c4bb1a2..276ad7169a80e851a20a0c09c99b5df9fc4af265 100644
--- a/bob/bio/csu/algorithm/LDAIR.py
+++ b/bob/bio/csu/algorithm/LDAIR.py
@@ -41,10 +41,10 @@ class LDAIR (bob.bio.base.algorithm.Algorithm):
   """
   def __init__(
       self,
-      REGION_ARGS,
-      REGION_KEYWORDS,
-      multiple_model_scoring = 'max', # by default, compute the average between several models and the probe
-      multiple_probe_scoring = 'max'  # by default, compute the average between the model and several probes
+      REGION_ARGS = facerec2010.baseline.lda.CohortLDA_REGIONS,
+      REGION_KEYWORDS = facerec2010.baseline.lda.CohortLDA_KEYWORDS,
+      multiple_model_scoring = 'max', # by default, compute the maximum score between several models and the probe
+      multiple_probe_scoring = 'max'  # by default, compute the maximum score between the model and several probes
   ):
     bob.bio.base.algorithm.Algorithm.__init__(self, multiple_model_scoring=multiple_model_scoring, multiple_probe_scoring=multiple_probe_scoring, **REGION_KEYWORDS)
     self.ldair = facerec2010.baseline.lda.LRLDA(REGION_ARGS, **REGION_KEYWORDS)
diff --git a/bob/bio/csu/algorithm/LRPCA.py b/bob/bio/csu/algorithm/LRPCA.py
index 0c97c3d778fe0cb9b7547ad90c3f995075818f23..110b5cbf92958f6f8c4f8fbd28ee7a312dd31112 100644
--- a/bob/bio/csu/algorithm/LRPCA.py
+++ b/bob/bio/csu/algorithm/LRPCA.py
@@ -41,7 +41,7 @@ class LRPCA (bob.bio.base.algorithm.Algorithm):
 
   def __init__(
       self,
-      TUNING,
+      TUNING = facerec2010.baseline.lrpca.GBU_TUNING,
       multiple_model_scoring = 'max', # by default, compute the average between several models and the probe
       multiple_probe_scoring = 'max'  # by default, compute the average between the model and several probes
   ):
diff --git a/bob/bio/csu/extractor/LDAIR.py b/bob/bio/csu/extractor/LDAIR.py
index 1e2b3731e07469ad131c1b21165a5ac6c1f7a596..c2898d82675c3f17cd2e967a78ba5031c389c22c 100644
--- a/bob/bio/csu/extractor/LDAIR.py
+++ b/bob/bio/csu/extractor/LDAIR.py
@@ -39,7 +39,7 @@ class LDAIR (bob.bio.base.extractor.Extractor):
   REGION_KEYWORDS : dict
     The region keywords as taken from :py:attr:`facerec2010.baseline.lda.CohortLDA_KEYWORDS`.
   """
-  def __init__(self, REGION_ARGS, REGION_KEYWORDS):
+  def __init__(self, REGION_ARGS = facerec2010.baseline.lda.CohortLDA_REGIONS, REGION_KEYWORDS = facerec2010.baseline.lda.CohortLDA_KEYWORDS):
     bob.bio.base.extractor.Extractor.__init__(self, requires_training=True, split_training_data_by_client=True, **REGION_KEYWORDS)
     self.ldair = facerec2010.baseline.lda.LRLDA(REGION_ARGS, **REGION_KEYWORDS)
     self.layers = len(REGION_ARGS)
diff --git a/bob/bio/csu/extractor/LRPCA.py b/bob/bio/csu/extractor/LRPCA.py
index 607ec054b4afea4a5b3cc20489b512889eec99a6..1038590116357c4448fb8b002cebff177bba4783 100644
--- a/bob/bio/csu/extractor/LRPCA.py
+++ b/bob/bio/csu/extractor/LRPCA.py
@@ -37,7 +37,7 @@ class LRPCA (bob.bio.base.extractor.Extractor):
     The tuning for the LRPCA algorithm as taken from the :py:attr:`facerec2010.baseline.lrpca.GBU_TUNING`.
   """
 
-  def __init__(self, TUNING):
+  def __init__(self, TUNING = facerec2010.baseline.lrpca.GBU_TUNING):
     bob.bio.base.extractor.Extractor.__init__(self, requires_training=True, split_training_data_by_client=True, **TUNING)
     self.lrpca = facerec2010.baseline.lrpca.LRPCA(**TUNING)
 
diff --git a/bob/bio/csu/preprocessor/LDAIR.py b/bob/bio/csu/preprocessor/LDAIR.py
index e32d35fe9d91a454d3c28ba44291ca55644de245..fec7729b7f4a2b7956b8e8dea04a1a1c53a15700 100644
--- a/bob/bio/csu/preprocessor/LDAIR.py
+++ b/bob/bio/csu/preprocessor/LDAIR.py
@@ -21,43 +21,66 @@ import facerec2010
 import pyvision
 import numpy
 import bob.bio.base
+import bob.bio.face
 
-class LDAIR (bob.bio.base.preprocessor.Preprocessor):
+class LDAIR (bob.bio.face.preprocessor.FaceCrop):
   """This class defines a wrapper for the :py:class:`facerec2010.baseline.lda.LRLDA` class to be used as an image :py:class:`bob.bio.base.preprocessor.Preprocessor`.
 
   **Parameters:**
 
-  REGION_ARGS
+  ``REGION_ARGS`` : []
     The region arguments as taken from :py:attr:`facerec2010.baseline.lda.CohortLDA_REGIONS`.
 
-  REGION_KEYWORDS
+  ``REGION_KEYWORDS`` : {}
     The region keywords as taken from :py:attr:`facerec2010.baseline.lda.CohortLDA_KEYWORDS`.
+
+  ``face_detector`` : :py:class:`bob.bio.face.preprocessor.FaceDetect` or str
+    The face detector used to detect the face in the image.
+    Might be an instance of a :py:class:`FaceDetect` or the name of a face detector resource.
   """
 
-  def __init__(self, REGION_ARGS, REGION_KEYWORDS):
-    bob.bio.base.preprocessor.Preprocessor.__init__(self, **REGION_KEYWORDS)
+  def __init__(self, REGION_ARGS = facerec2010.baseline.lda.CohortLDA_REGIONS, REGION_KEYWORDS = facerec2010.baseline.lda.CohortLDA_KEYWORDS, face_detector = None):
+    bob.bio.base.preprocessor.Preprocessor.__init__(self, REGION_ARGS=str(REGION_ARGS), REGION_KEYWORDS=str(REGION_KEYWORDS), face_detector=str(face_detector))
     self.ldair = facerec2010.baseline.lda.LRLDA(REGION_ARGS, **REGION_KEYWORDS)
     self.layers = len(REGION_ARGS)
+    self.face_detector = bob.bio.face.preprocessor.utils.load_cropper(face_detector)
 
+    if self.face_detector is not None:
+      assert isinstance(self.face_detector, bob.bio.face.preprocessor.FaceDetect)
+      # assign ourselves to be the face cropper that should be used after face detection
+      self.face_detector.cropper = self
 
-  def __call__(self, image, annotations):
-    """Preprocesses the image using the LDA-IR preprocessor :py:meth:`facerec2010.baseline.lda.LRLDA.preprocess`.
+
+  def _numpy_image(self, image):
+    """Converts the givne image into a numpy color image in Bob format"""
+    np_image = image.asMatrix3D()
+    bob_image = numpy.ndarray((np_image.shape[0], np_image.shape[2], np_image.shape[1]), dtype = numpy.uint8)
+
+    # iterate over color layers
+    for j in range(np_image.shape[0]):
+      bob_image[j,:,:] = np_image[j].transpose()[:,:]
+    return bob_image
+
+
+  def crop_face(self, image, annotations):
+    """crop_face(image, annotations = None) -> face
+
+    Executes the face cropping on the given image and returns the cropped version of it.
 
     **Parameters:**
 
-    image : :py:class:`pyvision.Image` or :py:class:`numpy.ndarray`
-      The color image that should be preprocessed.
+    ``image`` : 3D :py:class:`numpy.ndarray`
+      The face image to be processed.
 
-    annotations : dict
+    ``annotations`` : dict
       The eye annotations for the image.
       They need to be specified as ``{'reye' : (re_y, re_x), 'leye' : (le_y, le_x)}``, where right and left is in subject perspective.
 
     **Returns:**
 
-    preprocessed : 3D numpy.ndarray
-      The preprocessed color image, in default Bob format.
+    face : 3D :py:class:`numpy.ndarray`
+      The cropped face.
     """
-
     # assure that the eye positions are in the set of annotations
     if annotations is None or 'leye' not in annotations or 'reye' not in annotations:
       raise ValueError("The LDA-IR image cropping needs eye positions, but they are not given.")
@@ -84,17 +107,38 @@ class LDAIR (bob.bio.base.preprocessor.Preprocessor):
     assert len(tiles) == self.layers
     assert (tiles[0].asMatrix3D() == tiles[1].asMatrix3D()).all()
 
-    # Additionally, pyvision used images in (x,y)-order.
-    # To be consistent to the (y,x)-order in the facereclib, we have to transpose
-    color_image = tiles[0].asMatrix3D()
-    out_images = numpy.ndarray((color_image.shape[0], color_image.shape[2], color_image.shape[1]), dtype = numpy.uint8)
+    # return the image in the format that Bob knows and understands
+    return self._numpy_image(tiles[0])
 
-    # iterate over color layers
-    for j in range(color_image.shape[0]):
-      out_images[j,:,:] = color_image[j].transpose()[:,:]
 
-    # WARNING! This contradicts the default way, images are written. Here, we write full color information!
-    return out_images
+  def __call__(self, image, annotations):
+    """Preprocesses the image using the LDA-IR preprocessor :py:meth:`facerec2010.baseline.lda.LRLDA.preprocess`.
+
+    **Parameters:**
+
+    image : :py:class:`pyvision.Image` or :py:class:`numpy.ndarray`
+      The color image that should be preprocessed.
+
+    annotations : dict
+      The eye annotations for the image.
+      They need to be specified as ``{'reye' : (re_y, re_x), 'leye' : (le_y, le_x)}``, where right and left is in subject perspective.
+
+    **Returns:**
+
+    preprocessed : 3D numpy.ndarray
+      The preprocessed color image, in default Bob format.
+    """
+    if self.face_detector is not None:
+      if isinstance(image, pyvision.Image):
+        # the face detector requires numpy arrays
+        image = self._numpy_image(image)
+        import bob.io.base
+        import bob.io.image
+        bob.io.base.save(image.astype(numpy.uint8), "test.png")
+      # call face detector with the (transformed) image
+      return self.face_detector.crop_face(image, annotations)
+
+    return self.crop_face(image, annotations)
 
 
   def read_original_data(self, image_file):
diff --git a/bob/bio/csu/preprocessor/LRPCA.py b/bob/bio/csu/preprocessor/LRPCA.py
index 0b1a96e4de03aa38bb23c2a7cca7e25f461eeede..3f7e6e4334c4b94de072ce9f58a1649ccce00abe 100644
--- a/bob/bio/csu/preprocessor/LRPCA.py
+++ b/bob/bio/csu/preprocessor/LRPCA.py
@@ -19,24 +19,49 @@
 
 import facerec2010
 import pyvision
+import PIL
 import numpy
 import bob.bio.base
+import bob.bio.face
 
-class LRPCA (bob.bio.base.preprocessor.Preprocessor):
+class LRPCA (bob.bio.face.preprocessor.FaceCrop):
   """This class defines a wrapper for the :py:class:`facerec2010.baseline.lda.LRPCA` class to be used as an image :py:class:`bob.bio.base.preprocessor.Preprocessor`.
 
   **Parameters:**
 
-  TUNING : dict
+  ``TUNING`` : {}
     The tuning for the LRPCA algorithm as taken from the :py:attr:`facerec2010.baseline.lrpca.GBU_TUNING`.
+
+  ``face_detector`` : :py:class:`bob.bio.face.preprocessor.FaceDetect` or str
+    The face detector used to detect the face in the image.
+    Might be an instance of a :py:class:`FaceDetect` or the name of a face detector resource.
   """
 
-  def __init__(self, TUNING):
-    bob.bio.base.preprocessor.Preprocessor.__init__(self, **TUNING)
+  def __init__(self, TUNING = facerec2010.baseline.lrpca.GBU_TUNING, face_detector = None):
+    bob.bio.base.preprocessor.Preprocessor.__init__(self, TUNING=str(TUNING), face_detector=str(face_detector))
     self.lrpca = facerec2010.baseline.lrpca.LRPCA(**TUNING)
+    self.face_detector = bob.bio.face.preprocessor.utils.load_cropper(face_detector)
 
+    if self.face_detector is not None:
+      assert isinstance(self.face_detector, bob.bio.face.preprocessor.FaceDetect)
+      # assign ourselves to be the face cropper that should be used after face detection
+      self.face_detector.cropper = self
 
-  def __call__(self, image, annotations):
+  def _py_image(self, image):
+    """Converts the given image to pyvision images."""
+    pil_image = PIL.Image.new("L",(image.shape[1],image.shape[0]))
+    # TODO: Test if there is any faster method to convert the image type
+    for y in range(image.shape[0]):
+      for x in range(image.shape[1]):
+        # copy image content (re-order [y,x] to (x,y))
+        pil_image.putpixel((x,y),image[y,x])
+
+    # convert to pyvision image
+    py_image = pyvision.Image(pil_image)
+    return py_image
+
+
+  def crop_face(self, image, annotations):
     """__call__(image, annotations) -> preprocessed
     Preprocesses the image using the :py:meth:`facerec2010.baseline.lrpca.LRPCA.preprocess` function.
 
@@ -55,6 +80,8 @@ class LRPCA (bob.bio.base.preprocessor.Preprocessor):
       The preprocessed image, in default Bob format.
     """
     assert isinstance(image, (pyvision.Image, numpy.ndarray))
+    if isinstance(image, numpy.ndarray):
+      image = self._py_image(image)
 
     # assure that the eye positions are in the set of annotations
     if annotations is None or 'leye' not in annotations or 'reye' not in annotations:
@@ -74,6 +101,35 @@ class LRPCA (bob.bio.base.preprocessor.Preprocessor):
     return tile.asMatrix2D().transpose()
 
 
+  def __call__(self, image, annotations):
+    """__call__(image, annotations) -> preprocessed
+    Preprocesses the image using the :py:meth:`facerec2010.baseline.lrpca.LRPCA.preprocess` function.
+
+    **Parameters:**
+
+    image : :py:class:`pyvision.Image` or :py:class:`numpy.ndarray`
+      The gray level or color image that should be preprocessed.
+
+    annotations : dict
+      The eye annotations for the image.
+      They need to be specified as ``{'reye' : (re_y, re_x), 'leye' : (le_y, le_x)}``, where right and left is in subject perspective.
+
+    **Returns:**
+
+    preprocessed : numpy.ndarray
+      The preprocessed image, in default Bob format.
+    """
+    if self.face_detector is not None:
+      if isinstance(image, pyvision.Image):
+        # the face detector requires numpy arrays
+        image = image.asMatrix2D().transpose().astype(numpy.float64)
+      # call face detector with the (transformed) image
+      return self.face_detector.crop_face(image, annotations)
+
+    return self.crop_face(image, annotations)
+
+
+
   def read_original_data(self, image_file):
     """read_original_data(image_file) -> image
 
diff --git a/bob/bio/csu/test/data/ldair_detected.hdf5 b/bob/bio/csu/test/data/ldair_detected.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..4fd714031471954f33067cde7c14adaf0fdd49c5
Binary files /dev/null and b/bob/bio/csu/test/data/ldair_detected.hdf5 differ
diff --git a/bob/bio/csu/test/data/ldair_extracted.hdf5 b/bob/bio/csu/test/data/ldair_extracted.hdf5
index 43f271f6fc606222e30fd929d3f05d7e8db14e0f..846fc7e7c002fee7d9ee5fe78c2abaa0f5d4a59b 100644
Binary files a/bob/bio/csu/test/data/ldair_extracted.hdf5 and b/bob/bio/csu/test/data/ldair_extracted.hdf5 differ
diff --git a/bob/bio/csu/test/data/ldair_model.hdf5 b/bob/bio/csu/test/data/ldair_model.hdf5
index c8f9da5096a837e2541cd02c1ec349322ac50f99..51a7b627f809d9412d9284ae9aa248282a3dc67e 100644
Binary files a/bob/bio/csu/test/data/ldair_model.hdf5 and b/bob/bio/csu/test/data/ldair_model.hdf5 differ
diff --git a/bob/bio/csu/test/data/ldair_preprocessed.hdf5 b/bob/bio/csu/test/data/ldair_preprocessed.hdf5
index 3e5958dd55a29a5b35998af3c61519ed4e0f89d2..e087324d1176d69076df17d5410a3e73ae3a6dd7 100644
Binary files a/bob/bio/csu/test/data/ldair_preprocessed.hdf5 and b/bob/bio/csu/test/data/ldair_preprocessed.hdf5 differ
diff --git a/bob/bio/csu/test/data/lrpca_detected.hdf5 b/bob/bio/csu/test/data/lrpca_detected.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..2beab06a2ea00ac2ae451af774aece25fc355188
Binary files /dev/null and b/bob/bio/csu/test/data/lrpca_detected.hdf5 differ
diff --git a/bob/bio/csu/test/test_extractors.py b/bob/bio/csu/test/test_extractors.py
index 25d805773e3c8a5c9a00bf4b4440b137ef37b1c7..c422e4d6f3641a58f8c0da9677fab7b5e95c89e1 100644
--- a/bob/bio/csu/test/test_extractors.py
+++ b/bob/bio/csu/test/test_extractors.py
@@ -28,6 +28,8 @@ regenerate_refs = False
 import bob.bio.base
 import bob.bio.face
 import facerec2010
+import bob.io.base.test_utils
+import shutil
 
 from bob.bio.face.test.test_extractors import _compare
 
diff --git a/bob/bio/csu/test/test_preprocessors.py b/bob/bio/csu/test/test_preprocessors.py
index 062547294152fb09ade392eda5aeaebc0d65a259..fb80c93490f66de8daba378b4d186d7f23099861 100644
--- a/bob/bio/csu/test/test_preprocessors.py
+++ b/bob/bio/csu/test/test_preprocessors.py
@@ -52,6 +52,19 @@ def test_lrpca():
   # for some reason, LR-PCA produces slightly different outputs on some machines
   _compare(preprocessor(image, annotation), reference, preprocessor.write_data, preprocessor.read_data, atol=1., rtol=1e-2)
 
+def test_lrpca_detect():
+  # create preprocessor including face detector
+  preprocessor = bob.bio.csu.preprocessor.LRPCA(face_detector='landmark-detect')
+
+  # read input
+  image, annotation = _image(preprocessor), _annotation()
+
+  # execute face cropper
+  reference = pkg_resources.resource_filename('bob.bio.csu.test', 'data/lrpca_detected.hdf5')
+  # for some reason, LR-PCA produces slightly different outputs on some machines
+  _compare(preprocessor(image, annotation), reference, preprocessor.write_data, preprocessor.read_data, atol=1., rtol=1e-2)
+
+
 
 def test_ldair():
   # load resource
@@ -65,3 +78,15 @@ def test_ldair():
   # execute face cropper
   reference = pkg_resources.resource_filename('bob.bio.csu.test', 'data/ldair_preprocessed.hdf5')
   _compare(preprocessor(image, annotation), reference, preprocessor.write_data, preprocessor.read_data, atol=1.)
+
+def test_ldair_detect():
+  # create preprocessor including face detector
+  preprocessor = bob.bio.csu.preprocessor.LDAIR(face_detector='landmark-detect')
+
+  # read input
+  image, annotation = _image(preprocessor), _annotation()
+
+  # execute face cropper
+  reference = pkg_resources.resource_filename('bob.bio.csu.test', 'data/ldair_detected.hdf5')
+  # for some reason, LDA-IR produces slightly different outputs on some machines
+  _compare(preprocessor(image, annotation), reference, preprocessor.write_data, preprocessor.read_data, atol=1., rtol=1e-2)
diff --git a/buildout.cfg b/buildout.cfg
index 5603b0620991da0050be799fe541eb0f79c72b2e..b16ffda8452612324565c513200e3d9558832f46 100644
--- a/buildout.cfg
+++ b/buildout.cfg
@@ -37,7 +37,8 @@ develop = src/bob.extension
           src/bob.learn.boosting
           src/bob.ip.facedetect
           src/bob.ip.flandmark
-          ${buildout:csu-dir}/PythonFaceEvaluation
+          src/bob.bio.face
+          ${buildout:csu-dir}
           .
 
 csu-dir = [PATH_TO_YOUR_CSU_COPY]
@@ -71,6 +72,7 @@ bob.bio.base = git https://github.com/bioidiap/bob.bio.base
 bob.learn.boosting = git https://github.com/bioidiap/bob.learn.boosting
 bob.ip.facedetect = git https://github.com/bioidiap/bob.ip.facedetect
 bob.ip.flandmark = git https://github.com/bioidiap/bob.ip.flandmark
+bob.bio.face = git https://github.com/bioidiap/bob.bio.face
 
 [scripts]
 recipe = bob.buildout:scripts
diff --git a/patch/__init__.py b/patch/__init__.py
index 28687bbf20d4c7aae8472a25062413515acbdabb..3ca0ab423b69155bd1bd17096572e80d8e7b1fe5 100644
--- a/patch/__init__.py
+++ b/patch/__init__.py
@@ -34,9 +34,9 @@ def main():
     raise ValueError("The given directory '%s' does not exist." % sys.argv[1])
 
 
-  base_dir = os.path.join(sys.argv[1], 'PythonFaceEvaluation')
+  base_dir = sys.argv[1]
   if not os.path.isdir(base_dir):
-    raise IOError("The given directory '%s' does not contain a 'PythonFaceEvaluation' subdirectory. Please specify the base directory of the PythonFaceEvaluation toolkit." % sys.argv[1])
+    raise IOError("The given directory '%s' does not exist. Please specify the directory of the PythonFaceEvaluation toolkit." % sys.argv[1])
 
   if os.path.isfile(os.path.join(base_dir, 'setup.py')):
     raise IOError("The given directory '%s' already seems to be patched." % sys.argv[1])