diff --git a/bob/bio/face/preprocessor/FaceCrop.py b/bob/bio/face/preprocessor/FaceCrop.py
index 9b04b9decc79e3099b987b1e67c2843e34a096e0..23cfc2494eff78c86703cc6b9f2ff098a73a43d1 100644
--- a/bob/bio/face/preprocessor/FaceCrop.py
+++ b/bob/bio/face/preprocessor/FaceCrop.py
@@ -373,12 +373,39 @@ class FaceCrop(Base):
         self._init_non_pickables()
 
 
-class MultiFaceCrop(TransformerMixin, BaseEstimator):
+class MultiFaceCrop(Base):
+    """ Wraps around FaceCrop to enable a dynamical cropper that can handle several annotation types.
+    Initialization and usage is similar to the FaceCrop, but the main difference here is that one specifies
+    a *list* of cropped_positions, and optionally a *list* of associated fixed positions.
+
+    For each set of cropped_positions in the list, a new FaceCrop will be instanciated that handles this
+    exact set of annotations.
+    When calling the *transform* method, the MultiFaceCrop matches each sample to its associated cropper
+    based on the received annotation, then performs the cropping of each subset, and finally gathers the results.
+
+    In case of ambiguity (when no cropper is a match for the received annotations, or when several croppers
+    match the received annotations), raises a ValueError.
+
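+    Example (a minimal usage sketch; the image size, positions, and variable names are illustrative)::
+
+        cropper = MultiFaceCrop(
+            cropped_image_size=(80, 64),
+            cropped_positions_list=[
+                {"leye": (16, 15), "reye": (16, 48)},
+                {"topleft": (0, 0), "bottomright": (80, 64)},
+            ],
+        )
+        # Each sample is routed to the FaceCrop whose cropped_positions keys
+        # are a subset of that sample's annotation keys.
+        cropped = cropper.transform([image_1, image_2], [eye_annotations, bbox_annotations])
+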
+    """
+
     def __init__(
         self,
         cropped_image_size,
         cropped_positions_list,
-        fixed_positions=None,
+        fixed_positions_list=None,
         mask_sigma=None,
         mask_neighbors=5,
         mask_seed=None,
@@ -388,9 +415,15 @@ class MultiFaceCrop(TransformerMixin, BaseEstimator):
     ):
 
         assert isinstance(cropped_positions_list, list)
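+        # When no fixed positions are provided, default to None for every cropper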
+        if fixed_positions_list is None:
+            fixed_positions_list = [None] * len(cropped_positions_list)
+        assert isinstance(fixed_positions_list, list)
 
         self.croppers = {}
-        for cropped_positions in cropped_positions_list:
+        for cropped_positions, fixed_positions in zip(
+            cropped_positions_list, fixed_positions_list
+        ):
             assert len(cropped_positions) == 2
             self.croppers[tuple(cropped_positions)] = FaceCrop(
                 cropped_image_size,
@@ -408,26 +441,38 @@ class MultiFaceCrop(TransformerMixin, BaseEstimator):
         subsets = {k: {"X": [], "annotations": []} for k in self.croppers.keys()}
 
         def assign(X_elem, annotations_elem):
+            # Assign a single sample to its matching cropper
+
+            # Compare the received annotations keys to the cropped_positions keys of each cropper
             valid_keys = [
                 k
                 for k in self.croppers.keys()
                 if set(k).issubset(set(annotations_elem.keys()))
             ]
-            assert (
-                len(valid_keys) == 1
-            ), "Cropper selection from the annotations is ambiguous ({} valid croppers)".format(
-                len(valid_keys)
-            )
-            subsets[valid_keys[0]]["X"].append(X_elem)
-            subsets[valid_keys[0]]["annotations"].append(annotations_elem)
 
+            # Ensure that exactly one cropper matches the received annotations
+            if len(valid_keys) != 1:
+                raise ValueError(
+                    "Cropper selection from the annotations is ambiguous ({} valid croppers)".format(
+                        len(valid_keys)
+                    )
+                )
+
+            # Assign the sample to this particular cropper
+            cropper_key = valid_keys[0]
+            subsets[cropper_key]["X"].append(X_elem)
+            subsets[cropper_key]["annotations"].append(annotations_elem)
+
+        # Assign each sample to its matching cropper
         for X_elem, annotations_elem in zip(X, annotations):
             assign(X_elem, annotations_elem)
 
+        # Call each FaceCrop on its sample subset
         transformed_subsets = {
             k: self.croppers[k].transform(**subsets[k]) for k in subsets.keys()
         }
 
+        # Gather the results
         return [item for sublist in transformed_subsets.values() for item in sublist]
 
     def fit(self, X, y=None):
diff --git a/bob/bio/face/preprocessor/__init__.py b/bob/bio/face/preprocessor/__init__.py
index 61ac1346cc73dd165e717f91595f08969f300d09..1a0c9b64a3e248993333f3161503a1d3b2eae1af 100644
--- a/bob/bio/face/preprocessor/__init__.py
+++ b/bob/bio/face/preprocessor/__init__.py
@@ -1,5 +1,5 @@
 from .Base import Base
-from .FaceCrop import FaceCrop
+from .FaceCrop import FaceCrop, MultiFaceCrop
 
 from .TanTriggs import TanTriggs
 from .INormLBP import INormLBP
diff --git a/bob/bio/face/test/data/cropped_bbox.hdf5 b/bob/bio/face/test/data/cropped_bbox.hdf5
new file mode 100644
index 0000000000000000000000000000000000000000..a0048061a5901e7af7d5278cd0430f79294254bd
Binary files /dev/null and b/bob/bio/face/test/data/cropped_bbox.hdf5 differ
diff --git a/bob/bio/face/test/data/testimage_bbox.pos b/bob/bio/face/test/data/testimage_bbox.pos
new file mode 100644
index 0000000000000000000000000000000000000000..933a7652a24eb5a8b8a0663100a3c729c3ff1fbd
--- /dev/null
+++ b/bob/bio/face/test/data/testimage_bbox.pos
@@ -0,0 +1,2 @@
+topleft 85 130
+bottomright 270 330
diff --git a/bob/bio/face/test/test_preprocessors.py b/bob/bio/face/test/test_preprocessors.py
index 4b02fc8c46a8095a5f2a742ced9213f4199294d6..51e6675f5c50efd009f04508800577f347a0671d 100644
--- a/bob/bio/face/test/test_preprocessors.py
+++ b/bob/bio/face/test/test_preprocessors.py
@@ -151,6 +151,56 @@ def test_face_crop():
     # reset the configuration, so that later tests don't get screwed.
     cropper.color_channel = "gray"
 
+def test_multi_face_crop():
+    # read input
+    image = _image()
+    eye_annotation, bbox_annotation = [
+        bob.db.base.read_annotation_file(
+            pkg_resources.resource_filename("bob.bio.face.test", "data/" + filename + ".pos"),
+            "named"
+        )
+        for filename in ["testimage", "testimage_bbox"]
+    ]
+
+    # define the preprocessor
+    cropper = bob.bio.face.preprocessor.MultiFaceCrop(
+        cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
+        cropped_positions_list=[
+            {'leye': LEFT_EYE_POS, 'reye': RIGHT_EYE_POS},
+            {'topleft': (0, 0), 'bottomright': (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)}
+        ]
+    )
+
+    # load the reference paths and execute the face cropper
+    eye_reference, bbox_reference = [
+        pkg_resources.resource_filename(
+            "bob.bio.face.test", "data/" + filename + ".hdf5"
+        )
+        for filename in ["cropped", "cropped_bbox"]
+    ]
+
+    eye_cropped, bbox_cropped = cropper.transform([image, image], [eye_annotation, bbox_annotation])
+
+    # Compare the cropped results to the reference
+    _compare(eye_cropped, eye_reference)
+    _compare(bbox_cropped, bbox_reference)
+
+    # test a ValueError is raised if the annotations don't match any cropper
+    try:
+        annot = dict(landmark_A=(60, 60), landmark_B=(120, 120))
+        cropper.transform([image], [annot])
+        assert 0, "MultiFaceCrop did not raise a ValueError for annotations matching no cropper"
+    except ValueError:
+        pass
+
+    # test a ValueError is raised if the annotations match several croppers
+    try:
+        annot = {**eye_annotation, **bbox_annotation}
+        cropper.transform([image], [annot])
+        assert 0, "MultiFaceCrop did not raise a ValueError for annotations matching several croppers"
+    except ValueError:
+        pass
+
 
 def test_tan_triggs():
     # read input