diff --git a/bob/pad/face/config/lbp_svm.py b/bob/pad/face/config/lbp_svm.py
index 3261e8ff815e23c2edd3609ce32d2a0b9f31a1af..f5eba5286f783b20d3a07d852d3fa5cc78c4999c 100644
--- a/bob/pad/face/config/lbp_svm.py
+++ b/bob/pad/face/config/lbp_svm.py
@@ -20,35 +20,30 @@ this resource.
 #=======================================================================================
 # define preprocessor:
 
-from ..preprocessor import VideoFaceCrop
-
-CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
-CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
-FIXED_POSITIONS = None
-MASK_SIGMA = None  # The sigma for random values areas outside image
-MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
-MASK_SEED = None  # The seed for generating random values during extrapolation
-CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
-MIN_FACE_SIZE = 50  # Minimal possible size of the face
-USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
-COLOR_CHANNEL = 'gray'  # Convert image to gray-scale format
-
-preprocessor = VideoFaceCrop(
-    cropped_image_size=CROPPED_IMAGE_SIZE,
-    cropped_positions=CROPPED_POSITIONS,
-    fixed_positions=FIXED_POSITIONS,
-    mask_sigma=MASK_SIGMA,
-    mask_neighbors=MASK_NEIGHBORS,
-    mask_seed=None,
-    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
-    min_face_size=MIN_FACE_SIZE,
-    use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
-    color_channel=COLOR_CHANNEL)
+from ..preprocessor import FaceCropAlign
+
+from bob.bio.video.preprocessor import Wrapper
+
+FACE_SIZE = 64 # The size of the resulting face
+RGB_OUTPUT_FLAG = False # Gray-scale output
+USE_FACE_ALIGNMENT = False # use annotations
+MAX_IMAGE_SIZE = None # no limiting here
+FACE_DETECTION_METHOD = None # use annotations
+MIN_FACE_SIZE = 50 # skip small faces
+
+image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                   rgb_output_flag = RGB_OUTPUT_FLAG,
+                                   use_face_alignment = USE_FACE_ALIGNMENT,
+                                   max_image_size = MAX_IMAGE_SIZE,
+                                   face_detection_method = FACE_DETECTION_METHOD,
+                                   min_face_size = MIN_FACE_SIZE)
+
+preprocessor = Wrapper(image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
-The size of the face is normalized to ``cropped_image_size`` dimensions. The faces with the size
-below ``min_face_size`` threshold are discarded. The preprocessor is similar to the one introduced in
-[CAM12]_, which is defined by ``use_local_cropper_flag = True``.
+The size of the face is normalized to ``FACE_SIZE`` dimensions. Faces smaller than the
+``MIN_FACE_SIZE`` threshold are discarded. The preprocessor is similar to the one introduced in
+[CAM12]_; this behavior is selected by setting ``FACE_DETECTION_METHOD = None``.
 """
 
 #=======================================================================================
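For illustration only (not part of the change): a minimal sketch of how the wrapped
preprocessor defined above could be applied, assuming a ``bob.bio.video.FrameContainer``
named ``video`` and per-frame bounding-box annotations in the format used by this package
(string frame indices mapping to ``topleft``/``bottomright`` coordinates).

    annotations = {'0': {'topleft': (95, 155), 'bottomright': (215, 265)},
                   '1': {'topleft': (95, 155), 'bottomright': (215, 265)}}

    # crop the face in every annotated frame; the call signature mirrors the tests below
    preprocessed_video = preprocessor(frames=video, annotations=annotations)

    # each remaining frame holds a 64x64 gray-scale face crop (FACE_SIZE, RGB_OUTPUT_FLAG = False);
    # frames whose faces are smaller than MIN_FACE_SIZE pixels are discarded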
diff --git a/bob/pad/face/config/lbp_svm_aggregated_db.py b/bob/pad/face/config/lbp_svm_aggregated_db.py
index 778364e8c7d69047cc4218cbec6136312e05161d..fb84bd471c3f64caeefc48441c98299dec096a18 100644
--- a/bob/pad/face/config/lbp_svm_aggregated_db.py
+++ b/bob/pad/face/config/lbp_svm_aggregated_db.py
@@ -22,35 +22,30 @@ this resource.
 #=======================================================================================
 # define preprocessor:
 
-from ..preprocessor import VideoFaceCrop
-
-CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
-CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
-FIXED_POSITIONS = None
-MASK_SIGMA = None  # The sigma for random values areas outside image
-MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
-MASK_SEED = None  # The seed for generating random values during extrapolation
-CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
-MIN_FACE_SIZE = 50  # Minimal possible size of the face
-USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
-COLOR_CHANNEL = 'gray'  # Convert image to gray-scale format
-
-preprocessor = VideoFaceCrop(
-    cropped_image_size=CROPPED_IMAGE_SIZE,
-    cropped_positions=CROPPED_POSITIONS,
-    fixed_positions=FIXED_POSITIONS,
-    mask_sigma=MASK_SIGMA,
-    mask_neighbors=MASK_NEIGHBORS,
-    mask_seed=None,
-    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
-    min_face_size=MIN_FACE_SIZE,
-    use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
-    color_channel=COLOR_CHANNEL)
+from ..preprocessor import FaceCropAlign
+
+from bob.bio.video.preprocessor import Wrapper
+
+FACE_SIZE = 64 # The size of the resulting face
+RGB_OUTPUT_FLAG = False # Gray-scale output
+USE_FACE_ALIGNMENT = False # use annotations
+MAX_IMAGE_SIZE = None # no limiting here
+FACE_DETECTION_METHOD = None # use annotations
+MIN_FACE_SIZE = 50 # skip small faces
+
+image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                   rgb_output_flag = RGB_OUTPUT_FLAG,
+                                   use_face_alignment = USE_FACE_ALIGNMENT,
+                                   max_image_size = MAX_IMAGE_SIZE,
+                                   face_detection_method = FACE_DETECTION_METHOD,
+                                   min_face_size = MIN_FACE_SIZE)
+
+preprocessor = Wrapper(image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
-The size of the face is normalized to ``cropped_image_size`` dimensions. The faces with the size
-below ``min_face_size`` threshold are discarded. The preprocessor is similar to the one introduced in
-[CAM12]_, which is defined by ``use_local_cropper_flag = True``.
+The size of the face is normalized to ``FACE_SIZE`` dimensions. Faces smaller than the
+``MIN_FACE_SIZE`` threshold are discarded. The preprocessor is similar to the one introduced in
+[CAM12]_; this behavior is selected by setting ``FACE_DETECTION_METHOD = None``.
 """
 
 #=======================================================================================
diff --git a/bob/pad/face/config/preprocessor/video_face_crop.py b/bob/pad/face/config/preprocessor/video_face_crop.py
index 08ed1448ed1240d7ce60ef81de560ad336a0ba5d..57bb5901fc530fc2e20c4624ddd401ddb656f83e 100644
--- a/bob/pad/face/config/preprocessor/video_face_crop.py
+++ b/bob/pad/face/config/preprocessor/video_face_crop.py
@@ -1,61 +1,38 @@
 #!/usr/bin/env python2
 # -*- coding: utf-8 -*-
 
-from bob.pad.face.preprocessor import VideoFaceCrop
+from ..preprocessor import FaceCropAlign
 
-#=======================================================================================
+from bob.bio.video.preprocessor import Wrapper
+
+# =======================================================================================
 # Define instances here:
 
-CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
-CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
-FIXED_POSITIONS = None
-MASK_SIGMA = None  # The sigma for random values areas outside image
-MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
-MASK_SEED = None  # The seed for generating random values during extrapolation
-CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
-MIN_FACE_SIZE = 50
-USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
-RGB_OUTPUT_FLAG = True  # Return RGB cropped face using local cropper
-DETECT_FACES_FLAG = True  # find annotations locally replacing the database annotations
-FACE_DETECTION_METHOD = "dlib"
-
-rgb_face_detector_dlib = VideoFaceCrop(
-    cropped_image_size=CROPPED_IMAGE_SIZE,
-    cropped_positions=CROPPED_POSITIONS,
-    fixed_positions=FIXED_POSITIONS,
-    mask_sigma=MASK_SIGMA,
-    mask_neighbors=MASK_NEIGHBORS,
-    mask_seed=None,
-    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
-    min_face_size=MIN_FACE_SIZE,
-    use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
-    rgb_output_flag=RGB_OUTPUT_FLAG,
-    detect_faces_flag=DETECT_FACES_FLAG,
-    face_detection_method=FACE_DETECTION_METHOD)
-
-CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
-CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
-FIXED_POSITIONS = None
-MASK_SIGMA = None  # The sigma for random values areas outside image
-MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
-MASK_SEED = None  # The seed for generating random values during extrapolation
-CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
-MIN_FACE_SIZE = 50
-USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
-RGB_OUTPUT_FLAG = True  # Return RGB cropped face using local cropper
-DETECT_FACES_FLAG = True  # find annotations locally replacing the database annotations
-FACE_DETECTION_METHOD = "mtcnn"
-
-rgb_face_detector_mtcnn = VideoFaceCrop(
-    cropped_image_size=CROPPED_IMAGE_SIZE,
-    cropped_positions=CROPPED_POSITIONS,
-    fixed_positions=FIXED_POSITIONS,
-    mask_sigma=MASK_SIGMA,
-    mask_neighbors=MASK_NEIGHBORS,
-    mask_seed=None,
-    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
-    min_face_size=MIN_FACE_SIZE,
-    use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
-    rgb_output_flag=RGB_OUTPUT_FLAG,
-    detect_faces_flag=DETECT_FACES_FLAG,
-    face_detection_method=FACE_DETECTION_METHOD)
+
+FACE_SIZE = 64  # The size of the resulting face
+RGB_OUTPUT_FLAG = True  # RGB output
+USE_FACE_ALIGNMENT = False  # do not use face alignment
+MAX_IMAGE_SIZE = None  # no limiting here
+FACE_DETECTION_METHOD = "dlib"  # use dlib face detection
+MIN_FACE_SIZE = 50  # skip small faces
+
+image_preprocessor = FaceCropAlign(face_size=FACE_SIZE,
+                                   rgb_output_flag=RGB_OUTPUT_FLAG,
+                                   use_face_alignment=USE_FACE_ALIGNMENT,
+                                   max_image_size=MAX_IMAGE_SIZE,
+                                   face_detection_method=FACE_DETECTION_METHOD,
+                                   min_face_size=MIN_FACE_SIZE)
+
+rgb_face_detector_dlib = Wrapper(image_preprocessor)
+
+# =======================================================================================
+FACE_DETECTION_METHOD = "mtcnn"  # use mtcnn face detection
+
+image_preprocessor = FaceCropAlign(face_size=FACE_SIZE,
+                                   rgb_output_flag=RGB_OUTPUT_FLAG,
+                                   use_face_alignment=USE_FACE_ALIGNMENT,
+                                   max_image_size=MAX_IMAGE_SIZE,
+                                   face_detection_method=FACE_DETECTION_METHOD,
+                                   min_face_size=MIN_FACE_SIZE)
+
+rgb_face_detector_mtcnn = Wrapper(image_preprocessor)
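For illustration only (not part of the change): a sketch of how one of the detector-based
preprocessors defined above might be used. Here the face bounding boxes are located with
dlib instead of being read from the database annotations; passing ``annotations=None`` is
an assumption about the Wrapper interface, mirroring how the updated test discards the
annotations once a detection method is configured.

    from bob.pad.face.config.preprocessor.video_face_crop import rgb_face_detector_dlib

    # faces are detected in each frame and returned as 64x64 RGB crops
    preprocessed_video = rgb_face_detector_dlib(frames=video, annotations=None)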
diff --git a/bob/pad/face/config/qm_lr.py b/bob/pad/face/config/qm_lr.py
index 19a5d4221f0156c01e32d78a09ed848468416ff3..1e7522c046720a6313cc1d58aa3102b22daeca99 100644
--- a/bob/pad/face/config/qm_lr.py
+++ b/bob/pad/face/config/qm_lr.py
@@ -19,35 +19,30 @@ this resource.
 #=======================================================================================
 # define preprocessor:
 
-from ..preprocessor import VideoFaceCrop
-
-CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
-CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
-FIXED_POSITIONS = None
-MASK_SIGMA = None  # The sigma for random values areas outside image
-MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
-MASK_SEED = None  # The seed for generating random values during extrapolation
-CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
-MIN_FACE_SIZE = 50
-USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
-RGB_OUTPUT_FLAG = True  # Return RGB cropped face using local cropper
-
-preprocessor = VideoFaceCrop(
-    cropped_image_size=CROPPED_IMAGE_SIZE,
-    cropped_positions=CROPPED_POSITIONS,
-    fixed_positions=FIXED_POSITIONS,
-    mask_sigma=MASK_SIGMA,
-    mask_neighbors=MASK_NEIGHBORS,
-    mask_seed=None,
-    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
-    min_face_size=MIN_FACE_SIZE,
-    use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
-    rgb_output_flag=RGB_OUTPUT_FLAG)
+from ..preprocessor import FaceCropAlign
+
+from bob.bio.video.preprocessor import Wrapper
+
+FACE_SIZE = 64 # The size of the resulting face
+RGB_OUTPUT_FLAG = True # RGB output
+USE_FACE_ALIGNMENT = False # use annotations
+MAX_IMAGE_SIZE = None # no limiting here
+FACE_DETECTION_METHOD = None # use annotations
+MIN_FACE_SIZE = 50 # skip small faces
+
+image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                   rgb_output_flag = RGB_OUTPUT_FLAG,
+                                   use_face_alignment = USE_FACE_ALIGNMENT,
+                                   max_image_size = MAX_IMAGE_SIZE,
+                                   face_detection_method = FACE_DETECTION_METHOD,
+                                   min_face_size = MIN_FACE_SIZE)
+
+preprocessor = Wrapper(image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
-The size of the face is normalized to ``cropped_image_size`` dimensions. The faces of the size
-below ``min_face_size`` threshold are discarded. The preprocessor is similar to the one introduced in
-[CAM12]_, which is defined by ``use_local_cropper_flag = True``. The preprocessed frame is the RGB
+The size of the face is normalized to ``FACE_SIZE`` dimensions. Faces smaller than the
+``MIN_FACE_SIZE`` threshold are discarded. The preprocessor is similar to the one introduced in
+[CAM12]_; this behavior is selected by setting ``FACE_DETECTION_METHOD = None``. The preprocessed frame is the RGB
 facial image, which is defined by ``RGB_OUTPUT_FLAG = True``.
 """
 
diff --git a/bob/pad/face/config/qm_one_class_gmm.py b/bob/pad/face/config/qm_one_class_gmm.py
index 182c71bbbe8ef8783c81c9d42a2467e6371d5d0c..52d9973910761d74b8df9686630abfef287e7b01 100644
--- a/bob/pad/face/config/qm_one_class_gmm.py
+++ b/bob/pad/face/config/qm_one_class_gmm.py
@@ -19,35 +19,30 @@ this resource.
 #=======================================================================================
 # define preprocessor:
 
-from ..preprocessor import VideoFaceCrop
-
-CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
-CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
-FIXED_POSITIONS = None
-MASK_SIGMA = None  # The sigma for random values areas outside image
-MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
-MASK_SEED = None  # The seed for generating random values during extrapolation
-CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
-MIN_FACE_SIZE = 50
-USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
-RGB_OUTPUT_FLAG = True  # Return RGB cropped face using local cropper
-
-preprocessor = VideoFaceCrop(
-    cropped_image_size=CROPPED_IMAGE_SIZE,
-    cropped_positions=CROPPED_POSITIONS,
-    fixed_positions=FIXED_POSITIONS,
-    mask_sigma=MASK_SIGMA,
-    mask_neighbors=MASK_NEIGHBORS,
-    mask_seed=None,
-    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
-    min_face_size=MIN_FACE_SIZE,
-    use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
-    rgb_output_flag=RGB_OUTPUT_FLAG)
+from ..preprocessor import FaceCropAlign
+
+from bob.bio.video.preprocessor import Wrapper
+
+FACE_SIZE = 64 # The size of the resulting face
+RGB_OUTPUT_FLAG = True # RGB output
+USE_FACE_ALIGNMENT = False # use annotations
+MAX_IMAGE_SIZE = None # no limiting here
+FACE_DETECTION_METHOD = None # use annotations
+MIN_FACE_SIZE = 50 # skip small faces
+
+image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                   rgb_output_flag = RGB_OUTPUT_FLAG,
+                                   use_face_alignment = USE_FACE_ALIGNMENT,
+                                   max_image_size = MAX_IMAGE_SIZE,
+                                   face_detection_method = FACE_DETECTION_METHOD,
+                                   min_face_size = MIN_FACE_SIZE)
+
+preprocessor = Wrapper(image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
-The size of the face is normalized to ``cropped_image_size`` dimensions. The faces of the size
-below ``min_face_size`` threshold are discarded. The preprocessor is similar to the one introduced in
-[CAM12]_, which is defined by ``use_local_cropper_flag = True``. The preprocessed frame is the RGB
+The size of the face is normalized to ``FACE_SIZE`` dimensions. Faces smaller than the
+``MIN_FACE_SIZE`` threshold are discarded. The preprocessor is similar to the one introduced in
+[CAM12]_; this behavior is selected by setting ``FACE_DETECTION_METHOD = None``. The preprocessed frame is the RGB
 facial image, which is defined by ``RGB_OUTPUT_FLAG = True``.
 """
 
diff --git a/bob/pad/face/config/qm_one_class_svm_aggregated_db.py b/bob/pad/face/config/qm_one_class_svm_aggregated_db.py
index 1216d66bf9af9cf324c0b8cc0c947eb342a78ecf..1951849d1debcca6533db74995f4221558f0aea9 100644
--- a/bob/pad/face/config/qm_one_class_svm_aggregated_db.py
+++ b/bob/pad/face/config/qm_one_class_svm_aggregated_db.py
@@ -21,35 +21,30 @@ this resource.
 #=======================================================================================
 # define preprocessor:
 
-from ..preprocessor import VideoFaceCrop
-
-CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
-CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
-FIXED_POSITIONS = None
-MASK_SIGMA = None  # The sigma for random values areas outside image
-MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
-MASK_SEED = None  # The seed for generating random values during extrapolation
-CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
-MIN_FACE_SIZE = 50
-USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
-RGB_OUTPUT_FLAG = True  # Return RGB cropped face using local cropper
-
-preprocessor = VideoFaceCrop(
-    cropped_image_size=CROPPED_IMAGE_SIZE,
-    cropped_positions=CROPPED_POSITIONS,
-    fixed_positions=FIXED_POSITIONS,
-    mask_sigma=MASK_SIGMA,
-    mask_neighbors=MASK_NEIGHBORS,
-    mask_seed=None,
-    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
-    min_face_size=MIN_FACE_SIZE,
-    use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
-    rgb_output_flag=RGB_OUTPUT_FLAG)
+from ..preprocessor import FaceCropAlign
+
+from bob.bio.video.preprocessor import Wrapper
+
+FACE_SIZE = 64 # The size of the resulting face
+RGB_OUTPUT_FLAG = True # RGB output
+USE_FACE_ALIGNMENT = False # use annotations
+MAX_IMAGE_SIZE = None # no limiting here
+FACE_DETECTION_METHOD = None # use annotations
+MIN_FACE_SIZE = 50 # skip small faces
+
+image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                   rgb_output_flag = RGB_OUTPUT_FLAG,
+                                   use_face_alignment = USE_FACE_ALIGNMENT,
+                                   max_image_size = MAX_IMAGE_SIZE,
+                                   face_detection_method = FACE_DETECTION_METHOD,
+                                   min_face_size = MIN_FACE_SIZE)
+
+preprocessor = Wrapper(image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
-The size of the face is normalized to ``cropped_image_size`` dimensions. The faces of the size
-below ``min_face_size`` threshold are discarded. The preprocessor is similar to the one introduced in
-[CAM12]_, which is defined by ``use_local_cropper_flag = True``. The preprocessed frame is the RGB
+The size of the face is normalized to ``FACE_SIZE`` dimensions. Faces smaller than the
+``MIN_FACE_SIZE`` threshold are discarded. The preprocessor is similar to the one introduced in
+[CAM12]_; this behavior is selected by setting ``FACE_DETECTION_METHOD = None``. The preprocessed frame is the RGB
 facial image, which is defined by ``RGB_OUTPUT_FLAG = True``.
 """
 
diff --git a/bob/pad/face/config/qm_one_class_svm_cascade_aggregated_db.py b/bob/pad/face/config/qm_one_class_svm_cascade_aggregated_db.py
index 7fcc6122020478a7828f70e357e37cd5fa1e082f..01679f8396e5a8ab7ef974bed0f9885f8f2be83f 100644
--- a/bob/pad/face/config/qm_one_class_svm_cascade_aggregated_db.py
+++ b/bob/pad/face/config/qm_one_class_svm_cascade_aggregated_db.py
@@ -21,35 +21,30 @@ this resource.
 #=======================================================================================
 # define preprocessor:
 
-from ..preprocessor import VideoFaceCrop
-
-CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
-CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
-FIXED_POSITIONS = None
-MASK_SIGMA = None  # The sigma for random values areas outside image
-MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
-MASK_SEED = None  # The seed for generating random values during extrapolation
-CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
-MIN_FACE_SIZE = 50
-USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
-RGB_OUTPUT_FLAG = True  # Return RGB cropped face using local cropper
-
-preprocessor = VideoFaceCrop(
-    cropped_image_size=CROPPED_IMAGE_SIZE,
-    cropped_positions=CROPPED_POSITIONS,
-    fixed_positions=FIXED_POSITIONS,
-    mask_sigma=MASK_SIGMA,
-    mask_neighbors=MASK_NEIGHBORS,
-    mask_seed=None,
-    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
-    min_face_size=MIN_FACE_SIZE,
-    use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
-    rgb_output_flag=RGB_OUTPUT_FLAG)
+from ..preprocessor import FaceCropAlign
+
+from bob.bio.video.preprocessor import Wrapper
+
+FACE_SIZE = 64 # The size of the resulting face
+RGB_OUTPUT_FLAG = True # RGB output
+USE_FACE_ALIGNMENT = False # use annotations
+MAX_IMAGE_SIZE = None # no limiting here
+FACE_DETECTION_METHOD = None # use annotations
+MIN_FACE_SIZE = 50 # skip small faces
+
+image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                   rgb_output_flag = RGB_OUTPUT_FLAG,
+                                   use_face_alignment = USE_FACE_ALIGNMENT,
+                                   max_image_size = MAX_IMAGE_SIZE,
+                                   face_detection_method = FACE_DETECTION_METHOD,
+                                   min_face_size = MIN_FACE_SIZE)
+
+preprocessor = Wrapper(image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
-The size of the face is normalized to ``cropped_image_size`` dimensions. The faces of the size
-below ``min_face_size`` threshold are discarded. The preprocessor is similar to the one introduced in
-[CAM12]_, which is defined by ``use_local_cropper_flag = True``. The preprocessed frame is the RGB
+The size of the face is normalized to ``FACE_SIZE`` dimensions. Faces smaller than the
+``MIN_FACE_SIZE`` threshold are discarded. The preprocessor is similar to the one introduced in
+[CAM12]_; this behavior is selected by setting ``FACE_DETECTION_METHOD = None``. The preprocessed frame is the RGB
 facial image, which is defined by ``RGB_OUTPUT_FLAG = True``.
 """
 
diff --git a/bob/pad/face/config/qm_svm.py b/bob/pad/face/config/qm_svm.py
index 961a413fdbbc294ac325b2d5a694af3689cfae48..8e742ff2246684976233c18205d3516f80f4ee46 100644
--- a/bob/pad/face/config/qm_svm.py
+++ b/bob/pad/face/config/qm_svm.py
@@ -19,35 +19,30 @@ this resource.
 #=======================================================================================
 # define preprocessor:
 
-from ..preprocessor import VideoFaceCrop
-
-CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
-CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
-FIXED_POSITIONS = None
-MASK_SIGMA = None  # The sigma for random values areas outside image
-MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
-MASK_SEED = None  # The seed for generating random values during extrapolation
-CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
-MIN_FACE_SIZE = 50
-USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
-RGB_OUTPUT_FLAG = True  # Return RGB cropped face using local cropper
-
-preprocessor = VideoFaceCrop(
-    cropped_image_size=CROPPED_IMAGE_SIZE,
-    cropped_positions=CROPPED_POSITIONS,
-    fixed_positions=FIXED_POSITIONS,
-    mask_sigma=MASK_SIGMA,
-    mask_neighbors=MASK_NEIGHBORS,
-    mask_seed=None,
-    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
-    min_face_size=MIN_FACE_SIZE,
-    use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
-    rgb_output_flag=RGB_OUTPUT_FLAG)
+from ..preprocessor import FaceCropAlign
+
+from bob.bio.video.preprocessor import Wrapper
+
+FACE_SIZE = 64 # The size of the resulting face
+RGB_OUTPUT_FLAG = True # RGB output
+USE_FACE_ALIGNMENT = False # use annotations
+MAX_IMAGE_SIZE = None # no limiting here
+FACE_DETECTION_METHOD = None # use annotations
+MIN_FACE_SIZE = 50 # skip small faces
+
+image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                   rgb_output_flag = RGB_OUTPUT_FLAG,
+                                   use_face_alignment = USE_FACE_ALIGNMENT,
+                                   max_image_size = MAX_IMAGE_SIZE,
+                                   face_detection_method = FACE_DETECTION_METHOD,
+                                   min_face_size = MIN_FACE_SIZE)
+
+preprocessor = Wrapper(image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
-The size of the face is normalized to ``cropped_image_size`` dimensions. The faces of the size
-below ``min_face_size`` threshold are discarded. The preprocessor is similar to the one introduced in
-[CAM12]_, which is defined by ``use_local_cropper_flag = True``. The preprocessed frame is the RGB
+The size of the face is normalized to ``FACE_SIZE`` dimensions. Faces smaller than the
+``MIN_FACE_SIZE`` threshold are discarded. The preprocessor is similar to the one introduced in
+[CAM12]_; this behavior is selected by setting ``FACE_DETECTION_METHOD = None``. The preprocessed frame is the RGB
 facial image, which is defined by ``RGB_OUTPUT_FLAG = True``.
 """
 
diff --git a/bob/pad/face/config/qm_svm_aggregated_db.py b/bob/pad/face/config/qm_svm_aggregated_db.py
index 481c50b85b7ec42eaf5a1e4f59999d48bd38f605..1acd37d94b37217fb58173150db9dca06b0f591e 100644
--- a/bob/pad/face/config/qm_svm_aggregated_db.py
+++ b/bob/pad/face/config/qm_svm_aggregated_db.py
@@ -21,35 +21,30 @@ this resource.
 #=======================================================================================
 # define preprocessor:
 
-from ..preprocessor import VideoFaceCrop
-
-CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
-CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
-FIXED_POSITIONS = None
-MASK_SIGMA = None  # The sigma for random values areas outside image
-MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
-MASK_SEED = None  # The seed for generating random values during extrapolation
-CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
-MIN_FACE_SIZE = 50
-USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
-RGB_OUTPUT_FLAG = True  # Return RGB cropped face using local cropper
-
-preprocessor = VideoFaceCrop(
-    cropped_image_size=CROPPED_IMAGE_SIZE,
-    cropped_positions=CROPPED_POSITIONS,
-    fixed_positions=FIXED_POSITIONS,
-    mask_sigma=MASK_SIGMA,
-    mask_neighbors=MASK_NEIGHBORS,
-    mask_seed=None,
-    check_face_size_flag=CHECK_FACE_SIZE_FLAG,
-    min_face_size=MIN_FACE_SIZE,
-    use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
-    rgb_output_flag=RGB_OUTPUT_FLAG)
+from ..preprocessor import FaceCropAlign
+
+from bob.bio.video.preprocessor import Wrapper
+
+FACE_SIZE = 64 # The size of the resulting face
+RGB_OUTPUT_FLAG = True # RGB output
+USE_FACE_ALIGNMENT = False # use annotations
+MAX_IMAGE_SIZE = None # no limiting here
+FACE_DETECTION_METHOD = None # use annotations
+MIN_FACE_SIZE = 50 # skip small faces
+
+image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                   rgb_output_flag = RGB_OUTPUT_FLAG,
+                                   use_face_alignment = USE_FACE_ALIGNMENT,
+                                   max_image_size = MAX_IMAGE_SIZE,
+                                   face_detection_method = FACE_DETECTION_METHOD,
+                                   min_face_size = MIN_FACE_SIZE)
+
+preprocessor = Wrapper(image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
-The size of the face is normalized to ``cropped_image_size`` dimensions. The faces of the size
-below ``min_face_size`` threshold are discarded. The preprocessor is similar to the one introduced in
-[CAM12]_, which is defined by ``use_local_cropper_flag = True``. The preprocessed frame is the RGB
+The size of the face is normalized to ``FACE_SIZE`` dimensions. Faces smaller than the
+``MIN_FACE_SIZE`` threshold are discarded. The preprocessor is similar to the one introduced in
+[CAM12]_; this behavior is selected by setting ``FACE_DETECTION_METHOD = None``. The preprocessed frame is the RGB
 facial image, which is defined by ``RGB_OUTPUT_FLAG = True``.
 """
 
diff --git a/bob/pad/face/preprocessor/VideoFaceCrop.py b/bob/pad/face/preprocessor/VideoFaceCrop.py
deleted file mode 100644
index b5433657427f8a7b91e59b5e84aa6055cdb8e057..0000000000000000000000000000000000000000
--- a/bob/pad/face/preprocessor/VideoFaceCrop.py
+++ /dev/null
@@ -1,363 +0,0 @@
-#!/usr/bin/env python2
-# -*- coding: utf-8 -*-
-"""
-Created on Fri May 12 14:14:23 2017
-
-@author: Olegs Nikisins
-"""
-#==============================================================================
-# Import what is needed here:
-
-from bob.bio.base.preprocessor import Preprocessor
-
-from bob.bio.face.preprocessor import FaceCrop
-
-import bob.bio.video
-
-import numpy as np
-
-from bob.pad.face.preprocessor.ImageFaceCrop import ImageFaceCrop
-
-from ..utils.face_detection_utils import detect_faces_in_video
-
-#==============================================================================
-# Main body:
-
-
-class VideoFaceCrop(Preprocessor, object):
-    """
-    This class is designed to crop faces in each frame of the input video given
-    annotations defining the position of the face.
-
-    **Parameters:**
-
-    ``cropped_image_size`` : (int, int)
-        The size of the resulting cropped images.
-
-    ``cropped_positions`` : :py:class:`dict`
-        The coordinates in the cropped image, where the annotated points should be put to.
-        This parameter is a dictionary with usually two elements, e.g., ``{'reye':(RIGHT_EYE_Y, RIGHT_EYE_X) , 'leye':(LEFT_EYE_Y, LEFT_EYE_X)}``.
-        However, also other parameters, such as ``{'topleft' : ..., 'bottomright' : ...}`` are supported, as long as the ``annotations`` in the `__call__` function are present.
-
-    ``fixed_positions`` : :py:class:`dict`
-        Or None.
-        If specified, ignore the annotations from the database and use these fixed positions throughout.
-
-    ``mask_sigma`` : :py:class:`float`
-        Or None
-        Fill the area outside of image boundaries with random pixels from the border, by adding noise to the pixel values.
-        To disable extrapolation, set this value to ``None``.
-        To disable adding random noise, set it to a negative value or 0.
-
-    ``mask_neighbors`` : :py:class:`int`
-        The number of neighbors used during mask extrapolation.
-        See :py:func:`bob.ip.base.extrapolate_mask` for details.
-
-    ``mask_seed`` : :py:class:`int`
-        Or None.
-        The random seed to apply for mask extrapolation.
-
-        .. warning::
-          When run in parallel, the same random seed will be applied to all parallel processes.
-          Hence, results of parallel execution will differ from the results in serial execution.
-
-    ``check_face_size_flag`` : :py:class:`bool`
-        If True, only return the frames containing faces of the size above the
-        specified threshold ``min_face_size``. Default: False.
-
-    ``min_face_size`` : :py:class:`int`
-        The minimal size of the face in pixels. Only valid when ``check_face_size_flag``
-        is set to True. Default: 50.
-
-    ``use_local_cropper_flag`` : :py:class:`bool`
-        If True, use the local ImageFaceCrop class to crop faces in the frames.
-        Otherwise, the FaceCrop preprocessor from bob.bio.face is used.
-        Default: False.
-
-    ``rgb_output_flag`` : :py:class:`bool`
-        Return RGB cropped face if ``True``, otherwise a gray-scale image is
-        returned. This flag is only valid when ``use_local_cropper_flag = True``.
-        Default: ``False``.
-
-    ``detect_faces_flag`` : :py:class:`bool`
-        If set to ``True`` the facial annotations will be generated using
-        face detection. Otherwise, annotations of the database are used for
-        cropping.
-        Default: ``False``.
-
-    ``face_detection_method`` : :py:class:`str`
-        A package to be used for face detection. Options supported by this
-        package: "dlib" (dlib is a dependency of this package). If  bob.ip.mtcnn
-        is installed in your system you can use it as-well (bob.ip.mtcnn is NOT
-        a dependency of this package).
-
-    ``kwargs``
-        Remaining keyword parameters passed to the Base constructor, such as ``color_channel`` or ``dtype``.
-    """
-
-    #==========================================================================
-    def __init__(self,
-                 cropped_image_size,
-                 cropped_positions,
-                 fixed_positions=None,
-                 mask_sigma=None,
-                 mask_neighbors=5,
-                 mask_seed=None,
-                 check_face_size_flag=False,
-                 min_face_size=50,
-                 use_local_cropper_flag=False,
-                 rgb_output_flag=False,
-                 detect_faces_flag=False,
-                 face_detection_method="dlib",
-                 **kwargs):
-
-        super(VideoFaceCrop, self).__init__(
-            cropped_image_size=cropped_image_size,
-            cropped_positions=cropped_positions,
-            fixed_positions=fixed_positions,
-            mask_sigma=mask_sigma,
-            mask_neighbors=mask_neighbors,
-            mask_seed=mask_seed,
-            check_face_size_flag=check_face_size_flag,
-            min_face_size=min_face_size,
-            use_local_cropper_flag=use_local_cropper_flag,
-            rgb_output_flag=rgb_output_flag,
-            detect_faces_flag=detect_faces_flag,
-            face_detection_method=face_detection_method,
-            **kwargs)
-
-        self.cropped_image_size = cropped_image_size
-        self.cropped_positions = cropped_positions
-        self.fixed_positions = fixed_positions
-        self.mask_sigma = mask_sigma
-        self.mask_neighbors = mask_neighbors
-        self.mask_seed = mask_seed
-        self.check_face_size_flag = check_face_size_flag
-        self.min_face_size = min_face_size
-        self.use_local_cropper_flag = use_local_cropper_flag
-        self.rgb_output_flag = rgb_output_flag
-        self.detect_faces_flag = detect_faces_flag
-        self.face_detection_method = face_detection_method
-
-        # Save also the data stored in the kwargs:
-        for (k, v) in kwargs.items():
-            setattr(self, k, v)
-
-        if self.use_local_cropper_flag:
-
-            preprocessor = ImageFaceCrop(
-                face_size=self.cropped_image_size[0],
-                rgb_output_flag=self.rgb_output_flag)
-
-        else:
-
-            preprocessor = FaceCrop(
-                cropped_image_size=self.cropped_image_size,
-                cropped_positions=self.cropped_positions,
-                fixed_positions=self.fixed_positions,
-                mask_sigma=self.mask_sigma,
-                mask_neighbors=self.mask_neighbors,
-                mask_seed=self.mask_seed,
-                **kwargs)
-
-        self.video_preprocessor = bob.bio.video.preprocessor.Wrapper(
-            preprocessor)
-
-    #==========================================================================
-    def check_face_size(self, frame_container, annotations, min_face_size):
-        """
-        Return the FrameContainer containing the frames with faces of the
-        size overcoming the specified threshold.
-
-        **Parameters:**
-
-        ``frame_container`` : FrameContainer
-            Video data stored in the FrameContainer, see ``bob.bio.video.utils.FrameContainer``
-            for further details.
-
-        ``annotations`` : :py:class:`dict`
-            A dictionary containing the annotations for each frame in the video.
-            Dictionary structure: ``annotations = {'1': frame1_dict, '2': frame1_dict, ...}``.
-            Where ``frameN_dict = {'topleft': (row, col), 'bottomright': (row, col)}``
-            is the dictionary defining the coordinates of the face bounding box in frame N.
-
-        ``min_face_size`` : :py:class:`int`
-            The minimal size of the face in pixels.
-
-        **Returns:**
-
-        ``cleaned_frame_container`` : FrameContainer
-            FrameContainer containing the frames with faces of the size
-            overcoming the specified threshold.
-        """
-
-        cleaned_frame_container = bob.bio.video.FrameContainer(
-        )  # initialize the FrameContainer
-
-        selected_frame_idx = 0
-
-        for idx in range(0,
-                         np.min([len(annotations),
-                                 len(frame_container)])):  # idx - frame index
-
-            frame_annotations = annotations[str(
-                idx)]  # annotations for particular frame
-
-            # size of current face
-            face_size = np.min(
-                np.array(frame_annotations['bottomright']) -
-                np.array(frame_annotations['topleft']))
-
-            if face_size >= min_face_size:  # check if face size is above the threshold
-
-                selected_frame = frame_container[idx][1]  # get current frame
-
-                cleaned_frame_container.add(
-                    selected_frame_idx,
-                    selected_frame)  # add current frame to FrameContainer
-
-                selected_frame_idx = selected_frame_idx + 1
-
-        return cleaned_frame_container
-
-    #==========================================================================
-    def select_annotated_frames(self, frames, annotations):
-        """
-        Select only annotated frames in the input FrameContainer ``frames``.
-
-        **Parameters:**
-
-        ``frames`` : FrameContainer
-            Video data stored in the FrameContainer, see ``bob.bio.video.utils.FrameContainer``
-            for further details.
-
-        ``annotations`` : :py:class:`dict`
-            A dictionary containing the annotations for each frame in the video.
-            Dictionary structure: ``annotations = {'1': frame1_dict, '2': frame1_dict, ...}``.
-            Where ``frameN_dict = {'topleft': (row, col), 'bottomright': (row, col)}``
-            is the dictionary defining the coordinates of the face bounding box in frame N.
-
-        **Returns:**
-
-        ``cleaned_frame_container`` : FrameContainer
-            FrameContainer containing the annotated frames only.
-
-        ``cleaned_annotations`` : :py:class:`dict`
-            A dictionary containing the annotations for each frame in the output video.
-            Dictionary structure: ``annotations = {'1': frame1_dict, '2': frame1_dict, ...}``.
-            Where ``frameN_dict = {'topleft': (row, col), 'bottomright': (row, col)}``
-            is the dictionary defining the coordinates of the face bounding box in frame N.
-        """
-
-        annotated_frames = np.sort([
-            np.int(item) for item in annotations.keys()
-        ])  # annotated frame numbers
-
-        available_frames = range(
-            0, len(frames))  # frame numbers in the input video
-
-        valid_frames = list(
-            set(annotated_frames).intersection(
-                available_frames))  # valid and annotated frames
-
-        cleaned_frame_container = bob.bio.video.FrameContainer(
-        )  # initialize the FrameContainer
-
-        cleaned_annotations = {}
-
-        for idx, valid_frame_num in enumerate(valid_frames):
-            ## valid_frame_num - is the number of the original frame having annotations
-
-            cleaned_annotations[str(idx)] = annotations[str(
-                valid_frame_num)]  # correct the frame numbers
-
-            selected_frame = frames[valid_frame_num][1]  # get current frame
-
-            cleaned_frame_container.add(
-                idx, selected_frame)  # add current frame to FrameContainer
-
-        return cleaned_frame_container, cleaned_annotations
-
-    #==========================================================================
-    def __call__(self, frames, annotations):
-        """
-        Crop the face in the input video frames given annotations for each frame.
-
-        **Parameters:**
-
-        ``frames`` : FrameContainer
-            Video data stored in the FrameContainer, see ``bob.bio.video.utils.FrameContainer``
-            for further details.
-
-        ``annotations`` : :py:class:`dict`
-            A dictionary containing the annotations for each frame in the video.
-            Dictionary structure: ``annotations = {'1': frame1_dict, '2': frame1_dict, ...}``.
-            Where ``frameN_dict = {'topleft': (row, col), 'bottomright': (row, col)}``
-            is the dictionary defining the coordinates of the face bounding box in frame N.
-
-        **Returns:**
-
-        ``preprocessed_video`` : FrameContainer
-            Cropped faces stored in the FrameContainer.
-        """
-
-        if self.detect_faces_flag:
-
-            annotations = detect_faces_in_video(frames,
-                                                self.face_detection_method)
-
-        if len(frames) != len(annotations):  # if some annotations are missing
-
-            ## Select only annotated frames:
-            frames, annotations = self.select_annotated_frames(
-                frames, annotations)
-
-        preprocessed_video = self.video_preprocessor(
-            frames=frames, annotations=annotations)
-
-        if self.check_face_size_flag:
-
-            preprocessed_video = self.check_face_size(
-                preprocessed_video, annotations, self.min_face_size)
-
-        return preprocessed_video
-
-    #==========================================================================
-    def write_data(self, frames, file_name):
-        """
-        Writes the given data (that has been generated using the __call__ function of this class) to file.
-        This method overwrites the write_data() method of the Preprocessor class.
-
-        **Parameters:**
-
-        ``frames`` :
-            data returned by the __call__ method of the class.
-
-        ``file_name`` : :py:class:`str`
-            name of the file.
-        """
-
-        if frames:  # save file if FrameContainer is not empty, otherwise do nothing.
-
-            self.video_preprocessor.write_data(frames, file_name)
-
-    #==========================================================================
-    def read_data(self, file_name):
-        """
-        Reads the preprocessed data from file.
-        This method overwrites the read_data() method of the Preprocessor class.
-
-        **Parameters:**
-
-        ``file_name`` : :py:class:`str`
-            name of the file.
-
-        **Returns:**
-
-        ``frames`` : :py:class:`bob.bio.video.FrameContainer`
-            Frames stored in the frame container.
-        """
-
-        frames = self.video_preprocessor.read_data(file_name)
-
-        return frames
diff --git a/bob/pad/face/preprocessor/__init__.py b/bob/pad/face/preprocessor/__init__.py
index fed0e8aef0729757f97d6536d088278204efa4f9..5f43d3e2fcdacb09d1feed125c727d2c0a9f7234 100644
--- a/bob/pad/face/preprocessor/__init__.py
+++ b/bob/pad/face/preprocessor/__init__.py
@@ -1,4 +1,3 @@
-from .VideoFaceCrop import VideoFaceCrop
 from .FaceCropAlign import FaceCropAlign
 from .FrameDifference import FrameDifference
 from .VideoSparseCoding import VideoSparseCoding
@@ -23,7 +22,6 @@ def __appropriate__(*args):
 
 
 __appropriate__(
-    VideoFaceCrop,
     FaceCropAlign,
     FrameDifference,
     VideoSparseCoding,
diff --git a/bob/pad/face/test/test.py b/bob/pad/face/test/test.py
index 5693bb7fa89f2f775de938ee60d79d78fadf93f7..c4d1a9bd5073019e44b2d97d9f2e67fbd9de19e2 100644
--- a/bob/pad/face/test/test.py
+++ b/bob/pad/face/test/test.py
@@ -18,9 +18,7 @@ from bob.ip.color import rgb_to_gray
 
 from ..extractor import LBPHistogram
 
-from ..preprocessor import ImageFaceCrop
-
-from ..preprocessor import VideoFaceCrop
+from ..preprocessor import FaceCropAlign
 
 from ..preprocessor import FrameDifference
 
@@ -34,6 +32,8 @@ from ..utils import face_detection_utils
 
 import random
 
+from bob.bio.video.preprocessor import Wrapper
+
 
 def test_detect_face_landmarks_in_image_mtcnn():
 
@@ -104,21 +104,21 @@ def test_lbp_histogram():
 
 
 #==============================================================================
-def test_image_face_crop():
+def test_face_crop_align():
     """
-    Test ImageFaceCrop preprocessor, which is designed to crop faces in the images.
+    Test FaceCropAlign preprocessor, which is designed to crop faces in the images.
     """
 
     image = load(datafile('test_image.png', 'bob.pad.face.test'))
     annotations = {'topleft': (95, 155), 'bottomright': (215, 265)}
 
-    preprocessor = ImageFaceCrop(face_size=64, rgb_output_flag=False)
+    preprocessor = FaceCropAlign(face_size=64, rgb_output_flag=False, use_face_alignment=False)
     face = preprocessor(image, annotations)
 
     assert face.shape == (64, 64)
     assert np.sum(face) == 429158
 
-    preprocessor = ImageFaceCrop(face_size=64, rgb_output_flag=True)
+    preprocessor = FaceCropAlign(face_size=64, rgb_output_flag=True, use_face_alignment=False)
     face = preprocessor(image, annotations)
 
     assert face.shape == (3, 64, 64)
@@ -172,35 +172,28 @@ def convert_image_to_video_data(image, annotations, n_frames):
 #==============================================================================
 def test_video_face_crop():
     """
-    Test VideoFaceCrop preprocessor, which is designed to crop faces in the video.
+    Test the FaceCropAlign preprocessor wrapped with the video Wrapper, which is designed to crop faces in each frame of the video.
     """
 
+    FACE_SIZE = 64 # The size of the resulting face
+    RGB_OUTPUT_FLAG = False # Gray-scale output
+    USE_FACE_ALIGNMENT = False # use annotations
+    MAX_IMAGE_SIZE = None # no limiting here
+    FACE_DETECTION_METHOD = None # use annotations
+    MIN_FACE_SIZE = 50 # skip small faces
+
+    image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                       rgb_output_flag = RGB_OUTPUT_FLAG,
+                                       use_face_alignment = USE_FACE_ALIGNMENT,
+                                       max_image_size = MAX_IMAGE_SIZE,
+                                       face_detection_method = FACE_DETECTION_METHOD,
+                                       min_face_size = MIN_FACE_SIZE)
+
+    preprocessor = Wrapper(image_preprocessor)
+
     image = load(datafile('test_image.png', 'bob.pad.face.test'))
     annotations = {'topleft': (95, 155), 'bottomright': (215, 265)}
 
-    CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
-    CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
-    FIXED_POSITIONS = None
-    MASK_SIGMA = None  # The sigma for random values areas outside image
-    MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
-    MASK_SEED = None  # The seed for generating random values during extrapolation
-    CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
-    MIN_FACE_SIZE = 50  # Minimal possible size of the face
-    USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
-    COLOR_CHANNEL = 'gray'  # Convert image to gray-scale format
-
-    preprocessor = VideoFaceCrop(
-        cropped_image_size=CROPPED_IMAGE_SIZE,
-        cropped_positions=CROPPED_POSITIONS,
-        fixed_positions=FIXED_POSITIONS,
-        mask_sigma=MASK_SIGMA,
-        mask_neighbors=MASK_NEIGHBORS,
-        mask_seed=MASK_SEED,
-        check_face_size_flag=CHECK_FACE_SIZE_FLAG,
-        min_face_size=MIN_FACE_SIZE,
-        use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
-        color_channel=COLOR_CHANNEL)
-
     video, annotations = convert_image_to_video_data(image, annotations, 20)
 
     faces = preprocessor(frames=video, annotations=annotations)
@@ -212,34 +205,23 @@ def test_video_face_crop():
     assert np.sum(faces[-1][1]) == 429158
 
     #==========================================================================
-    # test another configuration of the  VideoFaceCrop preprocessor:
-
-    CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
-    CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
-    FIXED_POSITIONS = None
-    MASK_SIGMA = None  # The sigma for random values areas outside image
-    MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
-    MASK_SEED = None  # The seed for generating random values during extrapolation
-    CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
-    MIN_FACE_SIZE = 50
-    USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
-    RGB_OUTPUT_FLAG = True  # Return RGB cropped face using local cropper
-    DETECT_FACES_FLAG = True  # find annotations locally replacing the database annotations
-    FACE_DETECTION_METHOD = "dlib"
-
-    preprocessor = VideoFaceCrop(
-        cropped_image_size=CROPPED_IMAGE_SIZE,
-        cropped_positions=CROPPED_POSITIONS,
-        fixed_positions=FIXED_POSITIONS,
-        mask_sigma=MASK_SIGMA,
-        mask_neighbors=MASK_NEIGHBORS,
-        mask_seed=None,
-        check_face_size_flag=CHECK_FACE_SIZE_FLAG,
-        min_face_size=MIN_FACE_SIZE,
-        use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
-        rgb_output_flag=RGB_OUTPUT_FLAG,
-        detect_faces_flag=DETECT_FACES_FLAG,
-        face_detection_method=FACE_DETECTION_METHOD)
+    # test another configuration of the preprocessor:
+
+    FACE_SIZE = 64 # The size of the resulting face
+    RGB_OUTPUT_FLAG = True # RGB output
+    USE_FACE_ALIGNMENT = False # use annotations
+    MAX_IMAGE_SIZE = None # no limiting here
+    FACE_DETECTION_METHOD = "dlib" # detect faces with dlib
+    MIN_FACE_SIZE = 50 # skip small faces
+
+    image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                       rgb_output_flag = RGB_OUTPUT_FLAG,
+                                       use_face_alignment = USE_FACE_ALIGNMENT,
+                                       max_image_size = MAX_IMAGE_SIZE,
+                                       face_detection_method = FACE_DETECTION_METHOD,
+                                       min_face_size = MIN_FACE_SIZE)
+
+    preprocessor = Wrapper(image_preprocessor)
 
     video, _ = convert_image_to_video_data(image, annotations, 3)
 
@@ -310,34 +292,29 @@ def test_video_lbp_histogram():
     Test LBPHistogram with Wrapper extractor.
     """
 
+    from ..preprocessor import FaceCropAlign
+
+    from bob.bio.video.preprocessor import Wrapper
+
+    FACE_SIZE = 64 # The size of the resulting face
+    RGB_OUTPUT_FLAG = False # Gray-scale output
+    USE_FACE_ALIGNMENT = False # use annotations
+    MAX_IMAGE_SIZE = None # no limiting here
+    FACE_DETECTION_METHOD = None # use annotations
+    MIN_FACE_SIZE = 50 # skip small faces
+
+    image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                       rgb_output_flag = RGB_OUTPUT_FLAG,
+                                       use_face_alignment = USE_FACE_ALIGNMENT,
+                                       max_image_size = MAX_IMAGE_SIZE,
+                                       face_detection_method = FACE_DETECTION_METHOD,
+                                       min_face_size = MIN_FACE_SIZE)
+
+    preprocessor = Wrapper(image_preprocessor)
+
     image = load(datafile('test_image.png', 'bob.pad.face.test'))
     annotations = {'topleft': (95, 155), 'bottomright': (215, 265)}
 
-    CROPPED_IMAGE_SIZE = (64, 64)  # The size of the resulting face
-    CROPPED_POSITIONS = {'topleft': (0, 0), 'bottomright': CROPPED_IMAGE_SIZE}
-    FIXED_POSITIONS = None
-    MASK_SIGMA = None  # The sigma for random values areas outside image
-    MASK_NEIGHBORS = 5  # The number of neighbors to consider while extrapolating
-    MASK_SEED = None  # The seed for generating random values during extrapolation
-    CHECK_FACE_SIZE_FLAG = True  # Check the size of the face
-    MIN_FACE_SIZE = 50  # Minimal possible size of the face
-    USE_LOCAL_CROPPER_FLAG = True  # Use the local face cropping class (identical to Ivana's paper)
-    RGB_OUTPUT_FLAG = False  # The output is gray-scale
-    COLOR_CHANNEL = 'gray'  # Convert image to gray-scale format
-
-    preprocessor = VideoFaceCrop(
-        cropped_image_size=CROPPED_IMAGE_SIZE,
-        cropped_positions=CROPPED_POSITIONS,
-        fixed_positions=FIXED_POSITIONS,
-        mask_sigma=MASK_SIGMA,
-        mask_neighbors=MASK_NEIGHBORS,
-        mask_seed=MASK_SEED,
-        check_face_size_flag=CHECK_FACE_SIZE_FLAG,
-        min_face_size=MIN_FACE_SIZE,
-        use_local_cropper_flag=USE_LOCAL_CROPPER_FLAG,
-        rgb_output_flag=RGB_OUTPUT_FLAG,
-        color_channel=COLOR_CHANNEL)
-
     video, annotations = convert_image_to_video_data(image, annotations, 20)
 
     faces = preprocessor(frames=video, annotations=annotations)