diff --git a/bob/pad/face/config/lbp_svm.py b/bob/pad/face/config/lbp_svm.py
index f5eba5286f783b20d3a07d852d3fa5cc78c4999c..c57ed630fffa951342ccf5db2e4b7de04c5aec84 100644
--- a/bob/pad/face/config/lbp_svm.py
+++ b/bob/pad/face/config/lbp_svm.py
@@ -31,14 +31,14 @@ MAX_IMAGE_SIZE = None # no limiting here
 FACE_DETECTION_METHOD = None # use annotations
 MIN_FACE_SIZE = 50 # skip small faces
 
-image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
-                                   rgb_output_flag = RGB_OUTPUT_FLAG,
-                                   use_face_alignment = USE_FACE_ALIGNMENT,
-                                   max_image_size = MAX_IMAGE_SIZE,
-                                   face_detection_method = FACE_DETECTION_METHOD,
-                                   min_face_size = MIN_FACE_SIZE)
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                    rgb_output_flag = RGB_OUTPUT_FLAG,
+                                    use_face_alignment = USE_FACE_ALIGNMENT,
+                                    max_image_size = MAX_IMAGE_SIZE,
+                                    face_detection_method = FACE_DETECTION_METHOD,
+                                    min_face_size = MIN_FACE_SIZE)
 
-preprocessor = Wrapper(image_preprocessor)
+preprocessor = Wrapper(_image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces with the size
diff --git a/bob/pad/face/config/lbp_svm_aggregated_db.py b/bob/pad/face/config/lbp_svm_aggregated_db.py
index fb84bd471c3f64caeefc48441c98299dec096a18..812bf4a98332c067fac4e9f63308152cea314ec9 100644
--- a/bob/pad/face/config/lbp_svm_aggregated_db.py
+++ b/bob/pad/face/config/lbp_svm_aggregated_db.py
@@ -33,14 +33,14 @@ MAX_IMAGE_SIZE = None # no limiting here
 FACE_DETECTION_METHOD = None # use annotations
 MIN_FACE_SIZE = 50 # skip small faces
 
-image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
-                                   rgb_output_flag = RGB_OUTPUT_FLAG,
-                                   use_face_alignment = USE_FACE_ALIGNMENT,
-                                   max_image_size = MAX_IMAGE_SIZE,
-                                   face_detection_method = FACE_DETECTION_METHOD,
-                                   min_face_size = MIN_FACE_SIZE)
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                    rgb_output_flag = RGB_OUTPUT_FLAG,
+                                    use_face_alignment = USE_FACE_ALIGNMENT,
+                                    max_image_size = MAX_IMAGE_SIZE,
+                                    face_detection_method = FACE_DETECTION_METHOD,
+                                    min_face_size = MIN_FACE_SIZE)
 
-preprocessor = Wrapper(image_preprocessor)
+preprocessor = Wrapper(_image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces with the size
diff --git a/bob/pad/face/config/preprocessor/video_face_crop.py b/bob/pad/face/config/preprocessor/video_face_crop.py
index 57bb5901fc530fc2e20c4624ddd401ddb656f83e..e8aa571bd688d279322c624b590e1ffdd7131020 100644
--- a/bob/pad/face/config/preprocessor/video_face_crop.py
+++ b/bob/pad/face/config/preprocessor/video_face_crop.py
@@ -16,23 +16,23 @@ MAX_IMAGE_SIZE = None  # no limiting here
 FACE_DETECTION_METHOD = "dlib"  # use dlib face detection
 MIN_FACE_SIZE = 50  # skip small faces
 
-image_preprocessor = FaceCropAlign(face_size=FACE_SIZE,
-                                   rgb_output_flag=RGB_OUTPUT_FLAG,
-                                   use_face_alignment=USE_FACE_ALIGNMENT,
-                                   max_image_size=MAX_IMAGE_SIZE,
-                                   face_detection_method=FACE_DETECTION_METHOD,
-                                   min_face_size=MIN_FACE_SIZE)
+_image_preprocessor = FaceCropAlign(face_size=FACE_SIZE,
+                                    rgb_output_flag=RGB_OUTPUT_FLAG,
+                                    use_face_alignment=USE_FACE_ALIGNMENT,
+                                    max_image_size=MAX_IMAGE_SIZE,
+                                    face_detection_method=FACE_DETECTION_METHOD,
+                                    min_face_size=MIN_FACE_SIZE)
 
-rgb_face_detector_dlib = Wrapper(image_preprocessor)
+rgb_face_detector_dlib = Wrapper(_image_preprocessor)
 
 # =======================================================================================
 FACE_DETECTION_METHOD = "mtcnn"  # use mtcnn face detection
 
-image_preprocessor = FaceCropAlign(face_size=FACE_SIZE,
-                                   rgb_output_flag=RGB_OUTPUT_FLAG,
-                                   use_face_alignment=USE_FACE_ALIGNMENT,
-                                   max_image_size=MAX_IMAGE_SIZE,
-                                   face_detection_method=FACE_DETECTION_METHOD,
-                                   min_face_size=MIN_FACE_SIZE)
+_image_preprocessor = FaceCropAlign(face_size=FACE_SIZE,
+                                    rgb_output_flag=RGB_OUTPUT_FLAG,
+                                    use_face_alignment=USE_FACE_ALIGNMENT,
+                                    max_image_size=MAX_IMAGE_SIZE,
+                                    face_detection_method=FACE_DETECTION_METHOD,
+                                    min_face_size=MIN_FACE_SIZE)
 
-rgb_face_detector_mtcnn = Wrapper(image_preprocessor)
+rgb_face_detector_mtcnn = Wrapper(_image_preprocessor)
diff --git a/bob/pad/face/config/qm_lr.py b/bob/pad/face/config/qm_lr.py
index 1e7522c046720a6313cc1d58aa3102b22daeca99..0ae7eb31d45e0268b3dcbe6ff0019faa341494c7 100644
--- a/bob/pad/face/config/qm_lr.py
+++ b/bob/pad/face/config/qm_lr.py
@@ -30,14 +30,14 @@ MAX_IMAGE_SIZE = None # no limiting here
 FACE_DETECTION_METHOD = None # use annotations
 MIN_FACE_SIZE = 50 # skip small faces
 
-image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
-                                   rgb_output_flag = RGB_OUTPUT_FLAG,
-                                   use_face_alignment = USE_FACE_ALIGNMENT,
-                                   max_image_size = MAX_IMAGE_SIZE,
-                                   face_detection_method = FACE_DETECTION_METHOD,
-                                   min_face_size = MIN_FACE_SIZE)
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                    rgb_output_flag = RGB_OUTPUT_FLAG,
+                                    use_face_alignment = USE_FACE_ALIGNMENT,
+                                    max_image_size = MAX_IMAGE_SIZE,
+                                    face_detection_method = FACE_DETECTION_METHOD,
+                                    min_face_size = MIN_FACE_SIZE)
 
-preprocessor = Wrapper(image_preprocessor)
+preprocessor = Wrapper(_image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces of the size
diff --git a/bob/pad/face/config/qm_one_class_gmm.py b/bob/pad/face/config/qm_one_class_gmm.py
index 52d9973910761d74b8df9686630abfef287e7b01..15582c19670b0dcd9cfd2889139f250dab6afb9e 100644
--- a/bob/pad/face/config/qm_one_class_gmm.py
+++ b/bob/pad/face/config/qm_one_class_gmm.py
@@ -30,14 +30,14 @@ MAX_IMAGE_SIZE = None # no limiting here
 FACE_DETECTION_METHOD = None # use annotations
 MIN_FACE_SIZE = 50 # skip small faces
 
-image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
-                                   rgb_output_flag = RGB_OUTPUT_FLAG,
-                                   use_face_alignment = USE_FACE_ALIGNMENT,
-                                   max_image_size = MAX_IMAGE_SIZE,
-                                   face_detection_method = FACE_DETECTION_METHOD,
-                                   min_face_size = MIN_FACE_SIZE)
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                    rgb_output_flag = RGB_OUTPUT_FLAG,
+                                    use_face_alignment = USE_FACE_ALIGNMENT,
+                                    max_image_size = MAX_IMAGE_SIZE,
+                                    face_detection_method = FACE_DETECTION_METHOD,
+                                    min_face_size = MIN_FACE_SIZE)
 
-preprocessor = Wrapper(image_preprocessor)
+preprocessor = Wrapper(_image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces of the size
diff --git a/bob/pad/face/config/qm_one_class_svm_aggregated_db.py b/bob/pad/face/config/qm_one_class_svm_aggregated_db.py
index 1951849d1debcca6533db74995f4221558f0aea9..e30161c12ce26af0f11837c9beee4383bee046bc 100644
--- a/bob/pad/face/config/qm_one_class_svm_aggregated_db.py
+++ b/bob/pad/face/config/qm_one_class_svm_aggregated_db.py
@@ -32,14 +32,14 @@ MAX_IMAGE_SIZE = None # no limiting here
 FACE_DETECTION_METHOD = None # use annotations
 MIN_FACE_SIZE = 50 # skip small faces
 
-image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
-                                   rgb_output_flag = RGB_OUTPUT_FLAG,
-                                   use_face_alignment = USE_FACE_ALIGNMENT,
-                                   max_image_size = MAX_IMAGE_SIZE,
-                                   face_detection_method = FACE_DETECTION_METHOD,
-                                   min_face_size = MIN_FACE_SIZE)
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                    rgb_output_flag = RGB_OUTPUT_FLAG,
+                                    use_face_alignment = USE_FACE_ALIGNMENT,
+                                    max_image_size = MAX_IMAGE_SIZE,
+                                    face_detection_method = FACE_DETECTION_METHOD,
+                                    min_face_size = MIN_FACE_SIZE)
 
-preprocessor = Wrapper(image_preprocessor)
+preprocessor = Wrapper(_image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces of the size
diff --git a/bob/pad/face/config/qm_one_class_svm_cascade_aggregated_db.py b/bob/pad/face/config/qm_one_class_svm_cascade_aggregated_db.py
index 01679f8396e5a8ab7ef974bed0f9885f8f2be83f..4c43a6e6fc4d5415930bef38396ba16b95ff1d96 100644
--- a/bob/pad/face/config/qm_one_class_svm_cascade_aggregated_db.py
+++ b/bob/pad/face/config/qm_one_class_svm_cascade_aggregated_db.py
@@ -32,14 +32,14 @@ MAX_IMAGE_SIZE = None # no limiting here
 FACE_DETECTION_METHOD = None # use annotations
 MIN_FACE_SIZE = 50 # skip small faces
 
-image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
-                                   rgb_output_flag = RGB_OUTPUT_FLAG,
-                                   use_face_alignment = USE_FACE_ALIGNMENT,
-                                   max_image_size = MAX_IMAGE_SIZE,
-                                   face_detection_method = FACE_DETECTION_METHOD,
-                                   min_face_size = MIN_FACE_SIZE)
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                    rgb_output_flag = RGB_OUTPUT_FLAG,
+                                    use_face_alignment = USE_FACE_ALIGNMENT,
+                                    max_image_size = MAX_IMAGE_SIZE,
+                                    face_detection_method = FACE_DETECTION_METHOD,
+                                    min_face_size = MIN_FACE_SIZE)
 
-preprocessor = Wrapper(image_preprocessor)
+preprocessor = Wrapper(_image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces of the size
diff --git a/bob/pad/face/config/qm_svm.py b/bob/pad/face/config/qm_svm.py
index 8e742ff2246684976233c18205d3516f80f4ee46..422115b224d12bb3bd7145790ac8b660c3d0ece7 100644
--- a/bob/pad/face/config/qm_svm.py
+++ b/bob/pad/face/config/qm_svm.py
@@ -30,14 +30,14 @@ MAX_IMAGE_SIZE = None # no limiting here
 FACE_DETECTION_METHOD = None # use annotations
 MIN_FACE_SIZE = 50 # skip small faces
 
-image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
-                                   rgb_output_flag = RGB_OUTPUT_FLAG,
-                                   use_face_alignment = USE_FACE_ALIGNMENT,
-                                   max_image_size = MAX_IMAGE_SIZE,
-                                   face_detection_method = FACE_DETECTION_METHOD,
-                                   min_face_size = MIN_FACE_SIZE)
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                    rgb_output_flag = RGB_OUTPUT_FLAG,
+                                    use_face_alignment = USE_FACE_ALIGNMENT,
+                                    max_image_size = MAX_IMAGE_SIZE,
+                                    face_detection_method = FACE_DETECTION_METHOD,
+                                    min_face_size = MIN_FACE_SIZE)
 
-preprocessor = Wrapper(image_preprocessor)
+preprocessor = Wrapper(_image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces of the size
diff --git a/bob/pad/face/config/qm_svm_aggregated_db.py b/bob/pad/face/config/qm_svm_aggregated_db.py
index 1acd37d94b37217fb58173150db9dca06b0f591e..3506a03b4842ede20a8ce921ee1dea2c83c16b7e 100644
--- a/bob/pad/face/config/qm_svm_aggregated_db.py
+++ b/bob/pad/face/config/qm_svm_aggregated_db.py
@@ -32,14 +32,14 @@ MAX_IMAGE_SIZE = None # no limiting here
 FACE_DETECTION_METHOD = None # use annotations
 MIN_FACE_SIZE = 50 # skip small faces
 
-image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
-                                   rgb_output_flag = RGB_OUTPUT_FLAG,
-                                   use_face_alignment = USE_FACE_ALIGNMENT,
-                                   max_image_size = MAX_IMAGE_SIZE,
-                                   face_detection_method = FACE_DETECTION_METHOD,
-                                   min_face_size = MIN_FACE_SIZE)
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                    rgb_output_flag = RGB_OUTPUT_FLAG,
+                                    use_face_alignment = USE_FACE_ALIGNMENT,
+                                    max_image_size = MAX_IMAGE_SIZE,
+                                    face_detection_method = FACE_DETECTION_METHOD,
+                                    min_face_size = MIN_FACE_SIZE)
 
-preprocessor = Wrapper(image_preprocessor)
+preprocessor = Wrapper(_image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces of the size
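A note on the rename, with a minimal sketch. Prefixing the helper with an underscore marks it private by Python convention, so tooling that enumerates a config module's public attributes (``from module import *``, Sphinx autodoc, or a resource loader that filters underscore-prefixed names) no longer picks up the intermediate ``FaceCropAlign`` instance and only exposes the wrapped ``preprocessor``. The sketch below illustrates that behaviour under this assumption; ``public_resources`` is a hypothetical helper for illustration, not part of bob.pad.face.

    import types

    def public_resources(module):
        """Return the public (non-underscore) attributes of a config module."""
        return {name: value
                for name, value in vars(module).items()
                if not name.startswith("_")}

    # Hypothetical stand-in for one of the edited config modules.
    config = types.ModuleType("lbp_svm")
    config._image_preprocessor = object()  # helper: hidden from consumers
    config.preprocessor = object()         # the resource that should be exposed

    assert "preprocessor" in public_resources(config)
    assert "_image_preprocessor" not in public_resources(config)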