From e236cac643e458b1b871ec84328dcaab12a44edc Mon Sep 17 00:00:00 2001
From: Olegs NIKISINS <onikisins@italix03.idiap.ch>
Date: Mon, 5 Mar 2018 15:54:54 +0100
Subject: [PATCH] Add underscore prefix to private variables in preprocessor
 configs to resolve warnings when running baselines
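
Variables that are not resources consumed by the framework are now
prefixed with an underscore, and the continuation lines of the affected
``FaceCropAlign`` calls are re-aligned to match the longer name.

Public module-level names in these config files are collected by the
framework's configuration loader as candidate resources, and names it
does not recognize trigger a warning; names starting with an underscore
are treated as private and skipped. Below is a minimal sketch of this
assumed loader behavior (``KNOWN_RESOURCES`` and ``_check_config_module``
are illustrative names, not the actual bob API):

    import warnings

    # Resource names the loader knows how to consume (illustrative set).
    KNOWN_RESOURCES = {"database", "preprocessor", "extractor", "algorithm"}

    def _check_config_module(module):
        for name in vars(module):
            if name.startswith("_"):
                continue  # private helpers such as _image_preprocessor are skipped
            if name not in KNOWN_RESOURCES:
                warnings.warn("unknown attribute '%s' in config file" % name)

After the rename only the intended resource names remain public, so the
warning no longer fires.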

---
 bob/pad/face/config/lbp_svm.py                | 14 +++++++-------
 bob/pad/face/config/lbp_svm_aggregated_db.py  | 14 +++++++-------
 .../config/preprocessor/video_face_crop.py    | 28 ++++++++++++++--------------
 bob/pad/face/config/qm_lr.py                  | 14 +++++++-------
 bob/pad/face/config/qm_one_class_gmm.py       | 14 +++++++-------
 .../config/qm_one_class_svm_aggregated_db.py  | 14 +++++++-------
 .../qm_one_class_svm_cascade_aggregated_db.py | 14 +++++++-------
 bob/pad/face/config/qm_svm.py                 | 14 +++++++-------
 bob/pad/face/config/qm_svm_aggregated_db.py   | 14 +++++++-------
 9 files changed, 70 insertions(+), 70 deletions(-)

diff --git a/bob/pad/face/config/lbp_svm.py b/bob/pad/face/config/lbp_svm.py
index f5eba528..c57ed630 100644
--- a/bob/pad/face/config/lbp_svm.py
+++ b/bob/pad/face/config/lbp_svm.py
@@ -31,14 +31,14 @@ MAX_IMAGE_SIZE = None # no limiting here
 FACE_DETECTION_METHOD = None # use annotations
 MIN_FACE_SIZE = 50 # skip small faces
 
-image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
-                                   rgb_output_flag = RGB_OUTPUT_FLAG,
-                                   use_face_alignment = USE_FACE_ALIGNMENT,
-                                   max_image_size = MAX_IMAGE_SIZE,
-                                   face_detection_method = FACE_DETECTION_METHOD,
-                                   min_face_size = MIN_FACE_SIZE)
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                    rgb_output_flag = RGB_OUTPUT_FLAG,
+                                    use_face_alignment = USE_FACE_ALIGNMENT,
+                                    max_image_size = MAX_IMAGE_SIZE,
+                                    face_detection_method = FACE_DETECTION_METHOD,
+                                    min_face_size = MIN_FACE_SIZE)
 
-preprocessor = Wrapper(image_preprocessor)
+preprocessor = Wrapper(_image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces with the size
diff --git a/bob/pad/face/config/lbp_svm_aggregated_db.py b/bob/pad/face/config/lbp_svm_aggregated_db.py
index fb84bd47..812bf4a9 100644
--- a/bob/pad/face/config/lbp_svm_aggregated_db.py
+++ b/bob/pad/face/config/lbp_svm_aggregated_db.py
@@ -33,14 +33,14 @@ MAX_IMAGE_SIZE = None # no limiting here
 FACE_DETECTION_METHOD = None # use annotations
 MIN_FACE_SIZE = 50 # skip small faces
 
-image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
-                                   rgb_output_flag = RGB_OUTPUT_FLAG,
-                                   use_face_alignment = USE_FACE_ALIGNMENT,
-                                   max_image_size = MAX_IMAGE_SIZE,
-                                   face_detection_method = FACE_DETECTION_METHOD,
-                                   min_face_size = MIN_FACE_SIZE)
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                    rgb_output_flag = RGB_OUTPUT_FLAG,
+                                    use_face_alignment = USE_FACE_ALIGNMENT,
+                                    max_image_size = MAX_IMAGE_SIZE,
+                                    face_detection_method = FACE_DETECTION_METHOD,
+                                    min_face_size = MIN_FACE_SIZE)
 
-preprocessor = Wrapper(image_preprocessor)
+preprocessor = Wrapper(_image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces with the size
diff --git a/bob/pad/face/config/preprocessor/video_face_crop.py b/bob/pad/face/config/preprocessor/video_face_crop.py
index 57bb5901..e8aa571b 100644
--- a/bob/pad/face/config/preprocessor/video_face_crop.py
+++ b/bob/pad/face/config/preprocessor/video_face_crop.py
@@ -16,23 +16,23 @@ MAX_IMAGE_SIZE = None  # no limiting here
 FACE_DETECTION_METHOD = "dlib"  # use dlib face detection
 MIN_FACE_SIZE = 50  # skip small faces
 
-image_preprocessor = FaceCropAlign(face_size=FACE_SIZE,
-                                   rgb_output_flag=RGB_OUTPUT_FLAG,
-                                   use_face_alignment=USE_FACE_ALIGNMENT,
-                                   max_image_size=MAX_IMAGE_SIZE,
-                                   face_detection_method=FACE_DETECTION_METHOD,
-                                   min_face_size=MIN_FACE_SIZE)
+_image_preprocessor = FaceCropAlign(face_size=FACE_SIZE,
+                                    rgb_output_flag=RGB_OUTPUT_FLAG,
+                                    use_face_alignment=USE_FACE_ALIGNMENT,
+                                    max_image_size=MAX_IMAGE_SIZE,
+                                    face_detection_method=FACE_DETECTION_METHOD,
+                                    min_face_size=MIN_FACE_SIZE)
 
-rgb_face_detector_dlib = Wrapper(image_preprocessor)
+rgb_face_detector_dlib = Wrapper(_image_preprocessor)
 
 # =======================================================================================
 FACE_DETECTION_METHOD = "mtcnn"  # use mtcnn face detection
 
-image_preprocessor = FaceCropAlign(face_size=FACE_SIZE,
-                                   rgb_output_flag=RGB_OUTPUT_FLAG,
-                                   use_face_alignment=USE_FACE_ALIGNMENT,
-                                   max_image_size=MAX_IMAGE_SIZE,
-                                   face_detection_method=FACE_DETECTION_METHOD,
-                                   min_face_size=MIN_FACE_SIZE)
+_image_preprocessor = FaceCropAlign(face_size=FACE_SIZE,
+                                    rgb_output_flag=RGB_OUTPUT_FLAG,
+                                    use_face_alignment=USE_FACE_ALIGNMENT,
+                                    max_image_size=MAX_IMAGE_SIZE,
+                                    face_detection_method=FACE_DETECTION_METHOD,
+                                    min_face_size=MIN_FACE_SIZE)
 
-rgb_face_detector_mtcnn = Wrapper(image_preprocessor)
+rgb_face_detector_mtcnn = Wrapper(_image_preprocessor)
diff --git a/bob/pad/face/config/qm_lr.py b/bob/pad/face/config/qm_lr.py
index 1e7522c0..0ae7eb31 100644
--- a/bob/pad/face/config/qm_lr.py
+++ b/bob/pad/face/config/qm_lr.py
@@ -30,14 +30,14 @@ MAX_IMAGE_SIZE = None # no limiting here
 FACE_DETECTION_METHOD = None # use annotations
 MIN_FACE_SIZE = 50 # skip small faces
 
-image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
-                                   rgb_output_flag = RGB_OUTPUT_FLAG,
-                                   use_face_alignment = USE_FACE_ALIGNMENT,
-                                   max_image_size = MAX_IMAGE_SIZE,
-                                   face_detection_method = FACE_DETECTION_METHOD,
-                                   min_face_size = MIN_FACE_SIZE)
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                    rgb_output_flag = RGB_OUTPUT_FLAG,
+                                    use_face_alignment = USE_FACE_ALIGNMENT,
+                                    max_image_size = MAX_IMAGE_SIZE,
+                                    face_detection_method = FACE_DETECTION_METHOD,
+                                    min_face_size = MIN_FACE_SIZE)
 
-preprocessor = Wrapper(image_preprocessor)
+preprocessor = Wrapper(_image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces of the size
diff --git a/bob/pad/face/config/qm_one_class_gmm.py b/bob/pad/face/config/qm_one_class_gmm.py
index 52d99739..15582c19 100644
--- a/bob/pad/face/config/qm_one_class_gmm.py
+++ b/bob/pad/face/config/qm_one_class_gmm.py
@@ -30,14 +30,14 @@ MAX_IMAGE_SIZE = None # no limiting here
 FACE_DETECTION_METHOD = None # use annotations
 MIN_FACE_SIZE = 50 # skip small faces
 
-image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
-                                   rgb_output_flag = RGB_OUTPUT_FLAG,
-                                   use_face_alignment = USE_FACE_ALIGNMENT,
-                                   max_image_size = MAX_IMAGE_SIZE,
-                                   face_detection_method = FACE_DETECTION_METHOD,
-                                   min_face_size = MIN_FACE_SIZE)
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                    rgb_output_flag = RGB_OUTPUT_FLAG,
+                                    use_face_alignment = USE_FACE_ALIGNMENT,
+                                    max_image_size = MAX_IMAGE_SIZE,
+                                    face_detection_method = FACE_DETECTION_METHOD,
+                                    min_face_size = MIN_FACE_SIZE)
 
-preprocessor = Wrapper(image_preprocessor)
+preprocessor = Wrapper(_image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces of the size
diff --git a/bob/pad/face/config/qm_one_class_svm_aggregated_db.py b/bob/pad/face/config/qm_one_class_svm_aggregated_db.py
index 1951849d..e30161c1 100644
--- a/bob/pad/face/config/qm_one_class_svm_aggregated_db.py
+++ b/bob/pad/face/config/qm_one_class_svm_aggregated_db.py
@@ -32,14 +32,14 @@ MAX_IMAGE_SIZE = None # no limiting here
 FACE_DETECTION_METHOD = None # use annotations
 MIN_FACE_SIZE = 50 # skip small faces
 
-image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
-                                   rgb_output_flag = RGB_OUTPUT_FLAG,
-                                   use_face_alignment = USE_FACE_ALIGNMENT,
-                                   max_image_size = MAX_IMAGE_SIZE,
-                                   face_detection_method = FACE_DETECTION_METHOD,
-                                   min_face_size = MIN_FACE_SIZE)
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                    rgb_output_flag = RGB_OUTPUT_FLAG,
+                                    use_face_alignment = USE_FACE_ALIGNMENT,
+                                    max_image_size = MAX_IMAGE_SIZE,
+                                    face_detection_method = FACE_DETECTION_METHOD,
+                                    min_face_size = MIN_FACE_SIZE)
 
-preprocessor = Wrapper(image_preprocessor)
+preprocessor = Wrapper(_image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces of the size
diff --git a/bob/pad/face/config/qm_one_class_svm_cascade_aggregated_db.py b/bob/pad/face/config/qm_one_class_svm_cascade_aggregated_db.py
index 01679f83..4c43a6e6 100644
--- a/bob/pad/face/config/qm_one_class_svm_cascade_aggregated_db.py
+++ b/bob/pad/face/config/qm_one_class_svm_cascade_aggregated_db.py
@@ -32,14 +32,14 @@ MAX_IMAGE_SIZE = None # no limiting here
 FACE_DETECTION_METHOD = None # use annotations
 MIN_FACE_SIZE = 50 # skip small faces
 
-image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
-                                   rgb_output_flag = RGB_OUTPUT_FLAG,
-                                   use_face_alignment = USE_FACE_ALIGNMENT,
-                                   max_image_size = MAX_IMAGE_SIZE,
-                                   face_detection_method = FACE_DETECTION_METHOD,
-                                   min_face_size = MIN_FACE_SIZE)
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                    rgb_output_flag = RGB_OUTPUT_FLAG,
+                                    use_face_alignment = USE_FACE_ALIGNMENT,
+                                    max_image_size = MAX_IMAGE_SIZE,
+                                    face_detection_method = FACE_DETECTION_METHOD,
+                                    min_face_size = MIN_FACE_SIZE)
 
-preprocessor = Wrapper(image_preprocessor)
+preprocessor = Wrapper(_image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces of the size
diff --git a/bob/pad/face/config/qm_svm.py b/bob/pad/face/config/qm_svm.py
index 8e742ff2..422115b2 100644
--- a/bob/pad/face/config/qm_svm.py
+++ b/bob/pad/face/config/qm_svm.py
@@ -30,14 +30,14 @@ MAX_IMAGE_SIZE = None # no limiting here
 FACE_DETECTION_METHOD = None # use annotations
 MIN_FACE_SIZE = 50 # skip small faces
 
-image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
-                                   rgb_output_flag = RGB_OUTPUT_FLAG,
-                                   use_face_alignment = USE_FACE_ALIGNMENT,
-                                   max_image_size = MAX_IMAGE_SIZE,
-                                   face_detection_method = FACE_DETECTION_METHOD,
-                                   min_face_size = MIN_FACE_SIZE)
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                    rgb_output_flag = RGB_OUTPUT_FLAG,
+                                    use_face_alignment = USE_FACE_ALIGNMENT,
+                                    max_image_size = MAX_IMAGE_SIZE,
+                                    face_detection_method = FACE_DETECTION_METHOD,
+                                    min_face_size = MIN_FACE_SIZE)
 
-preprocessor = Wrapper(image_preprocessor)
+preprocessor = Wrapper(_image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces of the size
diff --git a/bob/pad/face/config/qm_svm_aggregated_db.py b/bob/pad/face/config/qm_svm_aggregated_db.py
index 1acd37d9..3506a03b 100644
--- a/bob/pad/face/config/qm_svm_aggregated_db.py
+++ b/bob/pad/face/config/qm_svm_aggregated_db.py
@@ -32,14 +32,14 @@ MAX_IMAGE_SIZE = None # no limiting here
 FACE_DETECTION_METHOD = None # use annotations
 MIN_FACE_SIZE = 50 # skip small faces
 
-image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
-                                   rgb_output_flag = RGB_OUTPUT_FLAG,
-                                   use_face_alignment = USE_FACE_ALIGNMENT,
-                                   max_image_size = MAX_IMAGE_SIZE,
-                                   face_detection_method = FACE_DETECTION_METHOD,
-                                   min_face_size = MIN_FACE_SIZE)
+_image_preprocessor = FaceCropAlign(face_size = FACE_SIZE,
+                                    rgb_output_flag = RGB_OUTPUT_FLAG,
+                                    use_face_alignment = USE_FACE_ALIGNMENT,
+                                    max_image_size = MAX_IMAGE_SIZE,
+                                    face_detection_method = FACE_DETECTION_METHOD,
+                                    min_face_size = MIN_FACE_SIZE)
 
-preprocessor = Wrapper(image_preprocessor)
+preprocessor = Wrapper(_image_preprocessor)
 """
 In the preprocessing stage the face is cropped in each frame of the input video given facial annotations.
 The size of the face is normalized to ``FACE_SIZE`` dimensions. The faces of the size
-- 
GitLab