From c2a907ccfb2eb8fbd4349956d707e06d4dedc66b Mon Sep 17 00:00:00 2001
From: Laurent COLBOIS <lcolbois@idiap.ch>
Date: Thu, 6 May 2021 09:10:11 +0200
Subject: [PATCH] Regrouping config helper modules

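A quick usage sketch of the relocated helper, for reviewers. This is
illustrative only: the image size and eye coordinates are hypothetical
values, not taken from this patch, and the "leye"/"reye" annotation keys
are the usual bob.bio.face conventions.

    from bob.bio.face.config.baseline.helpers import face_crop_solver

    # No cropped positions: the helper falls back to a plain resize.
    scaler = face_crop_solver(cropped_image_size=(112, 112))

    # A single dict of positions: a FaceCrop aligned on the annotations.
    eyes_cropper = face_crop_solver(
        cropped_image_size=(112, 112),
        cropped_positions={"leye": (55, 72), "reye": (55, 40)},
    )

    # A list of position dicts: a MultiFaceCrop, one cropper per entry.
    # (Whether fixed_positions may stay None here is an assumption about
    # MultiFaceCrop's handling of its fixed_positions_list argument.)
    multi_cropper = face_crop_solver(
        cropped_image_size=(112, 112),
        cropped_positions=[
            {"leye": (55, 72), "reye": (55, 40)},
            {"topleft": (0, 0), "bottomright": (112, 112)},
        ],
    )
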
---
 bob/bio/face/config/baseline/helpers.py | 38 ++++++++++++++++++++++++-
 bob/bio/face/helpers.py                 | 37 ------------------------
 2 files changed, 37 insertions(+), 38 deletions(-)
 delete mode 100644 bob/bio/face/helpers.py

diff --git a/bob/bio/face/config/baseline/helpers.py b/bob/bio/face/config/baseline/helpers.py
index 764ae274..aa9e7cd4 100644
--- a/bob/bio/face/config/baseline/helpers.py
+++ b/bob/bio/face/config/baseline/helpers.py
@@ -1,14 +1,50 @@
 import bob.bio.face
+from bob.bio.face.preprocessor import FaceCrop, MultiFaceCrop, Scale
 from sklearn.pipeline import make_pipeline
 from bob.bio.base.wrappers import wrap_sample_preprocessor
 from bob.pipelines import wrap
-from bob.bio.face.helpers import face_crop_solver
 import numpy as np
 import logging
 
 logger = logging.getLogger(__name__)
 
 
+def face_crop_solver(
+    cropped_image_size,
+    cropped_positions=None,
+    color_channel="rgb",
+    fixed_positions=None,
+    annotator=None,
+    dtype="uint8",
+):
+    """
+    Decide which face cropper to use: Scale, MultiFaceCrop, or FaceCrop, depending on cropped_positions.
+    """
+    # If there are no cropped positions, just resize to the target size
+    if cropped_positions is None:
+        return Scale(cropped_image_size)
+    else:
+        # A list of cropped positions yields a MultiFaceCrop (one FaceCrop per entry)
+        if isinstance(cropped_positions, list):
+            return MultiFaceCrop(
+                cropped_image_size=cropped_image_size,
+                cropped_positions_list=cropped_positions,
+                fixed_positions_list=fixed_positions,
+                color_channel=color_channel,
+                dtype=dtype,
+                annotation=annotator,
+            )
+        else:
+            return FaceCrop(
+                cropped_image_size=cropped_image_size,
+                cropped_positions=cropped_positions,
+                color_channel=color_channel,
+                fixed_positions=fixed_positions,
+                dtype=dtype,
+                annotator=annotator,
+            )
+
+
 def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
     """
     Computes the default cropped positions for the FaceCropper used with Facenet-like 
diff --git a/bob/bio/face/helpers.py b/bob/bio/face/helpers.py
deleted file mode 100644
index 51bdf9b8..00000000
--- a/bob/bio/face/helpers.py
+++ /dev/null
@@ -1,37 +0,0 @@
-from bob.bio.face.preprocessor import FaceCrop, MultiFaceCrop, Scale
-
-
-def face_crop_solver(
-    cropped_image_size,
-    cropped_positions=None,
-    color_channel="rgb",
-    fixed_positions=None,
-    annotator=None,
-    dtype="uint8",
-):
-    """
-    Decide which face cropper to use.
-    """
-    # If there's not cropped positions, just resize
-    if cropped_positions is None:
-        return Scale(cropped_image_size)
-    else:
-        # Detects the face and crops it without eye detection
-        if isinstance(cropped_positions, list):
-            return MultiFaceCrop(
-                cropped_image_size=cropped_image_size,
-                cropped_positions_list=cropped_positions,
-                fixed_positions_list=fixed_positions,
-                color_channel=color_channel,
-                dtype=dtype,
-                annotation=annotator,
-            )
-        else:
-            return FaceCrop(
-                cropped_image_size=cropped_image_size,
-                cropped_positions=cropped_positions,
-                color_channel=color_channel,
-                fixed_positions=fixed_positions,
-                dtype=dtype,
-                annotator=annotator,
-            )
-- 
GitLab