From 579aba474363b18da8186eca0dda3e5d182b2058 Mon Sep 17 00:00:00 2001
From: Xinyi ZHANG <xzhang@vws101.idiap.ch>
Date: Fri, 23 Apr 2021 08:54:07 +0200
Subject: [PATCH] Rename deep-learning extractors to CamelCase and fix baseline configs

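Rename the deep-learning feature extractors to CamelCase classes
(MxNetModel, PyTorchLoadedModel, PyTorchLibraryModel, TensorFlowModel,
OpenCVModel) and update the imports, baseline configurations, entry
points and documentation that reference them. Also fix the mistyped
ty_pipe -> tf_pipe module path in setup.py and register the new
mxnet-tinyface and resnet50-vgg2-arcface-2021 entry points.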
---
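Reviewer note: the renamed extractors keep reading their pretrained
models from the bob configuration (see doc/deeplearningextractor.rst),
so existing setups keep working, e.g.:

    $ bob config set bob.extractor_model.tf /PATH/TO/MODEL/
    $ bob config set bob.extractor_model.pytorch /PATH/TO/MODEL/
    $ bob config set bob.extractor_weights.pytorch /PATH/TO/WEIGHTS/
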
 bob/bio/face/annotator/__init__.py            |  1 +
 bob/bio/face/annotator/bobiptinyface.py       |  3 +--
 bob/bio/face/config/baseline/mxnet_pipe.py    |  4 +-
 .../face/config/baseline/pytorch_pipe_v1.py   |  8 ++++----
 .../face/config/baseline/pytorch_pipe_v2.py   |  8 ++++----
 bob/bio/face/config/baseline/tf_pipe.py       |  8 ++--
 bob/bio/face/extractor/__init__.py            | 20 +++++-----
 doc/baselines.rst                             | 19 ++++++++++++++-----
 doc/deeplearningextractor.rst                 |  2 +-
 doc/implemented.rst                           | 11 +++---
 doc/references.rst                            |  2 +-
 setup.py                                      | 11 ++++--
 12 files changed, 56 insertions(+), 41 deletions(-)
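
A minimal usage sketch of the renamed extractor API (an illustration,
assuming the model paths above are configured and that inputs match the
tf-pipe crop of 160x160 RGB, channels first):

    import numpy
    from bob.bio.face.extractor import TensorFlowModel

    extractor = TensorFlowModel()               # the model is lazy-loaded on first use
    images = numpy.random.rand(2, 3, 160, 160)  # placeholder batch of two images
    features = extractor.transform(images)      # numpy array of embeddings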

diff --git a/bob/bio/face/annotator/__init__.py b/bob/bio/face/annotator/__init__.py
index 1434c9b4..b058d897 100644
--- a/bob/bio/face/annotator/__init__.py
+++ b/bob/bio/face/annotator/__init__.py
@@ -1,5 +1,6 @@
 import bob.ip.facedetect
 
+
 def bounding_box_to_annotations(bbx):
     """Converts :any:`bob.ip.facedetect.BoundingBox` to dictionary annotations.
 
diff --git a/bob/bio/face/annotator/bobiptinyface.py b/bob/bio/face/annotator/bobiptinyface.py
index d2790c82..fc06d274 100644
--- a/bob/bio/face/annotator/bobiptinyface.py
+++ b/bob/bio/face/annotator/bobiptinyface.py
@@ -26,10 +26,9 @@ class BobIpTinyface(Base):
             Annotations with (topleft, bottomright) keys (or None).
         """

         annotations = self.tinyface.detect(image)

         if annotations is not None:
-            r = annotations[0]
-            return {"topleft": (r[0], r[1]), "bottomright": (r[2], r[3])}
+            return annotations[0]
         else:
             return None
diff --git a/bob/bio/face/config/baseline/mxnet_pipe.py b/bob/bio/face/config/baseline/mxnet_pipe.py
index eeb06200..b28ccd59 100644
--- a/bob/bio/face/config/baseline/mxnet_pipe.py
+++ b/bob/bio/face/config/baseline/mxnet_pipe.py
@@ -1,6 +1,6 @@
 import bob.bio.base
 from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.face.extractor import mxnet_model
+from bob.bio.face.extractor import MxNetModel
 from bob.bio.base.algorithm import Distance
 from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
 import scipy.spatial
@@ -39,7 +39,7 @@ transform_extra_arguments = (
 )
 
 
-extractor_transformer = mxnet_model()
+extractor_transformer = MxNetModel()
 
 algorithm = Distance(
     distance_function=scipy.spatial.distance.cosine, is_distance_function=True
diff --git a/bob/bio/face/config/baseline/pytorch_pipe_v1.py b/bob/bio/face/config/baseline/pytorch_pipe_v1.py
index ec5c4d9d..843e5381 100644
--- a/bob/bio/face/config/baseline/pytorch_pipe_v1.py
+++ b/bob/bio/face/config/baseline/pytorch_pipe_v1.py
@@ -1,6 +1,6 @@
 import bob.bio.base
 from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.face.extractor import pytorch_loaded_model
+from bob.bio.face.extractor import PyTorchLoadedModel
 from bob.bio.base.algorithm import Distance
 from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
 import scipy.spatial
@@ -25,9 +25,9 @@ else:
 
-cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
+cropped_positions = {"leye": (110, 144), "reye": (110, 96)}
 
 preprocessor_transformer = FaceCrop(
     cropped_image_size=(224, 224),
-    cropped_positions={"leye": (49, 72), "reye": (49, 38)},
+    cropped_positions={"leye": (110, 144), "reye": (110, 96)},
     color_channel="rgb",
     fixed_positions=fixed_positions,
 )
@@ -38,8 +38,8 @@ transform_extra_arguments = (
     else (("annotations", "annotations"),)
 )
 
 
-extractor_transformer = pytorch_loaded_model()
+extractor_transformer = PyTorchLoadedModel()
 
 algorithm = Distance(
     distance_function=scipy.spatial.distance.cosine, is_distance_function=True
diff --git a/bob/bio/face/config/baseline/pytorch_pipe_v2.py b/bob/bio/face/config/baseline/pytorch_pipe_v2.py
index 630609e6..90788963 100644
--- a/bob/bio/face/config/baseline/pytorch_pipe_v2.py
+++ b/bob/bio/face/config/baseline/pytorch_pipe_v2.py
@@ -1,6 +1,6 @@
 import bob.bio.base
 from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.face.extractor import pytorch_library_model
+from bob.bio.face.extractor import PyTorchLibraryModel
 from facenet_pytorch import InceptionResnetV1
 from bob.bio.base.algorithm import Distance
 from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
@@ -26,9 +26,9 @@ else:
 
-cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
+cropped_positions = {"leye": (110, 144), "reye": (110, 96)}
 
 preprocessor_transformer = FaceCrop(
     cropped_image_size=(224, 224),
-    cropped_positions={"leye": (49, 72), "reye": (49, 38)},
+    cropped_positions={"leye": (110, 144), "reye": (110, 96)},
     color_channel="rgb",
     fixed_positions=fixed_positions,
 )
@@ -40,8 +40,8 @@ transform_extra_arguments = (
 )
 
 
 model = InceptionResnetV1(pretrained="vggface2").eval()
-extractor_transformer = pytorch_library_model(model=model)
+extractor_transformer = PyTorchLibraryModel(model=model)
 
 
 algorithm = Distance(
diff --git a/bob/bio/face/config/baseline/tf_pipe.py b/bob/bio/face/config/baseline/tf_pipe.py
index 22c24d95..c4ea24a6 100644
--- a/bob/bio/face/config/baseline/tf_pipe.py
+++ b/bob/bio/face/config/baseline/tf_pipe.py
@@ -1,6 +1,6 @@
 import bob.bio.base
 from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.face.extractor import tf_model
+from bob.bio.face.extractor import TensorFlowModel
 from bob.bio.base.algorithm import Distance
 from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
 import scipy.spatial
@@ -24,11 +24,11 @@ else:
 
 
 # Preprocessor
-cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
+cropped_positions = {"leye": (80, 100), "reye": (80, 60)}
 
 preprocessor_transformer = FaceCrop(
     cropped_image_size=(160, 160),
-    cropped_positions={"leye": (49, 72), "reye": (49, 38)},
+    cropped_positions={"leye": (80, 100), "reye": (80, 60)},
     color_channel="rgb",
     fixed_positions=fixed_positions,
 )
@@ -41,7 +41,7 @@ transform_extra_arguments = (
 
 
 # Extractor
-extractor_transformer = tf_model()
+extractor_transformer = TensorFlowModel()
 
 # Algorithm
 algorithm = Distance(
diff --git a/bob/bio/face/extractor/__init__.py b/bob/bio/face/extractor/__init__.py
index 32b97163..d98275bc 100644
--- a/bob/bio/face/extractor/__init__.py
+++ b/bob/bio/face/extractor/__init__.py
@@ -1,11 +1,11 @@
 from .DCTBlocks import DCTBlocks
 from .GridGraph import GridGraph
 from .LGBPHS import LGBPHS
-from .mxnet_resnet import mxnet_model
-from .pytorch_model import pytorch_loaded_model
-from .pytorch_model import pytorch_library_model
-from .tf_model import tf_model
-from .opencv_caffe import opencv_model
+from .MxNetModel import MxNetModel
+from .PyTorchModel import PyTorchLoadedModel
+from .PyTorchModel import PyTorchLibraryModel
+from .TensorFlowModel import TensorFlowModel
+from .OpenCVModel import OpenCVModel
 
 # gets sphinx autodoc done right - don't remove it
 def __appropriate__(*args):
@@ -27,10 +27,10 @@ __appropriate__(
     DCTBlocks,
     GridGraph,
     LGBPHS,
-    mxnet_model,
-    pytorch_loaded_model,
-    pytorch_library_model,
-    tf_model,
-    opencv_model,
+    MxNetModel,
+    PyTorchLoadedModel,
+    PyTorchLibraryModel,
+    TensorFlowModel,
+    OpenCVModel,
 )
 __all__ = [_ for _ in dir() if not _.startswith("_")]
diff --git a/doc/baselines.rst b/doc/baselines.rst
index 88c42a62..4f8d9c2d 100644
--- a/doc/baselines.rst
+++ b/doc/baselines.rst
@@ -61,12 +61,21 @@ Deep learning baselines
 Deep Learning with different interfaces baselines
 =================================================
 
-* ``mxnet_pipe``: Arcface Resnet Model using MxNet Interfaces from `Insightface <https://github.com/deepinsight/insightface>`_
+* ``mxnet-pipe``: Arcface Resnet Model using MxNet Interfaces from `Insightface <https://github.com/deepinsight/insightface>`_
 
-* ``pytorch_pipe_v1``: Pytorch network that extracs 1000-dimensional featrues, trained by Manual Gunther, as described in [LGB18]_
+* ``mxnet-tinyface``: the Arcface Resnet Model using MxNet Interfaces from `Insightface <https://github.com/deepinsight/insightface>`_, with the `tinyface annotator <https://github.com/chinakook/hr101_mxnet>`_ applied for face detection
 
-* ``pytorch_pipe_v2``: Inception Resnet face recognition model from `facenet_pytorch <https://github.com/timesler/facenet-pytorch>`_
+* ``pytorch-pipe-v1``: PyTorch network that extracts 1000-dimensional features, trained by Manuel Günther, as described in [LGB18]_
 
-* ``tf_pipe``: Inception Resnet v2 model trained using the MSCeleb dataset in the context of the work published by [TFP18]_
+* ``pytorch-pipe-v2``: Inception Resnet face recognition model from `facenet_pytorch <https://github.com/timesler/facenet-pytorch>`_
 
-* ``opencv_pipe``: VGG Face descriptor pretrained models, i.e. `Caffe model <https://www.robots.ox.ac.uk/~vgg/software/vgg_face/>`_
+* ``tf-pipe``: Inception Resnet v2 model trained using the MSCeleb dataset in the context of the work published by [TFP18]_
+
+* ``opencv-pipe``: VGG Face descriptor pretrained models, i.e. `Caffe model <https://www.robots.ox.ac.uk/~vgg/software/vgg_face/>`_
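+
+Any of these baselines can be run with the ``vanilla-biometrics`` command,
+for example the TensorFlow pipeline on the AT&T database:
+
+.. code-block:: sh
+
+   $ bob bio pipelines vanilla-biometrics atnt tf-pipe -o ./results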
diff --git a/doc/deeplearningextractor.rst b/doc/deeplearningextractor.rst
index 378a35d8..466a0fa4 100644
--- a/doc/deeplearningextractor.rst
+++ b/doc/deeplearningextractor.rst
@@ -234,7 +234,7 @@ In this baseline, we use :py:class:`bob.bio.face.preprocessor.FaceCrop` with  ``
 and ``cropped_image_size=(160,160)`` 
 as preprocessor,  Inception Resnet v2  in [TFP18]_ as extractor, and ``distance-cosine`` as the algorithm. By testing on LFW database, we get the following ROC plot:
 
-.. figure:: img/tensorflow_lfw_pipe.png
+.. figure:: img/tensorflow_pipe.png
   :figwidth: 75%
   :align: center
   :alt: Face recognition results of LFW database.
diff --git a/doc/implemented.rst b/doc/implemented.rst
index 75acc712..7a3074a4 100644
--- a/doc/implemented.rst
+++ b/doc/implemented.rst
@@ -13,6 +13,7 @@ Databases
 .. autosummary::
    bob.bio.face.database.ARFaceBioDatabase
    bob.bio.face.database.AtntBioDatabase
+   bob.bio.face.database.CasiaAfricaDatabase
    bob.bio.face.database.MobioDatabase
    bob.bio.face.database.ReplayBioDatabase
    bob.bio.face.database.ReplayMobileBioDatabase
@@ -57,11 +58,11 @@ Image Feature Extractors
    bob.bio.face.extractor.DCTBlocks
    bob.bio.face.extractor.GridGraph
    bob.bio.face.extractor.LGBPHS
-   bob.bio.face.extractor.mxnet_model
-   bob.bio.face.extractor.pytorch_loaded_model
-   bob.bio.face.extractor.pytorch_library_model
-   bob.bio.face.extractor.tf_model
-   bob.bio.face.extractor.opencv_model
+   bob.bio.face.extractor.MxNetModel
+   bob.bio.face.extractor.PyTorchLoadedModel
+   bob.bio.face.extractor.PyTorchLibraryModel
+   bob.bio.face.extractor.TensorFlowModel
+   bob.bio.face.extractor.OpenCVModel
 
 Face Recognition Algorithms
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~
diff --git a/doc/references.rst b/doc/references.rst
index bbb5417a..7d6d919c 100644
--- a/doc/references.rst
+++ b/doc/references.rst
@@ -17,4 +17,4 @@ References
 .. [ZSQ09]  *W. Zhang, S. Shan, L. Qing, X. Chen and W. Gao*. **Are Gabor phases really useless for face recognition?** Pattern Analysis & Applications, 12:301-307, 2009.
 .. [TFP18] de Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
 .. [HRM06]   *G. Heusch, Y. Rodriguez, and S. Marcel*. **Local Binary Patterns as an Image Preprocessing for Face Authentication**. In IEEE International Conference on Automatic Face and Gesture Recognition (AFGR), 2006.
-.. [LGB18]    *C. Li, M. Gunther and T. E. Boult*. **ECLIPSE: Ensembles of Centroids Leveraging Iteratively Processed Spatial Eclipse Clustering**. 2018 IEEE Winter Conference on Applications of Computer Vision (WACV), Lake Tahoe, NV, USA, 2018, pp. 131-140, doi: 10.1109/WACV.2018.00021.
\ No newline at end of file
+.. [LGB18]  *C. Li, M. Gunther and T. E. Boult*. **ECLIPSE: Ensembles of Centroids Leveraging Iteratively Processed Spatial Eclipse Clustering**. In IEEE Winter Conference on Applications of Computer Vision (WACV), 2018.
diff --git a/setup.py b/setup.py
index 1b89313b..be478a7d 100644
--- a/setup.py
+++ b/setup.py
@@ -137,9 +137,10 @@ setup(
             "lgbphs = bob.bio.face.config.baseline.lgbphs:transformer",
             "dummy = bob.bio.face.config.baseline.dummy:transformer",
             "mxnet-pipe = bob.bio.face.config.baseline.mxnet_pipe:transformer",
+            "mxnet-tinyface = bob.bio.face.config.baseline.mxnet_tinyface:transformer",
             "pytorch-pipe-v1 = bob.bio.face.config.baseline.pytorch_pipe_v1:transformer",
             "pytorch-pipe-v2 = bob.bio.face.config.baseline.pytorch_pipe_v2:transformer",
-            "tf-pipe = bob.bio.face.config.baseline.ty_pipe:transformer",
+            "tf-pipe = bob.bio.face.config.baseline.tf_pipe:transformer",
             "opencv-pipe = bob.bio.face.config.baseline.opencv_pipe:transformer",
         ],
         # baselines
@@ -155,11 +156,13 @@ setup(
             "lda = bob.bio.face.config.baseline.lda:pipeline",
             "dummy = bob.bio.face.config.baseline.dummy:pipeline",
             "resnet50-msceleb-arcface-2021 = bob.bio.face.config.baseline.resnet50_msceleb_arcface_2021:pipeline",
+            "resnet50-vgg2-arcface-2021 = bob.bio.face.config.baseline.resnet50_vgg2_arcface_2021:pipeline",
             "mobilenetv2-msceleb-arcface-2021 = bob.bio.face.config.baseline.mobilenetv2_msceleb_arcface_2021",
             "mxnet-pipe = bob.bio.face.config.baseline.mxnet_pipe:pipeline",
+            "mxnet-tinyface = bob.bio.face.config.baseline.mxnet_tinyface:pipeline",
             "pytorch-pipe-v1 = bob.bio.face.config.baseline.pytorch_pipe_v1:pipeline",
             "pytorch-pipe-v2 = bob.bio.face.config.baseline.pytorch_pipe_v2:pipeline",
-            "tf-pipe = bob.bio.face.config.baseline.ty_pipe:pipeline",
+            "tf-pipe = bob.bio.face.config.baseline.tf_pipe:pipeline",
             "opencv-pipe = bob.bio.face.config.baseline.opencv_pipe:pipeline",
         ],
         "bob.bio.config": [
@@ -173,9 +176,10 @@ setup(
             "lgbphs = bob.bio.face.config.baseline.lgbphs",
             "lda = bob.bio.face.config.baseline.lda",
             "mxnet-pipe = bob.bio.face.config.baseline.mxnet_pipe",
+            "mxnet-tinyface = bob.bio.face.config.baseline.mxnet_tinyface",
             "pytorch-pipe-v1 = bob.bio.face.config.baseline.pytorch_pipe_v1",
             "pytorch-pipe-v2 = bob.bio.face.config.baseline.pytorch_pipe_v2",
-            "tf-pipe = bob.bio.face.config.baseline.ty_pipe",
+            "tf-pipe = bob.bio.face.config.baseline.tf_pipe",
             "opencv-pipe = bob.bio.face.config.baseline.opencv_pipe",
             "arface            = bob.bio.face.config.database.arface",
             "atnt              = bob.bio.face.config.database.atnt",
@@ -196,6 +200,7 @@ setup(
             "casia-africa = bob.bio.face.config.database.casia_africa",
             "morph = bob.bio.face.config.database.morph",
             "resnet50-msceleb-arcface-2021 = bob.bio.face.config.baseline.resnet50_msceleb_arcface_2021",
+            "resnet50-vgg2-arcface-2021 = bob.bio.face.config.baseline.resnet50_vgg2_arcface_2021",
             "mobilenetv2-msceleb-arcface-2021 = bob.bio.face.config.baseline.mobilenetv2_msceleb_arcface_2021",
         ],
         "bob.bio.cli": [
-- 
GitLab