From 16372b9d767bc747f21b638643761ead6649f6bf Mon Sep 17 00:00:00 2001
From: Xinyi ZHANG <xinyi.zhang@idiap.ch>
Date: Thu, 27 May 2021 18:18:14 +0200
Subject: [PATCH] Update bob/bio/face/config/baseline/tf_pipe.py,
 bob/bio/face/config/baseline/opencv_pipe.py,
 bob/bio/face/test/test_baselines.py, bob/bio/face/test/test_extractors.py,
 requirements.txt files

---
 bob/bio/face/config/baseline/opencv_pipe.py |  7 +-
 bob/bio/face/test/test_baselines.py         | 30 +++++++
 bob/bio/face/test/test_extractors.py        | 97 +++++++++++++++++++++
 requirements.txt                            |  4 +-
 4 files changed, 133 insertions(+), 5 deletions(-)

diff --git a/bob/bio/face/config/baseline/opencv_pipe.py b/bob/bio/face/config/baseline/opencv_pipe.py
index afe4c940..bc9c5906 100644
--- a/bob/bio/face/config/baseline/opencv_pipe.py
+++ b/bob/bio/face/config/baseline/opencv_pipe.py
@@ -41,9 +41,12 @@ transform_extra_arguments = (
     else (("annotations", "annotations"),)
 )
 
-
 # Extractor
-extractor_transformer = OpenCVModel()
+
+weights = None  # PATH/TO/WEIGHTS
+config = None  # PATH/TO/CONFIG
+
+extractor_transformer = OpenCVModel(weights=weights, config=config)
 
 
 # Algorithm
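
The two placeholders above are meant to be filled in before the pipeline is run: `weights` points at the learned parameters of an OpenCV-readable DNN and `config` at its network definition. A minimal filled-in sketch, assuming a Caffe-style model and assuming `OpenCVModel` is already imported in this config as it is used above (both paths below are hypothetical):

    # Hypothetical example: any model that OpenCV's dnn module can read should work here.
    weights = "/path/to/face_embedding.caffemodel"       # learned parameters
    config = "/path/to/face_embedding_deploy.prototxt"   # network definition

    extractor_transformer = OpenCVModel(weights=weights, config=config)
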
diff --git a/bob/bio/face/test/test_baselines.py b/bob/bio/face/test/test_baselines.py
index bbe3c4c9..df1f5de6 100644
--- a/bob/bio/face/test/test_baselines.py
+++ b/bob/bio/face/test/test_baselines.py
@@ -152,3 +152,33 @@ def test_gabor_graph():
 
 # def test_lda():
 #    run_baseline("lda", get_fake_samples_for_training())
+
+
+@pytest.mark.slow
+@is_library_available("opencv-python")
+def test_opencv_pipe():
+    run_baseline("opencv-pipe", target_scores=None)
+
+
+@pytest.mark.slow
+@is_library_available("mxnet")
+def test_mxnet_pipe():
+    run_baseline("mxnet-pipe", target_scores=None)
+
+
+@pytest.mark.slow
+@is_library_available("tensorflow")
+def test_tf_pipe():
+    run_baseline("tf-pipe", target_scores=None)
+
+
+@pytest.mark.slow
+@is_library_available("torch")
+def test_pytorch_pipe_v1():
+    run_baseline("pytorch-pipe-v1", target_scores=None)
+
+
+@pytest.mark.slow
+@is_library_available("torch")
+def test_pytorch_pipe_v2():
+    run_baseline("pytorch-pipe-v2", target_scores=None)
diff --git a/bob/bio/face/test/test_extractors.py b/bob/bio/face/test/test_extractors.py
index c935beb2..62760f8e 100644
--- a/bob/bio/face/test/test_extractors.py
+++ b/bob/bio/face/test/test_extractors.py
@@ -178,3 +178,100 @@ def test_lgbphs():
         "bob.bio.face.test", "data/lgbphs_with_phase.hdf5"
     )
     _compare(feature, reference)
+
+
+def test_face_crop(height=112, width=112):
+    # read input
+    image, annotation = _image(), _annotation()
+    CROPPED_IMAGE_HEIGHT = height
+    CROPPED_IMAGE_WIDTH = width
+
+    # preprocessor with fixed eye positions (which correspond to the annotated ones)
+    fixed_cropper = bob.bio.face.preprocessor.FaceCrop(
+        cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH),
+        color_channel="rgb",
+        cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
+        fixed_positions={"reye": annotation["reye"], "leye": annotation["leye"]},
+    )
+
+    cropped = fixed_cropper.transform([image])
+    return cropped
+
+
+def _image():
+    return bob.io.base.load(
+        pkg_resources.resource_filename("bob.bio.face.test", "data/testimage.jpg")
+    )
+
+
+def _annotation():
+
+    return read_annotation_file(
+        pkg_resources.resource_filename("bob.bio.face.test", "data/testimage.pos"),
+        "named",
+    )
+
+
+def test_opencv():
+    data = _data()
+    opencv = OpenCVModel()
+    assert isinstance(opencv, OpenCVModel)
+
+    feature = opencv.transform(test_face_crop(224, 224))
+    reference = pkg_resources.resource_filename("bob.bio.face.test", "data/opencv.hdf5")
+    _compare(feature, reference)
+
+
+def test_tf():
+    data = _data()
+    tf = TensorFlowModel()
+    assert isinstance(tf, TensorFlowModel)
+
+    feature = tf.transform(test_face_crop(160, 160))
+    reference = pkg_resources.resource_filename("bob.bio.face.test", "data/tf.hdf5")
+    _compare(feature, reference)
+
+
+def test_pytorch_v1():
+    weights, config = None, None  # PATH/TO/WEIGHTS, PATH/TO/CONFIG
+    pytorch_v1 = PyTorchLoadedModel(weights=weights, config=config)
+    assert isinstance(pytorch_v1, PyTorchLoadedModel)
+
+    feature = pytorch_v1.transform(test_face_crop(224, 224))
+
+    reference = pkg_resources.resource_filename(
+        "bob.bio.face.test", "data/pytorch_v1.hdf5"
+    )
+    _compare(feature, reference)
+
+
+"""
+from bob.bio.face.embeddings.PyTorchModel import PyTorchLibraryModel
+from facenet_pytorch import InceptionResnetV1
+def test_pytorch_v2():
+    import h5py
+    data = _data()
+    model = InceptionResnetV1(pretrained='vggface2').eval()
+    pytorch_v2 = PyTorchLibraryModel(model=model)
+    assert isinstance(pytorch_v2, PyTorchLibraryModel)
+    
+    feature = pytorch_v2.transform(test_face_crop(160,160))
+    hf = h5py.File('pytorch_v2.hdf5', 'w')
+    hf.create_dataset('bob.bio.face.test/data/data1',data=feature)
+    hf.close()
+    reference = pkg_resources.resource_filename(
+        "bob.bio.face.test", "data/pytorch_v2.hdf5"
+    )
+    _compare(feature, reference)
+"""
+
+
+def test_mxnet():
+    data = _data()
+    mxnet = MxNetModel()
+    assert isinstance(mxnet, MxNetModel)
+
+    feature = mxnet.transform(test_face_crop(112, 112))
+    reference = pkg_resources.resource_filename("bob.bio.face.test", "data/mxnet.hdf5")
+    _compare(feature, reference)
+
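
The extractor tests above follow the same pattern: crop the test image with `test_face_crop`, run it through the embedding model, and compare the result against a reference array shipped under `data/*.hdf5` (the disabled `test_pytorch_v2` block also shows how such a reference gets written in the first place). A minimal sketch of that write/compare round trip with h5py and numpy, where the dataset name "array" and the tolerance are assumptions rather than what the existing `_compare` helper uses:

    import h5py
    import numpy


    def write_reference(feature, path):
        # Persist a freshly extracted embedding so it can be shipped as a test reference.
        with h5py.File(path, "w") as f:
            f.create_dataset("array", data=feature)


    def compare_to_reference(feature, path, atol=1e-5):
        # Re-load the stored embedding and check the new one is numerically close to it.
        with h5py.File(path, "r") as f:
            reference = numpy.array(f["array"])
        assert numpy.allclose(feature, reference, atol=atol)
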
diff --git a/requirements.txt b/requirements.txt
index aa33f11f..02e30ac5 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -21,7 +21,5 @@ matplotlib   # for plotting
 mxnet
 opencv-python
 six
-mxnet
-opencv-python
 scikit-image
-scikit-learn # for pipelines Tranformers
\ No newline at end of file
+scikit-learn # for pipeline Transformers
-- 
GitLab