diff --git a/bob/bio/face/config/baseline/opencv_pipe.py b/bob/bio/face/config/baseline/opencv_pipe.py index afe4c9405b1f7443f9e66881e3b1a91bc2f8801d..bc9c5906877b17cd73905486e013681aa3569d83 100644 --- a/bob/bio/face/config/baseline/opencv_pipe.py +++ b/bob/bio/face/config/baseline/opencv_pipe.py @@ -41,9 +41,12 @@ transform_extra_arguments = ( else (("annotations", "annotations"),) ) - # Extractor -extractor_transformer = OpenCVModel() + +weights = None # PATH/TO/WEIGHTS +config = None # PATH/TO/CONFIG + +extractor_transformer = OpenCVModel(weights=weights, config=config) # Algorithm diff --git a/bob/bio/face/test/test_baselines.py b/bob/bio/face/test/test_baselines.py index bbe3c4c9afeaa468fd789496e05f6ad88c81885a..df1f5de6964188faf44f8459b2762973a5da093a 100644 --- a/bob/bio/face/test/test_baselines.py +++ b/bob/bio/face/test/test_baselines.py @@ -152,3 +152,33 @@ def test_gabor_graph(): # def test_lda(): # run_baseline("lda", get_fake_samples_for_training()) + + +@pytest.mark.slow +@is_library_available("opencv-python") +def test_opencv_pipe(): + run_baseline("opencv-pipe", target_scores=None) + + +@pytest.mark.slow +@is_library_available("mxnet") +def test_mxnet_pipe(): + run_baseline("mxnet-pipe", target_scores=None) + + +@pytest.mark.slow +@is_library_available("tensorflow") +def test_tf_pipe(): + run_baseline("tf-pipe", target_scores=None) + + +@pytest.mark.slow +@is_library_available("torch") +def test_pytorch_pipe_v1(): + run_baseline("pytorch-pipe-v1", target_scores=None) + + +@pytest.mark.slow +@is_library_available("torch") +def test_pytorch_pipe_v2(): + run_baseline("pytorch-pipe-v2", target_scores=None) diff --git a/bob/bio/face/test/test_extractors.py b/bob/bio/face/test/test_extractors.py index c935beb2848b067fa87a51c9db0ee688f8c961d5..62760f8e4d81c67f6209457f41df7db60758a9f4 100644 --- a/bob/bio/face/test/test_extractors.py +++ b/bob/bio/face/test/test_extractors.py @@ -178,3 +178,100 @@ def test_lgbphs(): "bob.bio.face.test", 
"data/lgbphs_with_phase.hdf5" ) _compare(feature, reference) + + +def test_face_crop(height=112, width=112): + # read input + image, annotation = _image(), _annotation() + CROPPED_IMAGE_HEIGHT = height + CROPPED_IMAGE_WIDTH = width + + # preprocessor with fixed eye positions (which correspond to th ones + fixed_cropper = bob.bio.face.preprocessor.FaceCrop( + cropped_image_size=(CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH), + color_channel="rgb", + cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS}, + fixed_positions={"reye": annotation["reye"], "leye": annotation["leye"]}, + ) + + cropped = fixed_cropper.transform([image]) + return cropped + + +def _image(): + return bob.io.base.load( + pkg_resources.resource_filename("bob.bio.face.test", "data/testimage.jpg") + ) + + +def _annotation(): + + return read_annotation_file( + pkg_resources.resource_filename("bob.bio.face.test", "data/testimage.pos"), + "named", + ) + + +def test_opencv(): + data = _data() + opencv = bob.bio.face.embeddings.GenericOpenCV.OpenCVModel() + assert isinstance(opencv, OpenCVModel) + + feature = opencv.transform(test_face_crop(224, 224)) + reference = pkg_resources.resource_filename("bob.bio.face.test", "data/opencv.hdf5") + _compare(feature, reference) + + +def test_tf(): + data = _data() + tf = TensorFlowModel() + assert isinstance(tf, TensorFlowModel) + + feature = tf.transform(test_face_crop(160, 160)) + reference = pkg_resources.resource_filename("bob.bio.face.test", "data/tf.hdf5") + _compare(feature, reference) + + +def test_pytorch_v1(): + data = _data() + pytorch_v1 = PyTorchLoadedModel(weights=weights, config=config) + assert isinstance(pytorch_v1, PyTorchLoadedModel) + + feature = pytorch_v1.transform(test_face_crop(224, 224)) + + reference = pkg_resources.resource_filename( + "bob.bio.face.test", "data/pytorch_v1.hdf5" + ) + _compare(feature, reference) + + +""" +from bob.bio.face.embeddings.PyTorchModel import PyTorchLibraryModel +from facenet_pytorch import 
InceptionResnetV1 +def test_pytorch_v2(): + import h5py + data = _data() + model = InceptionResnetV1(pretrained='vggface2').eval() + pytorch_v2 = PyTorchLibraryModel(model=model) + assert isinstance(pytorch_v2, PyTorchLibraryModel) + + feature = pytorch_v2.transform(test_face_crop(160,160)) + hf = h5py.File('pytorch_v2.hdf5', 'w') + hf.create_dataset('bob.bio.face.test/data/data1',data=feature) + hf.close() + reference = pkg_resources.resource_filename( + "bob.bio.face.test", "data/pytorch_v2.hdf5" + ) + _compare(feature, reference) +""" + + +def test_mxnet(): + data = _data() + mxnet = MxNetModel() + assert isinstance(mxnet, MxNetModel) + + feature = mxnet.transform(test_face_crop(112, 112)) + reference = pkg_resources.resource_filename("bob.bio.face.test", "data/mxnet.hdf5") + _compare(feature, reference) + diff --git a/requirements.txt b/requirements.txt index aa33f11f510faa657337800b0969ed16d350f4b8..02e30ac5109f4d32cb93c768ec7a78195f25de94 100644 --- a/requirements.txt +++ b/requirements.txt @@ -21,7 +21,5 @@ matplotlib # for plotting mxnet opencv-python six -mxnet -opencv-python scikit-image -scikit-learn # for pipelines Tranformers \ No newline at end of file +scikit-learn # for pipelines Transformers