diff --git a/bob/bio/face/config/baseline/mxnet_tinyface_pipe.py b/bob/bio/face/config/baseline/mxnet_tinyface_pipe.py
deleted file mode 100644
index e19969d0ea08c874aff7cbc5b65197cd7446da69..0000000000000000000000000000000000000000
--- a/bob/bio/face/config/baseline/mxnet_tinyface_pipe.py
+++ /dev/null
@@ -1,30 +0,0 @@
-import bob.bio.base
-from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.face.annotator import BobIpTinyface
-from bob.bio.face.extractor import MxNetModel
-
-from bob.bio.base.algorithm import Distance
-from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
-import scipy.spatial
-from bob.bio.base.pipelines.vanilla_biometrics import Distance
-from sklearn.pipeline import make_pipeline
-from bob.pipelines import wrap
-from bob.bio.base.pipelines.vanilla_biometrics import VanillaBiometricsPipeline
-
-
-annotator_transformer = BobIpTinyface()
-
-preprocessor_transformer = FaceCrop(cropped_image_size=(112,112), cropped_positions={'leye':(49,72), 'reye':(49,38)}, color_channel='rgb',annotator=annotator_transformer)
-
-extractor_transformer = MxNetModel()
-
-
-algorithm = Distance(distance_function = scipy.spatial.distance.cosine,is_distance_function = True)
-
-transformer = make_pipeline(
-    wrap(["sample"], preprocessor_transformer),
-    wrap(["sample"], extractor_transformer)
-)
-
-pipeline = VanillaBiometricsPipeline(transformer, algorithm)
-transformer = pipeline.transformer
\ No newline at end of file
diff --git a/bob/bio/face/config/baseline/opencv_tinyface_pipe.py b/bob/bio/face/config/baseline/opencv_tinyface_pipe.py
deleted file mode 100644
index 6a61be6c8c22bbbccdb9ca44621a2e31f134acf1..0000000000000000000000000000000000000000
--- a/bob/bio/face/config/baseline/opencv_tinyface_pipe.py
+++ /dev/null
@@ -1,70 +0,0 @@
-import bob.bio.base
-from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.face.annotator.bobiptinyface import BobIpTinyface
-from bob.bio.base.transformers.preprocessor import PreprocessorTransformer
-
-from bob.bio.face.extractor import opencv_model
-
-from bob.bio.base.algorithm import Distance
-from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
-import scipy.spatial
-from bob.bio.base.pipelines.vanilla_biometrics import Distance
-
-
-from sklearn.pipeline import make_pipeline
-from bob.pipelines import wrap
-from bob.bio.base.pipelines.vanilla_biometrics import VanillaBiometricsPipeline
-
-
-#memory_demanding = False
-#if "database" in locals():
-#    annotation_type = database.annotation_type
-#    fixed_positions = database.fixed_positions
-#    memory_demanding = (
-#        database.memory_demanding if hasattr(database, "memory_demanding") else False
-#    )
-
-#else:
-#    annotation_type = None
-#    fixed_positions = None
-
-
-annotator_transformer = BobIpTinyface()
-
-
-#right_eye=annotator_transformer.annotate()["reye"]
-#left_eye=annotator_transformer.annotate()["leye"]
-#cropped_positions={'leye':left_eye, 'reye':right_eye}
-#topleft = annotator_transformer.annotate()["topleft"]
-#bottomright = annotator_transformer.annotate()["bottomright"]
-cropped_positions={'leye':(49,72), 'reye':(49,38)}
-preprocessor_transformer = FaceCrop(cropped_image_size=(224,224),cropped_positions={'leye':(49,72), 'reye':(49,38)}, color_channel='rgb', annotator = annotator_transformer)
-
-
-
-
-
-
-extractor_transformer = opencv_model()
-
-
-
-
-algorithm = Distance(distance_function = scipy.spatial.distance.cosine,is_distance_function = True)
-
-
-# Chain the Transformers together
-#transformer = make_pipeline(
-#    wrap(["sample"], preprocessor_transformer,transform_extra_arguments=transform_extra_arguments),
-#    wrap(["sample"], extractor_transformer)
-#    # Add more transformers here if needed
-#)
-
-transformer = make_pipeline(
-    wrap(["sample"], preprocessor_transformer),
-    wrap(["sample"], extractor_transformer)
-)
-
-# Assemble the Vanilla Biometric pipeline and execute
-pipeline = VanillaBiometricsPipeline(transformer, algorithm)
-transformer = pipeline.transformer
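Both deleted baseline configs assemble the same vanilla-biometrics pipeline and differ only in crop geometry and extractor class. A condensed sketch of that shared pattern, using only names from the deleted files (the extractor is left as a placeholder):

    # Shared pattern of the two deleted configs: annotate -> crop -> embed,
    # scored with cosine distance. Only crop size and extractor class vary.
    import scipy.spatial
    from sklearn.pipeline import make_pipeline

    from bob.bio.base.pipelines.vanilla_biometrics import (
        Distance,
        VanillaBiometricsPipeline,
    )
    from bob.bio.face.annotator import BobIpTinyface
    from bob.bio.face.preprocessor import FaceCrop
    from bob.pipelines import wrap

    annotator = BobIpTinyface()
    preprocessor = FaceCrop(
        cropped_image_size=(112, 112),  # (224, 224) in the OpenCV variant
        cropped_positions={"leye": (49, 72), "reye": (49, 38)},
        color_channel="rgb",
        annotator=annotator,
    )
    extractor = ...  # MxNetModel() or opencv_model(), depending on the config

    transformer = make_pipeline(
        wrap(["sample"], preprocessor), wrap(["sample"], extractor)
    )
    pipeline = VanillaBiometricsPipeline(
        transformer,
        Distance(
            distance_function=scipy.spatial.distance.cosine,
            is_distance_function=True,
        ),
    )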
wrap(["sample"], preprocessor_transformer,transform_extra_arguments=transform_extra_arguments), -# wrap(["sample"], extractor_transformer) -# # Add more transformers here if needed -#) - -transformer = make_pipeline( - wrap(["sample"], preprocessor_transformer), - wrap(["sample"], extractor_transformer) -) - -# Assemble the Vanilla Biometric pipeline and execute -pipeline = VanillaBiometricsPipeline(transformer, algorithm) -transformer = pipeline.transformer diff --git a/bob/bio/face/extractor/MxNetModel.py b/bob/bio/face/extractor/MxNetModel.py deleted file mode 100644 index b35750cd2e5896a45551450c896555968b32e7de..0000000000000000000000000000000000000000 --- a/bob/bio/face/extractor/MxNetModel.py +++ /dev/null @@ -1,97 +0,0 @@ -#!/usr/bin/env python -# vim: set fileencoding=utf-8 : -# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch> - -"""Feature extraction resnet models using mxnet interface""" -from sklearn.base import TransformerMixin, BaseEstimator -from sklearn.utils import check_array -import numpy as np -import pkg_resources -import os -import mxnet as mx -from mxnet import gluon -import warnings -from bob.extension import rc -mxnet_resnet_directory = rc["bob.extractor_model.mxnet"] -mxnet_weight_directory = rc["bob.extractor_weights.mxnet"] - -class MxNetModel(TransformerMixin, BaseEstimator): - - """Extracts features using deep face recognition models under MxNet Interfaces. - - Users can download the pretrained face recognition models with MxNet Interface. The path to downloaded models should be specified before running the extractor (usually before running the pipeline file that includes the extractor). That is, set config of the model frame to :py:class:`bob.extractor_model.mxnet`, and set config of the parameters to :py:class:`bob.extractor_weights.mxnet`. - - .. code-block:: sh - - $ bob config set bob.extractor_model.mxnet /PATH/TO/MODEL/ - $ bob config set bob.extractor_weights.mxnet /PATH/TO/WEIGHTS/ - - Examples: (Pretrained ResNet models): `LResNet100E-IR,ArcFace@ms1m-refine-v2 <https://github.com/deepinsight/insightface>`_ - - The extracted features can be combined with different the algorithms. - - **Parameters:** - use_gpu: True or False. - """ - - def __init__(self, use_gpu=False, **kwargs): - super().__init__(**kwargs) - self.model = None - self.use_gpu = use_gpu - - internal_path = pkg_resources.resource_filename( - __name__, os.path.join("data", "resnet"), - ) - - checkpoint_path = ( - internal_path - if rc["bob.bio.face.models.mxnet_resnet"] is None - else rc["bob.bio.face.models.mxnet_resnet"] - ) - - self.checkpoint_path = checkpoint_path - - def _load_model(self): - - ctx = mx.gpu() if self.use_gpu else mx.cpu() - - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - deserialized_net = gluon.nn.SymbolBlock.imports(mxnet_resnet_directory, ['data'], mxnet_weight_directory, ctx=ctx) - - self.model = deserialized_net - - def transform(self, X): - """__call__(image) -> feature - - Extracts the features from the given image. - - **Parameters:** - - image : 2D :py:class:`numpy.ndarray` (floats) - The image to extract the features from. - - **Returns:** - - feature : 2D, 3D, or 4D :py:class:`numpy.ndarray` (floats) - The list of features extracted from the image. 
- """ - - if self.model is None: - self._load_model() - - X = check_array(X, allow_nd=True) - X = mx.nd.array(X) - - return self.model(X,).asnumpy() - - - def __getstate__(self): - # Handling unpicklable objects - - d = self.__dict__.copy() - d["model"] = None - return d - - def _more_tags(self): - return {"stateless": True, "requires_fit": False} diff --git a/bob/bio/face/extractor/OpenCVModel.py b/bob/bio/face/extractor/OpenCVModel.py deleted file mode 100644 index 3fbdb499d5970a4c3757a6d2f250c4d7149b7589..0000000000000000000000000000000000000000 --- a/bob/bio/face/extractor/OpenCVModel.py +++ /dev/null @@ -1,102 +0,0 @@ -#!/usr/bin/env python -# vim: set fileencoding=utf-8 : -# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch> - -import bob.bio.base -from bob.bio.face.preprocessor import FaceCrop - -from bob.bio.base.transformers.preprocessor import PreprocessorTransformer - -import cv2 -import numpy as np - -from bob.learn.tensorflow.utils.image import to_channels_last -from sklearn.base import TransformerMixin, BaseEstimator -from sklearn.utils import check_array - -from bob.extension import rc -from functools import partial -import pkg_resources -import os - -from PIL import Image - -opencv_model_directory = rc["bob.extractor_model.opencv"] -opencv_model_prototxt = rc["bob.extractor_weights.opencv"] - - -class OpenCVModel(TransformerMixin, BaseEstimator): - """Extracts features using deep face recognition models under OpenCV Interface - - Users can download the pretrained face recognition models with OpenCV Interface. The path to downloaded models should be specified before running the extractor (usually before running the pipeline file that includes the extractor). That is, set config of the model frame to :py:class:`bob.extractor_model.opencv`, and set config of the parameters to :py:class:`bob.extractor_weights.opencv`. - - .. code-block:: sh - - $ bob config set bob.extractor_model.opencv /PATH/TO/MODEL/ - $ bob config set bob.extractor_weights.opencv /PATH/TO/WEIGHTS/ - - The extracted features can be combined with different the algorithms. - - .. note:: - This structure only can be used for CAFFE pretrained model. - """ - - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.model = None - - internal_path = pkg_resources.resource_filename( - __name__, os.path.join("data", "opencv_model"), - ) - - checkpoint_path = ( - internal_path - if rc["bob.bio.face.models.opencv"] is None - else rc["bob.bio.face.models.opencv"] - ) - - self.checkpoint_path = checkpoint_path - - def _load_model(self): - - net = cv2.dnn.readNetFromCaffe(opencv_model_prototxt,opencv_model_directory) - - self.model = net - - def transform(self, X): - """__call__(image) -> feature - - Extracts the features from the given image. - - **Parameters:** - - image : 2D :py:class:`numpy.ndarray` (floats) - The image to extract the features from. - - **Returns:** - - feature : 2D or 3D :py:class:`numpy.ndarray` (floats) - The list of features extracted from the image. 
- """ - - if self.model is None: - self._load_model() - - img = np.array(X) - img = img/255 - - self.model.setInput(img) - - return self.model.forward() - - - def __getstate__(self): - # Handling unpicklable objects - - d = self.__dict__.copy() - d["model"] = None - return d - - def _more_tags(self): - return {"stateless": True, "requires_fit": False} diff --git a/bob/bio/face/extractor/PyTorchModel.py b/bob/bio/face/extractor/PyTorchModel.py deleted file mode 100644 index 883e19c8c243cad3f33fabd27dabe9166856c888..0000000000000000000000000000000000000000 --- a/bob/bio/face/extractor/PyTorchModel.py +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/env python -# vim: set fileencoding=utf-8 : -# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch> - -import torch -from bob.learn.tensorflow.utils.image import to_channels_last -from sklearn.base import TransformerMixin, BaseEstimator -from sklearn.utils import check_array -from bob.extension import rc -from functools import partial -import pkg_resources -import os -import numpy as np -import imp - -pytorch_model_directory = rc["bob.extractor_model.pytorch"] -pytorch_weight_directory = rc["bob.extractor_weights.pytorch"] - -class PyTorchLoadedModel(TransformerMixin, BaseEstimator): - """Extracts features using deep face recognition models under PyTorch Interface, especially for the models and weights that need to load by hand. - - Users can download the pretrained face recognition models with PyTorch Interface. The path to downloaded models should be specified before running the extractor (usually before running the pipeline file that includes the extractor). That is, set config of the model frame to :py:class:`bob.extractor_model.pytorch`, and set config of the parameters to :py:class:`bob.extractor_weights.pytorch`. - - .. code-block:: sh - - $ bob config set bob.extractor_model.pytorch /PATH/TO/MODEL/ - $ bob config set bob.extractor_weights.pytorch /PATH/TO/WEIGHTS/ - - The extracted features can be combined with different the algorithms. - """ - - def __init__(self, **kwargs): - super().__init__(**kwargs) - self.model = None - - internal_path = pkg_resources.resource_filename( - __name__, os.path.join("data", "resnet"), - ) - - checkpoint_path = ( - internal_path - if rc["bob.bio.face.models.pytorchmodel"] is None - else rc["bob.bio.face.models.pytorchmodel"] - ) - - self.checkpoint_path = checkpoint_path - self.device = None - - def _load_model(self): - - MainModel = imp.load_source('MainModel', pytorch_model_directory) - network = torch.load(pytorch_weight_directory) - network.eval() - - self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - - network.to(self.device) - - self.model = network - - def transform(self, X): - """__call__(image) -> feature - - Extracts the features from the given image. - - **Parameters:** - - image : 2D :py:class:`numpy.ndarray` (floats) - The image to extract the features from. - - **Returns:** - - feature : 2D or 3D :py:class:`numpy.ndarray` (floats) - The list of features extracted from the image. 
- """ - if self.model is None: - self._load_model() - - X = torch.Tensor(X) - X = X/255 - - return self.model(X).detach().numpy() - - - def __getstate__(self): - # Handling unpicklable objects - - d = self.__dict__.copy() - d["model"] = None - return d - - def _more_tags(self): - return {"stateless": True, "requires_fit": False} - - - - - - - - -class PyTorchLibraryModel(TransformerMixin, BaseEstimator): - """Extracts features using deep face recognition with registered model frames in the PyTorch Library. - - Users can import the pretrained face recognition models from PyTorch library. The model should be called in the pipeline. Example: `facenet_pytorch <https://github.com/timesler/facenet-pytorch>`_ - - The extracted features can be combined with different the algorithms. - - **Parameters:** - model: pytorch model calling from library. - """ - - def __init__(self, model=None, **kwargs): - super().__init__(**kwargs) - self.model = model - - internal_path = pkg_resources.resource_filename( - __name__, os.path.join("data", "resnet"), - ) - - checkpoint_path = ( - internal_path - if rc["bob.bio.face.models.pytorchmodel"] is None - else rc["bob.bio.face.models.pytorchmodel"] - ) - - self.checkpoint_path = checkpoint_path - self.device = None - - def _load_model(self): - - self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - self.model.to(self.device) - - def transform(self, X): - """__call__(image) -> feature - - Extracts the features from the given image. - - **Parameters:** - - image : 2D :py:class:`numpy.ndarray` (floats) - The image to extract the features from. - - **Returns:** - - feature : 2D or 3D :py:class:`numpy.ndarray` (floats) - The list of features extracted from the image. - """ - - if self.model is None: - self._load_model() - - X = torch.Tensor(X) - X = X/255 - - return self.model(X).detach().numpy() - - - def __getstate__(self): - # Handling unpicklable objects - - d = self.__dict__.copy() - d["model"] = None - return d - - def _more_tags(self): - return {"stateless": True, "requires_fit": False} diff --git a/bob/bio/face/extractor/PyTorchModel.py~HEAD b/bob/bio/face/extractor/PyTorchModel.py~HEAD index 24e02bc3013e81463c799b8ca95ab6bf0fb5d5f7..85473ac4d2626f27cd0edf9f3fe0a2ac238631eb 100644 --- a/bob/bio/face/extractor/PyTorchModel.py~HEAD +++ b/bob/bio/face/extractor/PyTorchModel.py~HEAD @@ -32,6 +32,7 @@ class PyTorchLoadedModel(TransformerMixin, BaseEstimator): def __init__(self, **kwargs): super().__init__(**kwargs) +<<<<<<< HEAD:bob/bio/face/extractor/PyTorchModel.py~HEAD self.model = None internal_path = pkg_resources.resource_filename( @@ -43,6 +44,27 @@ class PyTorchLoadedModel(TransformerMixin, BaseEstimator): if rc["bob.bio.face.models.pytorchmodel"] is None else rc["bob.bio.face.models.pytorchmodel"] ) +======= + self.weights = weights + self.config = config + + if self.config == None and self.weights == None: + internal_path = pkg_resources.resource_filename( + __name__, os.path.join("data", "AFFFE-42a53f19", ""), + ) + + checkpoint_path = ( + internal_path + if rc["bob.bio.face.models.AFFFE"] is None + else rc["bob.bio.face.models.AFFFE"] + ) + + urls = [ + "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/pytorch/AFFFE-42a53f19.tar.gz" + ] + + download_model(checkpoint_path, urls, "AFFFE-42a53f19.tar.gz") +>>>>>>> Update bob/bio/face/extractor/__init__.py, bob/bio/face/test/test_extractors.py, bob/bio/face/embeddings/PyTorchModel.py files:bob/bio/face/embeddings/PyTorchModel.py self.checkpoint_path = checkpoint_path 
diff --git a/bob/bio/face/extractor/TensorFlowModel.py b/bob/bio/face/extractor/TensorFlowModel.py
deleted file mode 100644
index b1ddcdd55f947c5aacbb18d1beab015852dd28e3..0000000000000000000000000000000000000000
--- a/bob/bio/face/extractor/TensorFlowModel.py
+++ /dev/null
@@ -1,91 +0,0 @@
-#!/usr/bin/env python
-# vim: set fileencoding=utf-8 :
-# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch>
-
-import tensorflow as tf
-from bob.extension import rc
-from bob.learn.tensorflow.utils.image import to_channels_last
-from sklearn.base import TransformerMixin, BaseEstimator
-from sklearn.utils import check_array
-from tensorflow.keras import Sequential
-from tensorflow.keras.layers.experimental import preprocessing
-from functools import partial
-import pkg_resources
-import os
-import numpy as np
-from tensorflow import keras
-
-tf_model_directory = rc["bob.extractor_model.tf"]
-
-class TensorFlowModel(TransformerMixin, BaseEstimator):
-    """Extracts features using deep face recognition models under TensorFlow Interface.
-
-    Users can download the pretrained face recognition models with TensorFlow Interface. The path to downloaded models should be specified before running the extractor (usually before running the pipeline file that includes the extractor). That is, set config of the model to :py:class:`bob.extractor_model.tf`.
-
-    .. code-block:: sh
-
-        $ bob config set bob.extractor_model.tf /PATH/TO/MODEL/
-
-    The extracted features can be combined with different the algorithms.
-    """
-
-    def __init__(self, **kwargs):
-        super().__init__(**kwargs)
-        self.model = None
-
-        internal_path = pkg_resources.resource_filename(
-            __name__, os.path.join("data", "resnet"),
-        )
-
-        checkpoint_path = (
-            internal_path
-            if rc["bob.bio.face.models.tfmodel"] is None
-            else rc["bob.bio.face.models.tfmodel"]
-        )
-
-        self.checkpoint_path = checkpoint_path
-
-    def _load_model(self):
-
-        model = tf.keras.models.load_model(tf_model_directory)
-
-        self.model = model
-
-    def transform(self, X):
-        """__call__(image) -> feature
-
-        Extracts the features from the given image.
-
-        **Parameters:**
-
-        image : 2D :py:class:`numpy.ndarray` (floats)
-            The image to extract the features from.
-
-        **Returns:**
-
-        feature : 2D or 3D :py:class:`numpy.ndarray` (floats)
-            The list of features extracted from the image.
-        """
-
-        if self.model is None:
-            self._load_model()
-
-        X = check_array(X, allow_nd=True)
-        X = tf.convert_to_tensor(X)
-        X = to_channels_last(X)
-
-        X = X/255
-        predict = self.model.predict(X)
-
-        return predict
-
-
-    def __getstate__(self):
-        # Handling unpicklable objects
-
-        d = self.__dict__.copy()
-        d["model"] = None
-        return d
-
-    def _more_tags(self):
-        return {"stateless": True, "requires_fit": False}
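The TensorFlow variant follows the same shape: load a saved model once, convert bob's channels-first batches to channels-last, scale, and predict. A standalone sketch (the SavedModel path and input geometry are placeholders):

    # Sketch of the TensorFlow path in the deleted extractor; the SavedModel
    # directory stands in for rc["bob.extractor_model.tf"].
    import numpy as np
    import tensorflow as tf

    model = tf.keras.models.load_model("/PATH/TO/MODEL/")

    batch = np.random.rand(1, 3, 160, 160).astype("float32")  # bob-style NCHW; size depends on the model
    batch = np.transpose(batch, (0, 2, 3, 1))  # channels-last, as to_channels_last did
    embedding = model.predict(batch / 255)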
- """ - - if self.model is None: - self._load_model() - - X = check_array(X, allow_nd=True) - X = tf.convert_to_tensor(X) - X = to_channels_last(X) - - X = X/255 - predict = self.model.predict(X) - - return predict - - - def __getstate__(self): - # Handling unpicklable objects - - d = self.__dict__.copy() - d["model"] = None - return d - - def _more_tags(self): - return {"stateless": True, "requires_fit": False} diff --git a/bob/bio/face/extractor/__init__.py b/bob/bio/face/extractor/__init__.py index d98275bc189a792df01b1031277063bfd6ff82a5..851963904bc693be732108999d1eeea29af03f11 100644 --- a/bob/bio/face/extractor/__init__.py +++ b/bob/bio/face/extractor/__init__.py @@ -1,11 +1,6 @@ from .DCTBlocks import DCTBlocks from .GridGraph import GridGraph from .LGBPHS import LGBPHS -from .MxNetModel import MxNetModel -from .PyTorchModel import PyTorchLoadedModel -from .PyTorchModel import PyTorchLibraryModel -from .TensorFlowModel import TensorFlowModel -from .OpenCVModel import OpenCVModel # gets sphinx autodoc done right - don't remove it def __appropriate__(*args): @@ -23,14 +18,5 @@ def __appropriate__(*args): obj.__module__ = __name__ -__appropriate__( - DCTBlocks, - GridGraph, - LGBPHS, - MxNetModel, - PyTorchLoadedModel, - PyTorchLibraryModel, - TensorFlowModel, - OpenCVModel, -) +__appropriate__(DCTBlocks, GridGraph, LGBPHS) __all__ = [_ for _ in dir() if not _.startswith("_")] diff --git a/bob/bio/face/test/test_extractors.py b/bob/bio/face/test/test_extractors.py index 62760f8e4d81c67f6209457f41df7db60758a9f4..082db33ff8ac059b98c0bb3ae221072d2bdfdcae 100644 --- a/bob/bio/face/test/test_extractors.py +++ b/bob/bio/face/test/test_extractors.py @@ -26,6 +26,16 @@ import math import bob.io.base.test_utils import pkg_resources +from bob.bio.face.embeddings.GenericOpenCV import OpenCVModel +from bob.db.base import read_annotation_file +from bob.bio.face.embeddings.TensorFlowModel import TensorFlowModel +from bob.bio.face.embeddings.PyTorchModel import PyTorchLoadedModel +from bob.bio.face.embeddings.MxNetModel import MxNetModel +from bob.bio.face.embeddings.PyTorchModel import PyTorchLoadedModel +from bob.bio.face.embeddings.PyTorchModel import PyTorchLibraryModel + +import pytest +from bob.bio.base.test.utils import is_library_available regenerate_refs = False @@ -212,6 +222,8 @@ def _annotation(): ) +@pytest.mark.slow +@is_library_available("opencv-python") def test_opencv(): data = _data() opencv = bob.bio.face.embeddings.GenericOpenCV.OpenCVModel() @@ -222,6 +234,8 @@ def test_opencv(): _compare(feature, reference) +@pytest.mark.slow +@is_library_available("tensorflow") def test_tf(): data = _data() tf = TensorFlowModel() @@ -232,9 +246,11 @@ def test_tf(): _compare(feature, reference) +@pytest.mark.slow +@is_library_available("torch") def test_pytorch_v1(): data = _data() - pytorch_v1 = PyTorchLoadedModel(weights=weights, config=config) + pytorch_v1 = PyTorchLoadedModel() assert isinstance(pytorch_v1, PyTorchLoadedModel) feature = pytorch_v1.transform(test_face_crop(224, 224)) @@ -246,8 +262,9 @@ def test_pytorch_v1(): """ -from bob.bio.face.embeddings.PyTorchModel import PyTorchLibraryModel from facenet_pytorch import InceptionResnetV1 +@pytest.mark.slow +@is_library_available("torch") def test_pytorch_v2(): import h5py data = _data() @@ -266,6 +283,8 @@ def test_pytorch_v2(): """ +@pytest.mark.slow +@is_library_available("mxnet") def test_mxnet(): data = _data() mxnet = MxNetModel() @@ -274,4 +293,3 @@ def test_mxnet(): feature = mxnet.transform(test_face_crop(112, 112)) reference = 
diff --git a/bob/bio/face/test/test_extractors.py b/bob/bio/face/test/test_extractors.py
index 62760f8e4d81c67f6209457f41df7db60758a9f4..082db33ff8ac059b98c0bb3ae221072d2bdfdcae 100644
--- a/bob/bio/face/test/test_extractors.py
+++ b/bob/bio/face/test/test_extractors.py
@@ -26,6 +26,16 @@ import math
 import bob.io.base.test_utils
 import pkg_resources
 
+from bob.bio.face.embeddings.GenericOpenCV import OpenCVModel
+from bob.db.base import read_annotation_file
+from bob.bio.face.embeddings.TensorFlowModel import TensorFlowModel
+from bob.bio.face.embeddings.PyTorchModel import PyTorchLoadedModel
+from bob.bio.face.embeddings.MxNetModel import MxNetModel
+from bob.bio.face.embeddings.PyTorchModel import PyTorchLoadedModel
+from bob.bio.face.embeddings.PyTorchModel import PyTorchLibraryModel
+
+import pytest
+from bob.bio.base.test.utils import is_library_available
 
 regenerate_refs = False
 
@@ -212,6 +222,8 @@ def _annotation():
 )
 
 
+@pytest.mark.slow
+@is_library_available("opencv-python")
 def test_opencv():
     data = _data()
     opencv = bob.bio.face.embeddings.GenericOpenCV.OpenCVModel()
@@ -222,6 +234,8 @@
     _compare(feature, reference)
 
 
+@pytest.mark.slow
+@is_library_available("tensorflow")
 def test_tf():
     data = _data()
     tf = TensorFlowModel()
@@ -232,9 +246,11 @@
     _compare(feature, reference)
 
 
+@pytest.mark.slow
+@is_library_available("torch")
 def test_pytorch_v1():
     data = _data()
-    pytorch_v1 = PyTorchLoadedModel(weights=weights, config=config)
+    pytorch_v1 = PyTorchLoadedModel()
     assert isinstance(pytorch_v1, PyTorchLoadedModel)
 
     feature = pytorch_v1.transform(test_face_crop(224, 224))
@@ -246,8 +262,9 @@
 """
 
-from bob.bio.face.embeddings.PyTorchModel import PyTorchLibraryModel
 from facenet_pytorch import InceptionResnetV1
 
+@pytest.mark.slow
+@is_library_available("torch")
 def test_pytorch_v2():
     import h5py
     data = _data()
@@ -266,6 +283,8 @@
 """
 
 
+@pytest.mark.slow
+@is_library_available("mxnet")
 def test_mxnet():
     data = _data()
     mxnet = MxNetModel()
 
     feature = mxnet.transform(test_face_crop(112, 112))
     reference = pkg_resources.resource_filename("bob.bio.face.test", "data/mxnet.hdf5")
     _compare(feature, reference)
-
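After this change the deep extractors are imported from bob.bio.face.embeddings, as the updated tests do. A minimal usage sketch under that assumption:

    # Minimal usage sketch of a relocated extractor, assuming the
    # bob.bio.face.embeddings layout the updated tests import from.
    import numpy as np
    from bob.bio.face.embeddings.MxNetModel import MxNetModel

    extractor = MxNetModel()
    faces = np.random.rand(1, 3, 112, 112)  # cropped face batch, per the old tinyface config
    features = extractor.transform(faces)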