diff --git a/bob/bio/face/annotator/bobiptinyface.py b/bob/bio/face/annotator/bobiptinyface.py
index 3b988a15c06693bafcac769a418c9e35a1a84c2b..a3702d20581da0f938cfa68757186a3637b0bdaa 100644
--- a/bob/bio/face/annotator/bobiptinyface.py
+++ b/bob/bio/face/annotator/bobiptinyface.py
@@ -29,6 +29,8 @@ class BobIpTinyface(Base):
         annotations = self.tinyface.detect(image)
 
         if annotations is not None:
-            return annotations[0]
+            r = annotations[0]
+            return {"topleft": (r[0], r[1]), "bottomright": (r[2], r[3])}
         else:
             return None
+
diff --git a/bob/bio/face/config/annotator/tinyface.py b/bob/bio/face/config/annotator/tinyface.py
index bf223ea9802174d27ca2b7c0a41c6da61fb56f2e..2274bfeff38e836c34e34cb1846915b58a6d769e 100644
--- a/bob/bio/face/config/annotator/tinyface.py
+++ b/bob/bio/face/config/annotator/tinyface.py
@@ -1,3 +1,3 @@
 from bob.bio.face.annotator import BobIpTinyface
 
-annotator = BobIpTinyface()
\ No newline at end of file
+annotator = BobIpTinyface()
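With the change above, ``BobIpTinyface`` returns annotations in the dictionary format that downstream croppers expect, instead of a raw bounding box. A minimal sketch of the resulting behaviour (the ``annotate`` method name and the placeholder image are assumptions based on the ``Base`` annotator API):

.. code-block:: py

    # Sketch only: a random array stands in for a real face image.
    import numpy as np
    from bob.bio.face.annotator import BobIpTinyface

    annotator = BobIpTinyface()
    image = np.random.rand(3, 112, 112)  # Bob-style (channels, height, width)

    annotation = annotator.annotate(image)
    # A detection now yields {"topleft": (y0, x0), "bottomright": (y1, x1)};
    # when no face is found, None is returned.
    print(annotation)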
diff --git a/bob/bio/face/config/baseline/mxnet_pipe.py b/bob/bio/face/config/baseline/mxnet_pipe.py
index 5d97d12fbe84de8d181b093eab3d8d50142e4212..449d01e3f4fbbb02e3bdeeeb8bfefd37759ed7ba 100644
--- a/bob/bio/face/config/baseline/mxnet_pipe.py
+++ b/bob/bio/face/config/baseline/mxnet_pipe.py
@@ -1,6 +1,6 @@
 import bob.bio.base
 from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.face.extractor import MxNetModel
+from bob.bio.face.extractor import mxnet_model
 from bob.bio.base.algorithm import Distance
 from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
 import scipy.spatial
@@ -39,7 +39,7 @@ transform_extra_arguments = (
 )
 
 
-extractor_transformer = MxNetModel()
+extractor_transformer = mxnet_model()
 
 algorithm = Distance(
     distance_function=scipy.spatial.distance.cosine, is_distance_function=True
diff --git a/bob/bio/face/config/baseline/opencv_pipe.py b/bob/bio/face/config/baseline/opencv_pipe.py
index bc9c5906877b17cd73905486e013681aa3569d83..aac2d3f1c45559e35545fb66a5c02ca97aa76790 100644
--- a/bob/bio/face/config/baseline/opencv_pipe.py
+++ b/bob/bio/face/config/baseline/opencv_pipe.py
@@ -1,7 +1,7 @@
 import bob.bio.base
 from bob.bio.face.preprocessor import FaceCrop
 from bob.bio.base.transformers.preprocessor import PreprocessorTransformer
-from bob.bio.face.extractor import OpenCVModel
+from bob.bio.face.extractor import opencv_model
 from bob.bio.base.extractor import Extractor
 from bob.bio.base.transformers import ExtractorTransformer
 from bob.bio.base.algorithm import Distance
@@ -41,12 +41,9 @@ transform_extra_arguments = (
     else (("annotations", "annotations"),)
 )
 
-# Extractor
-
-weights = None  # PATH/TO/WEIGHTS
-config = None  # PATH/TO/CONFIG
-extractor_transformer = OpenCVModel(weights=weights, config=config)
+# Extractor
+extractor_transformer = opencv_model()
 
 
 # Algorithm
diff --git a/bob/bio/face/config/baseline/pytorch_pipe_v1.py b/bob/bio/face/config/baseline/pytorch_pipe_v1.py
index 35efefec969b9b0fe259fe7d54533b80fbf08771..99e5dbada6a6c0d360dc5480d3ac3f79527efdca 100644
--- a/bob/bio/face/config/baseline/pytorch_pipe_v1.py
+++ b/bob/bio/face/config/baseline/pytorch_pipe_v1.py
@@ -1,6 +1,6 @@
 import bob.bio.base
 from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.face.extractor import PyTorchLoadedModel
+from bob.bio.face.extractor import pytorch_loaded_model
 from bob.bio.base.algorithm import Distance
 from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
 import scipy.spatial
@@ -23,11 +23,11 @@ else:
     fixed_positions = None
 
 
-cropped_positions = {"leye": (110, 144), "reye": (110, 96)}
+cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
 
 preprocessor_transformer = FaceCrop(
     cropped_image_size=(224, 224),
-    cropped_positions={"leye": (110, 144), "reye": (110, 96)},
+    cropped_positions={"leye": (49, 72), "reye": (49, 38)},
     color_channel="rgb",
     fixed_positions=fixed_positions,
 )
@@ -39,7 +39,7 @@ transform_extra_arguments = (
 )
 
 
-extractor_transformer = PyTorchLoadedModel()
+extractor_transformer = pytorch_loaded_model()
 
 algorithm = Distance(
     distance_function=scipy.spatial.distance.cosine, is_distance_function=True
@@ -61,3 +61,4 @@ transformer = make_pipeline(
 # Assemble the Vanilla Biometric pipeline and execute
 pipeline = VanillaBiometricsPipeline(transformer, algorithm)
 transformer = pipeline.transformer
+
diff --git a/bob/bio/face/config/baseline/pytorch_pipe_v2.py b/bob/bio/face/config/baseline/pytorch_pipe_v2.py
index a65c6c43a9277432b5f6b6ea86160599f784c775..212d8bb91cdd5654162bf585342e567846beca3c 100644
--- a/bob/bio/face/config/baseline/pytorch_pipe_v2.py
+++ b/bob/bio/face/config/baseline/pytorch_pipe_v2.py
@@ -1,6 +1,6 @@
 import bob.bio.base
 from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.face.extractor import PyTorchLibraryModel
+from bob.bio.face.extractor import pytorch_library_model
 from facenet_pytorch import InceptionResnetV1
 from bob.bio.base.algorithm import Distance
 from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
@@ -24,11 +24,11 @@ else:
     fixed_positions = None
 
 
-cropped_positions = {"leye": (110, 144), "reye": (110, 96)}
+cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
 
 preprocessor_transformer = FaceCrop(
     cropped_image_size=(224, 224),
-    cropped_positions={"leye": (110, 144), "reye": (110, 96)},
+    cropped_positions={"leye": (49, 72), "reye": (49, 38)},
     color_channel="rgb",
     fixed_positions=fixed_positions,
 )
@@ -41,7 +41,7 @@ transform_extra_arguments = (
 
 model = InceptionResnetV1(pretrained="vggface2").eval()
 
-extractor_transformer = PyTorchLibraryModel(model=model)
+extractor_transformer = pytorch_library_model(model=model)
 
 
 algorithm = Distance(
@@ -64,3 +64,4 @@ transformer = make_pipeline(
 # Assemble the Vanilla Biometric pipeline and execute
 pipeline = VanillaBiometricsPipeline(transformer, algorithm)
 transformer = pipeline.transformer
+
diff --git a/bob/bio/face/config/baseline/tf_pipe.py b/bob/bio/face/config/baseline/tf_pipe.py
index c4ea24a68baad4ff57d57d8f773f249284a01b6b..e6dfe6253470415783206dc63c1c463fa9ff2cf1 100644
--- a/bob/bio/face/config/baseline/tf_pipe.py
+++ b/bob/bio/face/config/baseline/tf_pipe.py
@@ -1,6 +1,6 @@
 import bob.bio.base
 from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.face.extractor import TensorFlowModel
+from bob.bio.face.extractor import tf_model
 from bob.bio.base.algorithm import Distance
 from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
 import scipy.spatial
@@ -24,11 +24,11 @@ else:
 
 
 # Preprocessor
-cropped_positions = {"leye": (80, 100), "reye": (80, 60)}
+cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
 
 preprocessor_transformer = FaceCrop(
     cropped_image_size=(160, 160),
-    cropped_positions={"leye": (80, 100), "reye": (80, 60)},
+    cropped_positions={"leye": (49, 72), "reye": (49, 38)},
     color_channel="rgb",
     fixed_positions=fixed_positions,
 )
@@ -41,7 +41,7 @@ transform_extra_arguments = (
 
 
 # Extractor
-extractor_transformer = TensorFlowModel()
+extractor_transformer = tf_model()
 
 # Algorithm
 algorithm = Distance(
@@ -64,3 +64,4 @@ transformer = make_pipeline(
 # Assemble the Vanilla Biometric pipeline and execute
 pipeline = VanillaBiometricsPipeline(transformer, algorithm)
 transformer = pipeline.transformer
+
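All five ``*_pipe`` configs above share the same assembly: a sample-wrapped ``FaceCrop`` preprocessor feeds a sample-wrapped extractor, and a cosine ``Distance`` serves as the matcher. A condensed, self-contained sketch of that pattern follows; the ``VanillaBiometricsPipeline`` import path and the ``wrap`` calls are assumptions reconstructing the elided parts of the configs:

.. code-block:: py

    # Condensed sketch of the shared *_pipe structure; the crop anchors and
    # the Distance setup are the values used by tf_pipe above.
    import scipy.spatial
    from sklearn.pipeline import make_pipeline
    from bob.pipelines import wrap
    from bob.bio.face.preprocessor import FaceCrop
    from bob.bio.face.extractor import tf_model
    from bob.bio.base.algorithm import Distance
    from bob.bio.base.pipelines.vanilla_biometrics import VanillaBiometricsPipeline

    preprocessor_transformer = FaceCrop(
        cropped_image_size=(160, 160),
        cropped_positions={"leye": (49, 72), "reye": (49, 38)},
        color_channel="rgb",
        fixed_positions=None,
    )
    transform_extra_arguments = (("annotations", "annotations"),)

    transformer = make_pipeline(
        # Wrap both steps so they operate on Sample objects, forwarding the
        # face annotations to the cropper.
        wrap(
            ["sample"],
            preprocessor_transformer,
            transform_extra_arguments=transform_extra_arguments,
        ),
        wrap(["sample"], tf_model()),
    )
    algorithm = Distance(
        distance_function=scipy.spatial.distance.cosine, is_distance_function=True
    )
    pipeline = VanillaBiometricsPipeline(transformer, algorithm)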
diff --git a/bob/bio/face/extractor/__init__.py b/bob/bio/face/extractor/__init__.py
index d98275bc189a792df01b1031277063bfd6ff82a5..32b97163775852dbca316f538672be6bccf2095d 100644
--- a/bob/bio/face/extractor/__init__.py
+++ b/bob/bio/face/extractor/__init__.py
@@ -1,11 +1,11 @@
 from .DCTBlocks import DCTBlocks
 from .GridGraph import GridGraph
 from .LGBPHS import LGBPHS
-from .MxNetModel import MxNetModel
-from .PyTorchModel import PyTorchLoadedModel
-from .PyTorchModel import PyTorchLibraryModel
-from .TensorFlowModel import TensorFlowModel
-from .OpenCVModel import OpenCVModel
+from .mxnet_resnet import mxnet_model
+from .pytorch_model import pytorch_loaded_model
+from .pytorch_model import pytorch_library_model
+from .tf_model import tf_model
+from .opencv_caffe import opencv_model
 
 # gets sphinx autodoc done right - don't remove it
 def __appropriate__(*args):
@@ -27,10 +27,10 @@ __appropriate__(
     DCTBlocks,
     GridGraph,
     LGBPHS,
-    MxNetModel,
-    PyTorchLoadedModel,
-    PyTorchLibraryModel,
-    TensorFlowModel,
-    OpenCVModel,
+    mxnet_model,
+    pytorch_loaded_model,
+    pytorch_library_model,
+    tf_model,
+    opencv_model,
 )
 __all__ = [_ for _ in dir() if not _.startswith("_")]
diff --git a/bob/bio/face/extractor/mxnet_resnet.py b/bob/bio/face/extractor/mxnet_resnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..aab050bfadd851e015e8b19e736a52aa854807e9
--- /dev/null
+++ b/bob/bio/face/extractor/mxnet_resnet.py
@@ -0,0 +1,97 @@
+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch>
+
+"""Feature extraction with ResNet models through the MxNet interface"""
+from sklearn.base import TransformerMixin, BaseEstimator
+from sklearn.utils import check_array
+import numpy as np
+import pkg_resources
+import os
+import mxnet as mx
+from mxnet import gluon
+import warnings
+from bob.extension import rc
+
+mxnet_resnet_directory = rc["bob.extractor_model.mxnet"]
+mxnet_weight_directory = rc["bob.extractor_weights.mxnet"]
+
+
+class mxnet_model(TransformerMixin, BaseEstimator):
+    """Extracts features using deep face recognition models through the MxNet interface.
+
+    Users can download pretrained face recognition models with an MxNet interface.
+    The paths to the downloaded model and weights must be specified before running
+    the extractor (usually before running the pipeline file that includes it), via
+    the ``bob.extractor_model.mxnet`` and ``bob.extractor_weights.mxnet``
+    configuration entries:
+
+    .. code-block:: sh
+
+        $ bob config set bob.extractor_model.mxnet /PATH/TO/MODEL/
+        $ bob config set bob.extractor_weights.mxnet /PATH/TO/WEIGHTS/
+
+    Example (pretrained ResNet models): `LResNet100E-IR,ArcFace@ms1m-refine-v2 <https://github.com/deepinsight/insightface>`_
+
+    The extracted features can be combined with different algorithms.
+
+    **Parameters:**
+
+    use_gpu: True or False.
+    """
+
+    def __init__(self, use_gpu=False, **kwargs):
+        super().__init__(**kwargs)
+        self.model = None
+        self.use_gpu = use_gpu
+
+        internal_path = pkg_resources.resource_filename(
+            __name__, os.path.join("data", "resnet"),
+        )
+
+        checkpoint_path = (
+            internal_path
+            if rc["bob.bio.face.models.mxnet_resnet"] is None
+            else rc["bob.bio.face.models.mxnet_resnet"]
+        )
+
+        self.checkpoint_path = checkpoint_path
+
+    def _load_model(self):
+        ctx = mx.gpu() if self.use_gpu else mx.cpu()
+
+        with warnings.catch_warnings():
+            warnings.simplefilter("ignore")
+            deserialized_net = gluon.nn.SymbolBlock.imports(
+                mxnet_resnet_directory, ["data"], mxnet_weight_directory, ctx=ctx
+            )
+
+        self.model = deserialized_net
+
+    def transform(self, X):
+        """transform(image) -> feature
+
+        Extracts features from the given image.
+
+        **Parameters:**
+
+        image : 2D :py:class:`numpy.ndarray` (floats)
+            The image to extract the features from.
+
+        **Returns:**
+
+        feature : 2D, 3D, or 4D :py:class:`numpy.ndarray` (floats)
+            The features extracted from the image.
+        """
+        if self.model is None:
+            self._load_model()
+
+        X = check_array(X, allow_nd=True)
+        X = mx.nd.array(X)
+
+        return self.model(X).asnumpy()
+
+    def __getstate__(self):
+        # The mxnet network is not picklable, so it is dropped here and
+        # lazily re-loaded in transform().
+        d = self.__dict__.copy()
+        d["model"] = None
+        return d
+
+    def _more_tags(self):
+        return {"stateless": True, "requires_fit": False}
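A minimal usage sketch for the new MxNet extractor, assuming the two ``bob config`` entries above point at a valid exported symbol/params pair; the 1x3x112x112 input geometry is an assumption for an ArcFace-style ResNet:

.. code-block:: py

    # Sketch only: verify the exported network's expected input shape first.
    import numpy as np
    from bob.bio.face.extractor import mxnet_model

    extractor = mxnet_model()                      # CPU context by default
    batch = np.random.rand(1, 3, 112, 112).astype("float32")
    embeddings = extractor.transform(batch)        # lazily loads the model
    print(embeddings.shape)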
+ """ + + def __init__(self, use_gpu=False, **kwargs): + super().__init__(**kwargs) + self.model = None + self.use_gpu = use_gpu + + internal_path = pkg_resources.resource_filename( + __name__, os.path.join("data", "resnet"), + ) + + checkpoint_path = ( + internal_path + if rc["bob.bio.face.models.mxnet_resnet"] is None + else rc["bob.bio.face.models.mxnet_resnet"] + ) + + self.checkpoint_path = checkpoint_path + + def _load_model(self): + + ctx = mx.gpu() if self.use_gpu else mx.cpu() + + with warnings.catch_warnings(): + warnings.simplefilter("ignore") + deserialized_net = gluon.nn.SymbolBlock.imports(mxnet_resnet_directory, ['data'], mxnet_weight_directory, ctx=ctx) + + self.model = deserialized_net + + def transform(self, X): + """__call__(image) -> feature + + Extracts the features from the given image. + + **Parameters:** + + image : 2D :py:class:`numpy.ndarray` (floats) + The image to extract the features from. + + **Returns:** + + feature : 2D, 3D, or 4D :py:class:`numpy.ndarray` (floats) + The list of features extracted from the image. + """ + + if self.model is None: + self.load_model() + + X = check_array(X, allow_nd=True) + X = mx.nd.array(X) + + return self.model(X,).asnumpy() + + + def __getstate__(self): + # Handling unpicklable objects + + d = self.__dict__.copy() + d["model"] = None + return d + + def _more_tags(self): + return {"stateless": True, "requires_fit": False} diff --git a/bob/bio/face/extractor/opencv_caffe.py b/bob/bio/face/extractor/opencv_caffe.py new file mode 100644 index 0000000000000000000000000000000000000000..647fc724fe4e1087e488f89e24ed42de750552fa --- /dev/null +++ b/bob/bio/face/extractor/opencv_caffe.py @@ -0,0 +1,105 @@ +#!/usr/bin/env python +# vim: set fileencoding=utf-8 : +# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch> + +import bob.bio.base +from bob.bio.face.preprocessor import FaceCrop + +from bob.bio.base.transformers.preprocessor import PreprocessorTransformer + +import cv2 +import numpy as np + +from bob.learn.tensorflow.utils.image import to_channels_last +from sklearn.base import TransformerMixin, BaseEstimator +from sklearn.utils import check_array + +from bob.extension import rc +from functools import partial +import pkg_resources +import os + +from PIL import Image + +opencv_model_directory = rc["bob.extractor_model.opencv"] +opencv_model_prototxt = rc["bob.extractor_weights.opencv"] + + +class opencv_model(TransformerMixin, BaseEstimator): + """Extracts features using deep face recognition models under OpenCV Interface + + Users can download the pretrained face recognition models with OpenCV Interface. The path to downloaded models should be specified before running the extractor (usually before running the pipeline file that includes the extractor). That is, set config of the model frame to :py:class:`bob.extractor_model.opencv`, and set config of the parameters to :py:class:`bob.extractor_weights.opencv`. + + .. code-block:: sh + + $ bob config set bob.extractor_model.opencv /PATH/TO/MODEL/ + $ bob config set bob.extractor_weights.opencv /PATH/TO/WEIGHTS/ + + The extracted features can be combined with different the algorithms. + + .. note:: + This structure only can be used for CAFFE pretrained model. + + **Parameters:** + use_gpu: True or False. 
+ """ + + + def __init__(self, use_gpu=False, **kwargs): + super().__init__(**kwargs) + self.model = None + self.use_gpu = use_gpu + + internal_path = pkg_resources.resource_filename( + __name__, os.path.join("data", "opencv_model"), + ) + + checkpoint_path = ( + internal_path + if rc["bob.bio.face.models.opencv"] is None + else rc["bob.bio.face.models.opencv"] + ) + + self.checkpoint_path = checkpoint_path + + def _load_model(self): + + net = cv2.dnn.readNetFromCaffe(opencv_model_prototxt,opencv_model_directory) + + self.model = net + + def transform(self, X): + """__call__(image) -> feature + + Extracts the features from the given image. + + **Parameters:** + + image : 2D :py:class:`numpy.ndarray` (floats) + The image to extract the features from. + + **Returns:** + + feature : 2D or 3D :py:class:`numpy.ndarray` (floats) + The list of features extracted from the image. + """ + + if self.model is None: + self.load_model() + + img = np.array(X) + + self.model.setInput(img) + + return self.model.forward() + + + def __getstate__(self): + # Handling unpicklable objects + + d = self.__dict__.copy() + d["model"] = None + return d + + def _more_tags(self): + return {"stateless": True, "requires_fit": False} diff --git a/bob/bio/face/extractor/pytorch_model.py b/bob/bio/face/extractor/pytorch_model.py new file mode 100644 index 0000000000000000000000000000000000000000..52c4f172d1c74f7018429261a072d8185bd69289 --- /dev/null +++ b/bob/bio/face/extractor/pytorch_model.py @@ -0,0 +1,165 @@ +#!/usr/bin/env python +# vim: set fileencoding=utf-8 : +# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch> + +import torch +from bob.learn.tensorflow.utils.image import to_channels_last +from sklearn.base import TransformerMixin, BaseEstimator +from sklearn.utils import check_array +from bob.extension import rc +from functools import partial +import pkg_resources +import os +import numpy as np +import imp + +pytorch_model_directory = rc["bob.extractor_model.pytorch"] +pytorch_weight_directory = rc["bob.extractor_weights.pytorch"] + +class pytorch_loaded_model(TransformerMixin, BaseEstimator): + """Extracts features using deep face recognition models under PyTorch Interface, especially for the models and weights that need to load by hand. + + Users can download the pretrained face recognition models with PyTorch Interface. The path to downloaded models should be specified before running the extractor (usually before running the pipeline file that includes the extractor). That is, set config of the model frame to :py:class:`bob.extractor_model.pytorch`, and set config of the parameters to :py:class:`bob.extractor_weights.pytorch`. + + .. code-block:: sh + + $ bob config set bob.extractor_model.pytorch /PATH/TO/MODEL/ + $ bob config set bob.extractor_weights.pytorch /PATH/TO/WEIGHTS/ + + The extracted features can be combined with different the algorithms. + + **Parameters:** + use_gpu: True or False. 
+ """ + + def __init__(self, use_gpu=False, **kwargs): + super().__init__(**kwargs) + self.model = None + self.use_gpu = use_gpu + + internal_path = pkg_resources.resource_filename( + __name__, os.path.join("data", "resnet"), + ) + + checkpoint_path = ( + internal_path + if rc["bob.bio.face.models.pytorchmodel"] is None + else rc["bob.bio.face.models.pytorchmodel"] + ) + + self.checkpoint_path = checkpoint_path + self.device = None + + def _load_model(self): + + MainModel = imp.load_source('MainModel', pytorch_model_directory) + network = torch.load(pytorch_weight_directory) + network.eval() + + self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + + network.to(self.device) + + self.model = network + + def transform(self, X): + """__call__(image) -> feature + + Extracts the features from the given image. + + **Parameters:** + + image : 2D :py:class:`numpy.ndarray` (floats) + The image to extract the features from. + + **Returns:** + + feature : 2D or 3D :py:class:`numpy.ndarray` (floats) + The list of features extracted from the image. + """ + + if self.model is None: + self.load_model() + + X = torch.Tensor(X) + + return self.model(X).detach().numpy() + + + def __getstate__(self): + # Handling unpicklable objects + + d = self.__dict__.copy() + d["model"] = None + return d + + def _more_tags(self): + return {"stateless": True, "requires_fit": False} + + + + + + + + +class pytorch_library_model(TransformerMixin, BaseEstimator): + """Extracts features using deep face recognition with registered model frames in the PyTorch Library. + + Users can import the pretrained face recognition models from PyTorch library. The model should be called in the pipeline. Example: `facenet_pytorch <https://github.com/timesler/facenet-pytorch>`_ + + The extracted features can be combined with different the algorithms. + + **Parameters:** + model: pytorch model calling from library. + use_gpu: True or False. + """ + + def __init__(self, model=None, use_gpu=False, **kwargs): + super().__init__(**kwargs) + self.model = model + self.use_gpu = use_gpu + + internal_path = pkg_resources.resource_filename( + __name__, os.path.join("data", "resnet"), + ) + + checkpoint_path = ( + internal_path + if rc["bob.bio.face.models.pytorchmodel"] is None + else rc["bob.bio.face.models.pytorchmodel"] + ) + + self.checkpoint_path = checkpoint_path + self.device = None + + def transform(self, X): + """__call__(image) -> feature + + Extracts the features from the given image. + + **Parameters:** + + image : 2D :py:class:`numpy.ndarray` (floats) + The image to extract the features from. + + **Returns:** + + feature : 2D or 3D :py:class:`numpy.ndarray` (floats) + The list of features extracted from the image. 
+ """ + + X = torch.Tensor(X) + + return self.model(X).detach().numpy() + + + def __getstate__(self): + # Handling unpicklable objects + + d = self.__dict__.copy() + d["model"] = None + return d + + def _more_tags(self): + return {"stateless": True, "requires_fit": False} diff --git a/bob/bio/face/extractor/tf_model.py b/bob/bio/face/extractor/tf_model.py new file mode 100644 index 0000000000000000000000000000000000000000..9e83ec389dafb05b1d9031c52338e235a9086ef8 --- /dev/null +++ b/bob/bio/face/extractor/tf_model.py @@ -0,0 +1,95 @@ +#!/usr/bin/env python +# vim: set fileencoding=utf-8 : +# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch> + +import tensorflow as tf +from bob.extension import rc +from bob.learn.tensorflow.utils.image import to_channels_last +from sklearn.base import TransformerMixin, BaseEstimator +from sklearn.utils import check_array +from tensorflow.keras import Sequential +from tensorflow.keras.layers.experimental import preprocessing +from functools import partial +import pkg_resources +import os +import numpy as np +from tensorflow import keras + +tf_model_directory = rc["bob.extractor_model.tf"] + +class tf_model(TransformerMixin, BaseEstimator): + """Extracts features using deep face recognition models under TensorFlow Interface. + + Users can download the pretrained face recognition models with TensorFlow Interface. The path to downloaded models should be specified before running the extractor (usually before running the pipeline file that includes the extractor). That is, set config of the model to :py:class:`bob.extractor_model.tf`. + + .. code-block:: sh + + $ bob config set bob.extractor_model.tf /PATH/TO/MODEL/ + + The extracted features can be combined with different the algorithms. + + + **Parameters:** + use_gpu: True or False. + """ + + def __init__(self, use_gpu=False, **kwargs): + super().__init__(**kwargs) + self.model = None + self.use_gpu = use_gpu + + internal_path = pkg_resources.resource_filename( + __name__, os.path.join("data", "resnet"), + ) + + checkpoint_path = ( + internal_path + if rc["bob.bio.face.models.tfmodel"] is None + else rc["bob.bio.face.models.tfmodel"] + ) + + self.checkpoint_path = checkpoint_path + + def _load_model(self): + + model = tf.keras.models.load_model(tf_model_directory) + + self.model = model + + def transform(self, X): + """__call__(image) -> feature + + Extracts the features from the given image. + + **Parameters:** + + image : 2D :py:class:`numpy.ndarray` (floats) + The image to extract the features from. + + **Returns:** + + feature : 2D or 3D :py:class:`numpy.ndarray` (floats) + The list of features extracted from the image. 
+ """ + + if self.model is None: + self.load_model() + + X = check_array(X, allow_nd=True) + X = tf.convert_to_tensor(X) + X = to_channels_last(X) + predict = self.model.predict(X) + + + return predict + + + def __getstate__(self): + # Handling unpicklable objects + + d = self.__dict__.copy() + d["model"] = None + return d + + def _more_tags(self): + return {"stateless": True, "requires_fit": False} diff --git a/doc/baselines.rst b/doc/baselines.rst index 4f8d9c2da0043f74340d937ba66169613d803863..88c42a629cd6a9eb1ccd18fe92146aa3bbfc499c 100644 --- a/doc/baselines.rst +++ b/doc/baselines.rst @@ -61,14 +61,12 @@ Deep learning baselines Deep Learning with different interfaces baselines ================================================= -* ``mxnet-pipe``: Arcface Resnet Model using MxNet Interfaces from `Insightface <https://github.com/deepinsight/insightface>`_ +* ``mxnet_pipe``: Arcface Resnet Model using MxNet Interfaces from `Insightface <https://github.com/deepinsight/insightface>`_ -* ``mxnet-tinyface``: Applying `tinyface annoator <https://github.com/chinakook/hr101_mxnet>`_ for the Arcface Resnet Model using MxNet Interfaces from `Insightface <https://github.com/deepinsight/insightface>`_ +* ``pytorch_pipe_v1``: Pytorch network that extracs 1000-dimensional featrues, trained by Manual Gunther, as described in [LGB18]_ -* ``pytorch-pipe-v1``: Pytorch network that extracts 1000-dimensional features, trained by Manual Gunther, as described in [LGB18]_ +* ``pytorch_pipe_v2``: Inception Resnet face recognition model from `facenet_pytorch <https://github.com/timesler/facenet-pytorch>`_ -* ``pytorch-pipe-v2``: Inception Resnet face recognition model from `facenet_pytorch <https://github.com/timesler/facenet-pytorch>`_ +* ``tf_pipe``: Inception Resnet v2 model trained using the MSCeleb dataset in the context of the work published by [TFP18]_ -* ``tf-pipe``: Inception Resnet v2 model trained using the MSCeleb dataset in the context of the work published by [TFP18]_ - -* ``opencv-pipe``: VGG Face descriptor pretrained models, i.e. `Caffe model <https://www.robots.ox.ac.uk/~vgg/software/vgg_face/>`_ +* ``opencv_pipe``: VGG Face descriptor pretrained models, i.e. `Caffe model <https://www.robots.ox.ac.uk/~vgg/software/vgg_face/>`_ diff --git a/doc/implemented.rst b/doc/implemented.rst index 7a3074a4f92189f275caa5708a094ce8e6b07f75..75acc7129ef0132eb8ad762c73ddf7e53cb5b204 100644 --- a/doc/implemented.rst +++ b/doc/implemented.rst @@ -13,7 +13,6 @@ Databases .. 
diff --git a/doc/references.rst b/doc/references.rst
index 7d6d919cf3fd7011b4165ca381b211d023c93c95..28b98d5f0cbb37e59bfb9f9623b31b966493ed85 100644
--- a/doc/references.rst
+++ b/doc/references.rst
@@ -17,4 +17,4 @@ References
 .. [ZSQ09] *W. Zhang, S. Shan, L. Qing, X. Chen and W. Gao*. **Are Gabor phases really useless for face recognition?** Pattern Analysis & Applications, 12:301-307, 2009.
 .. [TFP18] de Freitas Pereira, Tiago, André Anjos, and Sébastien Marcel. "Heterogeneous face recognition using domain specific units." IEEE Transactions on Information Forensics and Security 14.7 (2018): 1803-1816.
 .. [HRM06] *G. Heusch, Y. Rodriguez, and S. Marcel*. **Local Binary Patterns as an Image Preprocessing for Face Authentication**. In IEEE International Conference on Automatic Face and Gesture Recognition (AFGR), 2006.
-.. [LGB18] *C. Li, M. Gunther and T. E. Boult*. **ECLIPSE: Ensembles of Centroids Leveraging Iteratively Processed Spatial Eclipse Clustering**. IEEE Winter Conference on Applications of Computer Vision (WACV), 2018.
+.. [LGB18] *C. Li, M. Gunther and T. E. Boult*. **ECLIPSE: Ensembles of Centroids Leveraging Iteratively Processed Spatial Eclipse Clustering**. 2018 IEEE Winter Conference on Applications of Computer Vision (WACV), Lake Tahoe, NV, USA, 2018, pp. 131-140, doi: 10.1109/WACV.2018.00021.
diff --git a/setup.py b/setup.py
index 13811f157cca4b27a5710c794724cbd78cfa1ce8..1f278141e0e97753d13de4c23fd038fe3b446648 100644
--- a/setup.py
+++ b/setup.py
@@ -113,11 +113,6 @@ setup(
             "meds = bob.bio.face.config.database.meds:database",
             "morph = bob.bio.face.config.database.morph:database",
             "casia-africa = bob.bio.face.config.database.casia_africa:database",
-<<<<<<< HEAD
-            "pola-thermal = bob.bio.face.config.database.pola_thermal:database",
-            "cbsr-nir-vis-2 = bob.bio.face.config.database.cbsr_nir_vis_2:database",
-=======
->>>>>>> 2
         ],
         "bob.bio.annotator": [
             "facedetect = bob.bio.face.config.annotator.facedetect:annotator",
@@ -142,10 +137,9 @@ setup(
             "lgbphs = bob.bio.face.config.baseline.lgbphs:transformer",
             "dummy = bob.bio.face.config.baseline.dummy:transformer",
             "mxnet-pipe = bob.bio.face.config.baseline.mxnet_pipe:transformer",
-            "mxnet-tinyface = bob.bio.face.config.baseline.mxnet_tinyface:transformer",
             "pytorch-pipe-v1 = bob.bio.face.config.baseline.pytorch_pipe_v1:transformer",
             "pytorch-pipe-v2 = bob.bio.face.config.baseline.pytorch_pipe_v2:transformer",
             "tf-pipe = bob.bio.face.config.baseline.tf_pipe:transformer",
             "opencv-pipe = bob.bio.face.config.baseline.opencv_pipe:transformer",
         ],
         # baselines
@@ -161,13 +155,11 @@ setup(
             "lda = bob.bio.face.config.baseline.lda:pipeline",
             "dummy = bob.bio.face.config.baseline.dummy:pipeline",
             "resnet50-msceleb-arcface-2021 = bob.bio.face.config.baseline.resnet50_msceleb_arcface_2021:pipeline",
-            "resnet50-vgg2-arcface-2021 = bob.bio.face.config.baseline.resnet50_vgg2_arcface_2021:pipeline",
             "mobilenetv2-msceleb-arcface-2021 = bob.bio.face.config.baseline.mobilenetv2_msceleb_arcface_2021",
             "mxnet-pipe = bob.bio.face.config.baseline.mxnet_pipe:pipeline",
-            "mxnet-tinyface = bob.bio.face.config.baseline.mxnet_tinyface:pipeline",
             "pytorch-pipe-v1 = bob.bio.face.config.baseline.pytorch_pipe_v1:pipeline",
             "pytorch-pipe-v2 = bob.bio.face.config.baseline.pytorch_pipe_v2:pipeline",
             "tf-pipe = bob.bio.face.config.baseline.tf_pipe:pipeline",
             "opencv-pipe = bob.bio.face.config.baseline.opencv_pipe:pipeline",
         ],
         "bob.bio.config": [
@@ -181,10 +173,9 @@ setup(
             "lgbphs = bob.bio.face.config.baseline.lgbphs",
             "lda = bob.bio.face.config.baseline.lda",
             "mxnet-pipe = bob.bio.face.config.baseline.mxnet_pipe",
-            "mxnet-tinyface = bob.bio.face.config.baseline.mxnet_tinyface",
             "pytorch-pipe-v1 = bob.bio.face.config.baseline.pytorch_pipe_v1",
             "pytorch-pipe-v2 = bob.bio.face.config.baseline.pytorch_pipe_v2",
             "tf-pipe = bob.bio.face.config.baseline.tf_pipe",
             "opencv-pipe = bob.bio.face.config.baseline.opencv_pipe",
             "arface = bob.bio.face.config.database.arface",
             "atnt = bob.bio.face.config.database.atnt",
@@ -204,11 +195,7 @@ setup(
             "meds = bob.bio.face.config.database.meds",
             "casia-africa = bob.bio.face.config.database.casia_africa",
             "morph = bob.bio.face.config.database.morph",
-            "casia-africa = bob.bio.face.config.database.casia_africa",
-            "pola-thermal = bob.bio.face.config.database.pola_thermal",
-            "cbsr-nir-vis-2 = bob.bio.face.config.database.cbsr_nir_vis_2",
             "resnet50-msceleb-arcface-2021 = bob.bio.face.config.baseline.resnet50_msceleb_arcface_2021",
-            "resnet50-vgg2-arcface-2021 = bob.bio.face.config.baseline.resnet50_vgg2_arcface_2021",
             "mobilenetv2-msceleb-arcface-2021 = bob.bio.face.config.baseline.mobilenetv2_msceleb_arcface_2021",
         ],
         "bob.bio.cli": [