Commit a0973f78 authored by Yu LINGHU's avatar Yu LINGHU Committed by Tiago de Freitas Pereira
clone the updates and recommit

parent bf618295
1 merge request: !112 Feature extractors
Showing with 995 additions and 4 deletions
@@ -58,6 +58,7 @@ from .Base import Base
from .bobipfacedetect import BobIpFacedetect
from .bobipflandmark import BobIpFlandmark
from .bobipmtcnn import BobIpMTCNN
from .bobiptinyface import BobIpTinyface

# gets sphinx autodoc done right - don't remove it
@@ -84,6 +85,7 @@ __appropriate__(
    BobIpFacedetect,
    BobIpFlandmark,
    BobIpMTCNN,
    BobIpTinyface,
)

__all__ = [_ for _ in dir() if not _.startswith('_')]
import bob.ip.facedetect.tinyface

from . import Base


class BobIpTinyface(Base):
    """Annotator using tinyface in bob.ip.facedetect"""

    def __init__(self, **kwargs):
        super(BobIpTinyface, self).__init__(**kwargs)
        self.tinyface = bob.ip.facedetect.tinyface.TinyFacesDetector(prob_thresh=0.5)

    def annotate(self, image, **kwargs):
        """Annotates an image using tinyface

        Parameters
        ----------
        image : numpy.array
            An RGB image in Bob format.
        **kwargs
            Ignored.

        Returns
        -------
        dict
            Annotations with (topleft, bottomright) keys (or None).
        """
        annotations = self.tinyface.detect(image)

        if annotations is not None:
            # Keep the highest-scoring detection
            r = annotations[0]
            return {"topleft": (r[0], r[1]), "bottomright": (r[2], r[3])}
        else:
            return None
from bob.bio.face.annotator import BobIpTinyface
annotator = BobIpTinyface()
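For reference, a minimal sketch of how this annotator can be exercised on its own; the random test image and its channels-first shape are assumptions for illustration:

import numpy as np
from bob.bio.face.annotator import BobIpTinyface

annotator = BobIpTinyface()

# Hypothetical RGB image in Bob's channels-first (C, H, W) layout
image = np.random.randint(0, 255, size=(3, 160, 160), dtype=np.uint8)

annotations = annotator.annotate(image)
if annotations is not None:
    print(annotations["topleft"], annotations["bottomright"])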
import scipy.spatial
from sklearn.pipeline import make_pipeline

from bob.bio.face.preprocessor import FaceCrop
from bob.bio.face.extractor import mxnet_model
from bob.bio.base.pipelines.vanilla_biometrics import Distance, VanillaBiometricsPipeline
from bob.pipelines import wrap

memory_demanding = False
if "database" in locals():
    annotation_type = database.annotation_type
    fixed_positions = database.fixed_positions
    memory_demanding = (
        database.memory_demanding if hasattr(database, "memory_demanding") else False
    )
else:
    annotation_type = None
    fixed_positions = None

# Preprocessor: crop faces to 112x112 around the given eye positions
cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
preprocessor_transformer = FaceCrop(
    cropped_image_size=(112, 112),
    cropped_positions=cropped_positions,
    color_channel="rgb",
    fixed_positions=fixed_positions,
)
transform_extra_arguments = (
    None
    if (cropped_positions is None or fixed_positions is not None)
    else (("annotations", "annotations"),)
)

# Extractor
extractor_transformer = mxnet_model()

# Algorithm
algorithm = Distance(
    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
)

# Chain the Transformers together
transformer = make_pipeline(
    wrap(["sample"], preprocessor_transformer, transform_extra_arguments=transform_extra_arguments),
    wrap(["sample"], extractor_transformer),
    # Add more transformers here if needed
)

# Assemble the Vanilla Biometrics pipeline and execute
pipeline = VanillaBiometricsPipeline(transformer, algorithm)
transformer = pipeline.transformer
import scipy.spatial
from sklearn.pipeline import make_pipeline

from bob.bio.face.preprocessor import FaceCrop
from bob.bio.face.extractor import opencv_model
from bob.bio.base.pipelines.vanilla_biometrics import Distance, VanillaBiometricsPipeline
from bob.pipelines import wrap

memory_demanding = False
if "database" in locals():
    annotation_type = database.annotation_type
    fixed_positions = database.fixed_positions
    memory_demanding = (
        database.memory_demanding if hasattr(database, "memory_demanding") else False
    )
else:
    annotation_type = None
    fixed_positions = None

# Preprocessor: crop faces to 224x224 around the given eye positions
cropped_positions = {"leye": (98, 144), "reye": (98, 76)}
preprocessor_transformer = FaceCrop(
    cropped_image_size=(224, 224),
    cropped_positions=cropped_positions,
    color_channel="rgb",
    fixed_positions=fixed_positions,
)
transform_extra_arguments = (
    None
    if (cropped_positions is None or fixed_positions is not None)
    else (("annotations", "annotations"),)
)

# Extractor
extractor_transformer = opencv_model()

# Algorithm
algorithm = Distance(
    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
)

## Creation of the pipeline
# Chain the Transformers together
transformer = make_pipeline(
    wrap(["sample"], preprocessor_transformer, transform_extra_arguments=transform_extra_arguments),
    wrap(["sample"], extractor_transformer),
    # Add more transformers here if needed
)

# Assemble the Vanilla Biometrics pipeline and execute
pipeline = VanillaBiometricsPipeline(transformer, algorithm)
transformer = pipeline.transformer
import scipy.spatial
from sklearn.pipeline import make_pipeline

from bob.bio.face.preprocessor import FaceCrop
from bob.bio.face.extractor import pytorch_loaded_model
from bob.bio.base.pipelines.vanilla_biometrics import Distance, VanillaBiometricsPipeline
from bob.pipelines import wrap

memory_demanding = False
if "database" in locals():
    annotation_type = database.annotation_type
    fixed_positions = database.fixed_positions
    memory_demanding = (
        database.memory_demanding if hasattr(database, "memory_demanding") else False
    )
else:
    annotation_type = None
    fixed_positions = None

# Preprocessor: crop faces to 224x224 around the given eye positions
cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
preprocessor_transformer = FaceCrop(
    cropped_image_size=(224, 224),
    cropped_positions=cropped_positions,
    color_channel="rgb",
    fixed_positions=fixed_positions,
)
transform_extra_arguments = (
    None
    if (cropped_positions is None or fixed_positions is not None)
    else (("annotations", "annotations"),)
)

# Extractor
extractor_transformer = pytorch_loaded_model()

# Algorithm
algorithm = Distance(
    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
)

# Chain the Transformers together
transformer = make_pipeline(
    wrap(["sample"], preprocessor_transformer, transform_extra_arguments=transform_extra_arguments),
    wrap(["sample"], extractor_transformer),
    # Add more transformers here if needed
)

# Assemble the Vanilla Biometrics pipeline and execute
pipeline = VanillaBiometricsPipeline(transformer, algorithm)
transformer = pipeline.transformer
import scipy.spatial
from sklearn.pipeline import make_pipeline
from facenet_pytorch import InceptionResnetV1

from bob.bio.face.preprocessor import FaceCrop
from bob.bio.face.extractor import pytorch_library_model
from bob.bio.base.pipelines.vanilla_biometrics import Distance, VanillaBiometricsPipeline
from bob.pipelines import wrap

memory_demanding = False
if "database" in locals():
    annotation_type = database.annotation_type
    fixed_positions = database.fixed_positions
    memory_demanding = (
        database.memory_demanding if hasattr(database, "memory_demanding") else False
    )
else:
    annotation_type = None
    fixed_positions = None

# Preprocessor: crop faces to 224x224 around the given eye positions
cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
preprocessor_transformer = FaceCrop(
    cropped_image_size=(224, 224),
    cropped_positions=cropped_positions,
    color_channel="rgb",
    fixed_positions=fixed_positions,
)
transform_extra_arguments = (
    None
    if (cropped_positions is None or fixed_positions is not None)
    else (("annotations", "annotations"),)
)

# Extractor: wrap a pretrained model from the facenet_pytorch library
model = InceptionResnetV1(pretrained="vggface2").eval()
extractor_transformer = pytorch_library_model(model=model)

# Algorithm
algorithm = Distance(
    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
)

# Chain the Transformers together
transformer = make_pipeline(
    wrap(["sample"], preprocessor_transformer, transform_extra_arguments=transform_extra_arguments),
    wrap(["sample"], extractor_transformer),
    # Add more transformers here if needed
)

# Assemble the Vanilla Biometrics pipeline and execute
pipeline = VanillaBiometricsPipeline(transformer, algorithm)
transformer = pipeline.transformer
import scipy.spatial
from sklearn.pipeline import make_pipeline

from bob.bio.face.preprocessor import FaceCrop
from bob.bio.face.extractor import tf_model
from bob.bio.base.pipelines.vanilla_biometrics import Distance, VanillaBiometricsPipeline
from bob.pipelines import wrap

memory_demanding = False
if "database" in locals():
    annotation_type = database.annotation_type
    fixed_positions = database.fixed_positions
    memory_demanding = (
        database.memory_demanding if hasattr(database, "memory_demanding") else False
    )
else:
    annotation_type = None
    fixed_positions = None

# Preprocessor: crop faces to 160x160 around the given eye positions
cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
preprocessor_transformer = FaceCrop(
    cropped_image_size=(160, 160),
    cropped_positions=cropped_positions,
    color_channel="rgb",
    fixed_positions=fixed_positions,
)
transform_extra_arguments = (
    None
    if (cropped_positions is None or fixed_positions is not None)
    else (("annotations", "annotations"),)
)

# Extractor
extractor_transformer = tf_model()

# Algorithm
algorithm = Distance(
    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
)

# Chain the Transformers together
transformer = make_pipeline(
    wrap(["sample"], preprocessor_transformer, transform_extra_arguments=transform_extra_arguments),
    wrap(["sample"], extractor_transformer),
    # Add more transformers here if needed
)

# Assemble the Vanilla Biometrics pipeline and execute
pipeline = VanillaBiometricsPipeline(transformer, algorithm)
transformer = pipeline.transformer
from .DCTBlocks import DCTBlocks
from .GridGraph import GridGraph
from .LGBPHS import LGBPHS
from .mxnet_resnet import mxnet_model
from .pytorch_model import pytorch_loaded_model
from .pytorch_model import pytorch_library_model
from .tf_model import tf_model
from .opencv_caffe import opencv_model

# gets sphinx autodoc done right - don't remove it
def __appropriate__(*args):
@@ -20,5 +25,10 @@ __appropriate__(
    DCTBlocks,
    GridGraph,
    LGBPHS,
    mxnet_model,
    pytorch_loaded_model,
    pytorch_library_model,
    tf_model,
    opencv_model,
)

__all__ = [_ for _ in dir() if not _.startswith('_')]
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch>

"""Feature extraction with ResNet models using the mxnet interface"""

import os
import warnings

import mxnet as mx
import pkg_resources
from mxnet import gluon
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.utils import check_array

from bob.extension import rc

mxnet_resnet_directory = rc["bob.extractor_model.mxnet"]
mxnet_weight_directory = rc["bob.extractor_weights.mxnet"]


class mxnet_model(TransformerMixin, BaseEstimator):
    """Extracts features using deep face recognition models under the mxnet interface.

    Users can download pretrained face recognition models with the mxnet interface.
    The paths to the downloaded model and its weights must be specified before running
    the extractor (usually before running the pipeline file that includes the
    extractor), through the ``bob.extractor_model.mxnet`` and
    ``bob.extractor_weights.mxnet`` configuration entries:

    .. code-block:: sh

        $ bob config set bob.extractor_model.mxnet /PATH/TO/MODEL/
        $ bob config set bob.extractor_weights.mxnet /PATH/TO/WEIGHTS/

    Example (pretrained ResNet models): `LResNet100E-IR,ArcFace@ms1m-refine-v2 <https://github.com/deepinsight/insightface>`_

    The extracted features can be combined with different algorithms.

    **Parameters:**

    use_gpu: True or False.
    """

    def __init__(self, use_gpu=False, **kwargs):
        super().__init__(**kwargs)
        self.model = None
        self.use_gpu = use_gpu

        internal_path = pkg_resources.resource_filename(
            __name__, os.path.join("data", "resnet"),
        )

        checkpoint_path = (
            internal_path
            if rc["bob.bio.face.models.mxnet_resnet"] is None
            else rc["bob.bio.face.models.mxnet_resnet"]
        )

        self.checkpoint_path = checkpoint_path

    def _load_model(self):
        ctx = mx.gpu() if self.use_gpu else mx.cpu()

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            deserialized_net = gluon.nn.SymbolBlock.imports(
                mxnet_resnet_directory, ["data"], mxnet_weight_directory, ctx=ctx
            )

        self.model = deserialized_net

    def transform(self, X):
        """__call__(image) -> feature

        Extracts the features from the given image.

        **Parameters:**

        image : 2D :py:class:`numpy.ndarray` (floats)
            The image to extract the features from.

        **Returns:**

        feature : 2D, 3D, or 4D :py:class:`numpy.ndarray` (floats)
            The list of features extracted from the image.
        """
        if self.model is None:
            self._load_model()

        X = check_array(X, allow_nd=True)
        X = mx.nd.array(X)

        return self.model(X).asnumpy()

    def __getstate__(self):
        # The mxnet network cannot be pickled; drop it and reload lazily after unpickling
        d = self.__dict__.copy()
        d["model"] = None
        return d

    def _more_tags(self):
        return {"stateless": True, "requires_fit": False}
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch>

"""Feature extraction with Caffe models using the OpenCV DNN interface"""

import os

import cv2
import numpy as np
import pkg_resources
from sklearn.base import TransformerMixin, BaseEstimator

from bob.extension import rc

opencv_model_directory = rc["bob.extractor_model.opencv"]
opencv_model_prototxt = rc["bob.extractor_weights.opencv"]


class opencv_model(TransformerMixin, BaseEstimator):
    """Extracts features using deep face recognition models under the OpenCV interface.

    Users can download pretrained face recognition models compatible with the OpenCV
    interface. The paths to the downloaded model and its weights must be specified
    before running the extractor (usually before running the pipeline file that
    includes the extractor), through the ``bob.extractor_model.opencv`` and
    ``bob.extractor_weights.opencv`` configuration entries:

    .. code-block:: sh

        $ bob config set bob.extractor_model.opencv /PATH/TO/MODEL/
        $ bob config set bob.extractor_weights.opencv /PATH/TO/WEIGHTS/

    The extracted features can be combined with different algorithms.

    .. note::
       This structure can only be used with Caffe pretrained models.

    **Parameters:**

    use_gpu: True or False.
    """

    def __init__(self, use_gpu=False, **kwargs):
        super().__init__(**kwargs)
        self.model = None
        self.use_gpu = use_gpu

        internal_path = pkg_resources.resource_filename(
            __name__, os.path.join("data", "opencv_model"),
        )

        checkpoint_path = (
            internal_path
            if rc["bob.bio.face.models.opencv"] is None
            else rc["bob.bio.face.models.opencv"]
        )

        self.checkpoint_path = checkpoint_path

    def _load_model(self):
        net = cv2.dnn.readNetFromCaffe(opencv_model_prototxt, opencv_model_directory)
        self.model = net

    def transform(self, X):
        """__call__(image) -> feature

        Extracts the features from the given image.

        **Parameters:**

        image : 2D :py:class:`numpy.ndarray` (floats)
            The image to extract the features from.

        **Returns:**

        feature : 2D or 3D :py:class:`numpy.ndarray` (floats)
            The list of features extracted from the image.
        """
        if self.model is None:
            self._load_model()

        img = np.array(X)

        self.model.setInput(img)

        return self.model.forward()

    def __getstate__(self):
        # The OpenCV network cannot be pickled; drop it and reload lazily
        d = self.__dict__.copy()
        d["model"] = None
        return d

    def _more_tags(self):
        return {"stateless": True, "requires_fit": False}
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch>

"""Feature extraction with face recognition models under the PyTorch interface"""

import imp
import os

import pkg_resources
import torch
from sklearn.base import TransformerMixin, BaseEstimator

from bob.extension import rc

pytorch_model_directory = rc["bob.extractor_model.pytorch"]
pytorch_weight_directory = rc["bob.extractor_weights.pytorch"]


class pytorch_loaded_model(TransformerMixin, BaseEstimator):
    """Extracts features using deep face recognition models under the PyTorch
    interface, especially for models and weights that need to be loaded by hand.

    Users can download pretrained face recognition models for the PyTorch interface.
    The paths to the downloaded model definition and its weights must be specified
    before running the extractor (usually before running the pipeline file that
    includes the extractor), through the ``bob.extractor_model.pytorch`` and
    ``bob.extractor_weights.pytorch`` configuration entries:

    .. code-block:: sh

        $ bob config set bob.extractor_model.pytorch /PATH/TO/MODEL/
        $ bob config set bob.extractor_weights.pytorch /PATH/TO/WEIGHTS/

    The extracted features can be combined with different algorithms.

    **Parameters:**

    use_gpu: True or False.
    """

    def __init__(self, use_gpu=False, **kwargs):
        super().__init__(**kwargs)
        self.model = None
        self.use_gpu = use_gpu

        internal_path = pkg_resources.resource_filename(
            __name__, os.path.join("data", "resnet"),
        )

        checkpoint_path = (
            internal_path
            if rc["bob.bio.face.models.pytorchmodel"] is None
            else rc["bob.bio.face.models.pytorchmodel"]
        )

        self.checkpoint_path = checkpoint_path
        self.device = None

    def _load_model(self):
        # Load the model definition first so that torch.load can unpickle the network
        MainModel = imp.load_source("MainModel", pytorch_model_directory)
        network = torch.load(pytorch_weight_directory)
        network.eval()

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        network.to(self.device)

        self.model = network

    def transform(self, X):
        """__call__(image) -> feature

        Extracts the features from the given image.

        **Parameters:**

        image : 2D :py:class:`numpy.ndarray` (floats)
            The image to extract the features from.

        **Returns:**

        feature : 2D or 3D :py:class:`numpy.ndarray` (floats)
            The list of features extracted from the image.
        """
        if self.model is None:
            self._load_model()

        X = torch.Tensor(X)

        return self.model(X).detach().numpy()

    def __getstate__(self):
        # The torch network cannot be pickled; drop it and reload lazily
        d = self.__dict__.copy()
        d["model"] = None
        return d

    def _more_tags(self):
        return {"stateless": True, "requires_fit": False}


class pytorch_library_model(TransformerMixin, BaseEstimator):
    """Extracts features using deep face recognition models registered in the PyTorch
    library.

    Users can import pretrained face recognition models from the PyTorch library. The
    model should be instantiated in the pipeline file and passed to this transformer.
    Example: `facenet_pytorch <https://github.com/timesler/facenet-pytorch>`_

    The extracted features can be combined with different algorithms.

    **Parameters:**

    model: PyTorch model called from the library.
    use_gpu: True or False.
    """

    def __init__(self, model=None, use_gpu=False, **kwargs):
        super().__init__(**kwargs)
        self.model = model
        self.use_gpu = use_gpu

        internal_path = pkg_resources.resource_filename(
            __name__, os.path.join("data", "resnet"),
        )

        checkpoint_path = (
            internal_path
            if rc["bob.bio.face.models.pytorchmodel"] is None
            else rc["bob.bio.face.models.pytorchmodel"]
        )

        self.checkpoint_path = checkpoint_path
        self.device = None

    def transform(self, X):
        """__call__(image) -> feature

        Extracts the features from the given image.

        **Parameters:**

        image : 2D :py:class:`numpy.ndarray` (floats)
            The image to extract the features from.

        **Returns:**

        feature : 2D or 3D :py:class:`numpy.ndarray` (floats)
            The list of features extracted from the image.
        """
        X = torch.Tensor(X)

        return self.model(X).detach().numpy()

    def __getstate__(self):
        # The torch network cannot be pickled; drop it and reload lazily
        d = self.__dict__.copy()
        d["model"] = None
        return d

    def _more_tags(self):
        return {"stateless": True, "requires_fit": False}
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch>

"""Feature extraction with face recognition models under the TensorFlow interface"""

import os

import pkg_resources
import tensorflow as tf
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.utils import check_array

from bob.extension import rc
from bob.learn.tensorflow.utils.image import to_channels_last

tf_model_directory = rc["bob.extractor_model.tf"]


class tf_model(TransformerMixin, BaseEstimator):
    """Extracts features using deep face recognition models under the TensorFlow
    interface.

    Users can download pretrained face recognition models for the TensorFlow
    interface. The path to the downloaded model must be specified before running the
    extractor (usually before running the pipeline file that includes the extractor),
    through the ``bob.extractor_model.tf`` configuration entry:

    .. code-block:: sh

        $ bob config set bob.extractor_model.tf /PATH/TO/MODEL/

    The extracted features can be combined with different algorithms.

    **Parameters:**

    use_gpu: True or False.
    """

    def __init__(self, use_gpu=False, **kwargs):
        super().__init__(**kwargs)
        self.model = None
        self.use_gpu = use_gpu

        internal_path = pkg_resources.resource_filename(
            __name__, os.path.join("data", "resnet"),
        )

        checkpoint_path = (
            internal_path
            if rc["bob.bio.face.models.tfmodel"] is None
            else rc["bob.bio.face.models.tfmodel"]
        )

        self.checkpoint_path = checkpoint_path

    def _load_model(self):
        model = tf.keras.models.load_model(tf_model_directory)
        self.model = model

    def transform(self, X):
        """__call__(image) -> feature

        Extracts the features from the given image.

        **Parameters:**

        image : 2D :py:class:`numpy.ndarray` (floats)
            The image to extract the features from.

        **Returns:**

        feature : 2D or 3D :py:class:`numpy.ndarray` (floats)
            The list of features extracted from the image.
        """
        if self.model is None:
            self._load_model()

        X = check_array(X, allow_nd=True)
        X = tf.convert_to_tensor(X)
        X = to_channels_last(X)

        return self.model.predict(X)

    def __getstate__(self):
        # The TensorFlow model cannot be pickled; drop it and reload lazily
        d = self.__dict__.copy()
        d["model"] = None
        return d

    def _more_tags(self):
        return {"stateless": True, "requires_fit": False}
@@ -56,3 +56,17 @@ Deep learning baselines
* ``inception-resnetv1-casiawebface``: Inception Resnet v1 model trained using the Casia Web dataset in the context of the work published by [TFP18]_
* ``arcface-insightface``: Arcface model from `Insightface <https://github.com/deepinsight/insightface>`_

Deep Learning with different interfaces baselines
=================================================

* ``mxnet_pipe``: ArcFace ResNet model using the MxNet interface, from `Insightface <https://github.com/deepinsight/insightface>`_
* ``pytorch_pipe_v1``: PyTorch network that extracts 1000-dimensional features, trained by Manuel Günther, as described in [LGB18]_
* ``pytorch_pipe_v2``: Inception Resnet face recognition model from `facenet_pytorch <https://github.com/timesler/facenet-pytorch>`_
* ``tf_pipe``: Inception Resnet v2 model trained using the MSCeleb dataset in the context of the work published by [TFP18]_
* ``opencv_pipe``: VGG Face descriptor pretrained models, i.e. the `Caffe model <https://www.robots.ox.ac.uk/~vgg/software/vgg_face/>`_
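As a sketch, any of these registered baselines can also be resolved programmatically; ``bob.bio.base.load_resource`` with the ``pipeline`` keyword is an assumption here, based on how the other registered baselines are loaded:

from bob.bio.base import load_resource

# Resolve the registered "mxnet-pipe" entry point into a VanillaBiometricsPipeline
pipeline = load_resource("mxnet-pipe", "pipeline")
transformer = pipeline.transformer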
@@ -13,7 +13,6 @@ Databases
.. autosummary::
    bob.bio.face.database.ARFaceBioDatabase
    bob.bio.face.database.AtntBioDatabase
    bob.bio.face.database.CasiaAfricaDatabase
    bob.bio.face.database.MobioDatabase
    bob.bio.face.database.ReplayBioDatabase
    bob.bio.face.database.ReplayMobileBioDatabase
@@ -35,6 +34,7 @@ Face Image Annotators
    bob.bio.face.annotator.BobIpFacedetect
    bob.bio.face.annotator.BobIpFlandmark
    bob.bio.face.annotator.BobIpMTCNN
    bob.bio.face.annotator.BobIpTinyface

Image Preprocessors
@@ -57,7 +57,11 @@ Image Feature Extractors
    bob.bio.face.extractor.DCTBlocks
    bob.bio.face.extractor.GridGraph
    bob.bio.face.extractor.LGBPHS
    bob.bio.face.extractor.mxnet_model
    bob.bio.face.extractor.pytorch_loaded_model
    bob.bio.face.extractor.pytorch_library_model
    bob.bio.face.extractor.tf_model
    bob.bio.face.extractor.opencv_model

Face Recognition Algorithms
~~~~~~~~~~~~~~~~~~~~~~~~~~~
...
@@ -17,3 +17,4 @@ References
.. [ZSQ09] *W. Zhang, S. Shan, L. Qing, X. Chen and W. Gao*. **Are Gabor phases really useless for face recognition?** Pattern Analysis & Applications, 12:301-307, 2009.
.. [TFP18] *T. de Freitas Pereira, A. Anjos and S. Marcel*. **Heterogeneous face recognition using domain specific units.** IEEE Transactions on Information Forensics and Security, 14(7):1803-1816, 2018.
.. [HRM06] *G. Heusch, Y. Rodriguez, and S. Marcel*. **Local Binary Patterns as an Image Preprocessing for Face Authentication**. In IEEE International Conference on Automatic Face and Gesture Recognition (AFGR), 2006.
.. [LGB18] *C. Li, M. Gunther and T. E. Boult*. **ECLIPSE: Ensembles of Centroids Leveraging Iteratively Processed Spatial Eclipse Clustering**. In IEEE Winter Conference on Applications of Computer Vision (WACV), pp. 131-140, 2018. doi: 10.1109/WACV.2018.00021.
@@ -121,12 +121,14 @@ setup(
            "facedetect-eye-estimate = bob.bio.face.config.annotator.facedetect_eye_estimate:annotator",
            "flandmark = bob.bio.face.config.annotator.flandmark:annotator",
            "mtcnn = bob.bio.face.config.annotator.mtcnn:annotator",
            "tinyface = bob.bio.face.config.annotator.tinyface:annotator",
        ],
        "bob.bio.transformer": [
            "facedetect-eye-estimate = bob.bio.face.config.annotator.facedetect_eye_estimate:transformer",
            "facedetect = bob.bio.face.config.annotator.facedetect:transformer",
            "flandmark = bob.bio.face.config.annotator.flandmark:annotator",
            "mtcnn = bob.bio.face.config.annotator.mtcnn:transformer",
            "tinyface = bob.bio.face.config.annotator.tinyface:transformer",
            "facenet-sanderberg = bob.bio.face.config.baseline.facenet_sanderberg:transformer",
            "inception-resnetv1-casiawebface = bob.bio.face.config.baseline.inception_resnetv1_casiawebface:transformer",
            "inception-resnetv2-casiawebface = bob.bio.face.config.baseline.inception_resnetv2_casiawebface:transformer",
@@ -136,6 +138,11 @@ setup(
            "gabor-graph = bob.bio.face.config.baseline.gabor_graph:transformer",
            "lgbphs = bob.bio.face.config.baseline.lgbphs:transformer",
            "dummy = bob.bio.face.config.baseline.dummy:transformer",
            "mxnet-pipe = bob.bio.face.config.baseline.mxnet_pipe:transformer",
            "pytorch-pipe-v1 = bob.bio.face.config.baseline.pytorch_pipe_v1:transformer",
            "pytorch-pipe-v2 = bob.bio.face.config.baseline.pytorch_pipe_v2:transformer",
            "tf-pipe = bob.bio.face.config.baseline.tf_pipe:transformer",
            "opencv-pipe = bob.bio.face.config.baseline.opencv_pipe:transformer",
        ],
        # baselines
        "bob.bio.pipeline": [
@@ -150,8 +157,12 @@ setup(
            "lda = bob.bio.face.config.baseline.lda:pipeline",
            "dummy = bob.bio.face.config.baseline.dummy:pipeline",
            "resnet50-msceleb-arcface-2021 = bob.bio.face.config.baseline.resnet50_msceleb_arcface_2021:pipeline",
            "resnet50-vgg2-arcface-2021 = bob.bio.face.config.baseline.resnet50_vgg2_arcface_2021:pipeline",
            "mobilenetv2-msceleb-arcface-2021 = bob.bio.face.config.baseline.mobilenetv2_msceleb_arcface_2021",
            "mxnet-pipe = bob.bio.face.config.baseline.mxnet_pipe:pipeline",
            "pytorch-pipe-v1 = bob.bio.face.config.baseline.pytorch_pipe_v1:pipeline",
            "pytorch-pipe-v2 = bob.bio.face.config.baseline.pytorch_pipe_v2:pipeline",
            "tf-pipe = bob.bio.face.config.baseline.tf_pipe:pipeline",
            "opencv-pipe = bob.bio.face.config.baseline.opencv_pipe:pipeline",
        ],
        "bob.bio.config": [
            "facenet-sanderberg = bob.bio.face.config.baseline.facenet_sanderberg",
@@ -163,6 +174,11 @@ setup(
            "arcface-insightface = bob.bio.face.config.baseline.arcface_insightface",
            "lgbphs = bob.bio.face.config.baseline.lgbphs",
            "lda = bob.bio.face.config.baseline.lda",
            "mxnet-pipe = bob.bio.face.config.baseline.mxnet_pipe",
            "pytorch-pipe-v1 = bob.bio.face.config.baseline.pytorch_pipe_v1",
            "pytorch-pipe-v2 = bob.bio.face.config.baseline.pytorch_pipe_v2",
            "tf-pipe = bob.bio.face.config.baseline.tf_pipe",
            "opencv-pipe = bob.bio.face.config.baseline.opencv_pipe",
"arface = bob.bio.face.config.database.arface", "arface = bob.bio.face.config.database.arface",
"atnt = bob.bio.face.config.database.atnt", "atnt = bob.bio.face.config.database.atnt",
"gbu = bob.bio.face.config.database.gbu", "gbu = bob.bio.face.config.database.gbu",
...@@ -184,7 +200,6 @@ setup( ...@@ -184,7 +200,6 @@ setup(
"pola-thermal = bob.bio.face.config.database.pola_thermal", "pola-thermal = bob.bio.face.config.database.pola_thermal",
"cbsr-nir-vis-2 = bob.bio.face.config.database.cbsr_nir_vis_2", "cbsr-nir-vis-2 = bob.bio.face.config.database.cbsr_nir_vis_2",
"resnet50-msceleb-arcface-2021 = bob.bio.face.config.baseline.resnet50_msceleb_arcface_2021", "resnet50-msceleb-arcface-2021 = bob.bio.face.config.baseline.resnet50_msceleb_arcface_2021",
"resnet50-vgg2-arcface-2021 = bob.bio.face.config.baseline.resnet50_vgg2_arcface_2021",
"mobilenetv2-msceleb-arcface-2021 = bob.bio.face.config.baseline.mobilenetv2_msceleb_arcface_2021", "mobilenetv2-msceleb-arcface-2021 = bob.bio.face.config.baseline.mobilenetv2_msceleb_arcface_2021",
], ],
"bob.bio.cli": [ "bob.bio.cli": [
......