Commit 60c531cb authored by Yu LINGHU's avatar Yu LINGHU Committed by Tiago de Freitas Pereira
Browse files

clone the updates and recommit

parent 16372b9d
......@@ -29,6 +29,8 @@ class BobIpTinyface(Base):
annotations = self.tinyface.detect(image)
if annotations is not None:
return annotations[0]
r = annotations[0]
return {"topleft": (r[0], r[1]), "bottomright": (r[2], r[3])}
else:
return None
from bob.bio.face.annotator import BobIpTinyface
annotator = BobIpTinyface()
\ No newline at end of file
annotator = BobIpTinyface()
import bob.bio.base
from bob.bio.face.preprocessor import FaceCrop
from bob.bio.face.extractor import MxNetModel
from bob.bio.face.extractor import mxnet_model
from bob.bio.base.algorithm import Distance
from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
import scipy.spatial
......@@ -39,7 +39,7 @@ transform_extra_arguments = (
)
extractor_transformer = MxNetModel()
extractor_transformer = mxnet_model()
algorithm = Distance(
distance_function=scipy.spatial.distance.cosine, is_distance_function=True
......
import bob.bio.base
from bob.bio.face.preprocessor import FaceCrop
from bob.bio.base.transformers.preprocessor import PreprocessorTransformer
from bob.bio.face.extractor import OpenCVModel
from bob.bio.face.extractor import opencv_model
from bob.bio.base.extractor import Extractor
from bob.bio.base.transformers import ExtractorTransformer
from bob.bio.base.algorithm import Distance
......@@ -41,12 +41,9 @@ transform_extra_arguments = (
else (("annotations", "annotations"),)
)
# Extractor
weights = None # PATH/TO/WEIGHTS
config = None # PATH/TO/CONFIG
extractor_transformer = OpenCVModel(weights=weights, config=config)
# Extractor
extractor_transformer = opencv_model()
# Algorithm
......
import bob.bio.base
from bob.bio.face.preprocessor import FaceCrop
from bob.bio.face.extractor import PyTorchLoadedModel
from bob.bio.face.extractor import pytorch_loaded_model
from bob.bio.base.algorithm import Distance
from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
import scipy.spatial
......@@ -23,11 +23,11 @@ else:
fixed_positions = None
cropped_positions = {"leye": (110, 144), "reye": (110, 96)}
cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
preprocessor_transformer = FaceCrop(
cropped_image_size=(224, 224),
cropped_positions={"leye": (110, 144), "reye": (110, 96)},
cropped_positions={"leye": (49, 72), "reye": (49, 38)},
color_channel="rgb",
fixed_positions=fixed_positions,
)
......@@ -39,7 +39,7 @@ transform_extra_arguments = (
)
extractor_transformer = PyTorchLoadedModel()
extractor_transformer = pytorch_loaded_model()
algorithm = Distance(
distance_function=scipy.spatial.distance.cosine, is_distance_function=True
......@@ -61,3 +61,4 @@ transformer = make_pipeline(
# Assemble the Vanilla Biometric pipeline and execute
pipeline = VanillaBiometricsPipeline(transformer, algorithm)
transformer = pipeline.transformer
import bob.bio.base
from bob.bio.face.preprocessor import FaceCrop
from bob.bio.face.extractor import PyTorchLibraryModel
from bob.bio.face.extractor import pytorch_library_model
from facenet_pytorch import InceptionResnetV1
from bob.bio.base.algorithm import Distance
from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
......@@ -24,11 +24,11 @@ else:
fixed_positions = None
cropped_positions = {"leye": (110, 144), "reye": (110, 96)}
cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
preprocessor_transformer = FaceCrop(
cropped_image_size=(224, 224),
cropped_positions={"leye": (110, 144), "reye": (110, 96)},
cropped_positions={"leye": (49, 72), "reye": (49, 38)},
color_channel="rgb",
fixed_positions=fixed_positions,
)
......@@ -41,7 +41,7 @@ transform_extra_arguments = (
model = InceptionResnetV1(pretrained="vggface2").eval()
extractor_transformer = PyTorchLibraryModel(model=model)
extractor_transformer = pytorch_library_model(model=model)
algorithm = Distance(
......@@ -64,3 +64,4 @@ transformer = make_pipeline(
# Assemble the Vanilla Biometric pipeline and execute
pipeline = VanillaBiometricsPipeline(transformer, algorithm)
transformer = pipeline.transformer
import bob.bio.base
from bob.bio.face.preprocessor import FaceCrop
from bob.bio.face.extractor import TensorFlowModel
from bob.bio.face.extractor import tf_model
from bob.bio.base.algorithm import Distance
from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
import scipy.spatial
......@@ -24,11 +24,11 @@ else:
# Preprocessor
cropped_positions = {"leye": (80, 100), "reye": (80, 60)}
cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
preprocessor_transformer = FaceCrop(
cropped_image_size=(160, 160),
cropped_positions={"leye": (80, 100), "reye": (80, 60)},
cropped_positions={"leye": (49, 72), "reye": (49, 38)},
color_channel="rgb",
fixed_positions=fixed_positions,
)
......@@ -41,7 +41,7 @@ transform_extra_arguments = (
# Extractor
extractor_transformer = TensorFlowModel()
extractor_transformer = tf_model()
# Algorithm
algorithm = Distance(
......@@ -64,3 +64,4 @@ transformer = make_pipeline(
# Assemble the Vanilla Biometric pipeline and execute
pipeline = VanillaBiometricsPipeline(transformer, algorithm)
transformer = pipeline.transformer
from .DCTBlocks import DCTBlocks
from .GridGraph import GridGraph
from .LGBPHS import LGBPHS
from .MxNetModel import MxNetModel
from .PyTorchModel import PyTorchLoadedModel
from .PyTorchModel import PyTorchLibraryModel
from .TensorFlowModel import TensorFlowModel
from .OpenCVModel import OpenCVModel
from .mxnet_resnet import mxnet_model
from .pytorch_model import pytorch_loaded_model
from .pytorch_model import pytorch_library_model
from .tf_model import tf_model
from .opencv_caffe import opencv_model
# gets sphinx autodoc done right - don't remove it
def __appropriate__(*args):
......@@ -27,10 +27,10 @@ __appropriate__(
DCTBlocks,
GridGraph,
LGBPHS,
MxNetModel,
PyTorchLoadedModel,
PyTorchLibraryModel,
TensorFlowModel,
OpenCVModel,
mxnet_model,
pytorch_loaded_model,
pytorch_library_model,
tf_model,
opencv_model,
)
__all__ = [_ for _ in dir() if not _.startswith("_")]
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch>
"""Feature extraction resnet models using mxnet interface"""
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.utils import check_array
import numpy as np
import pkg_resources
import os
import mxnet as mx
from mxnet import gluon
import warnings
from bob.extension import rc
mxnet_resnet_directory = rc["bob.extractor_model.mxnet"]
mxnet_weight_directory = rc["bob.extractor_weights.mxnet"]
class mxnet_model(TransformerMixin, BaseEstimator):
    """Feature extractor wrapping a pretrained face-recognition model under MxNet.

    The model symbol file and its weights must be configured with the bob
    configuration keys ``bob.extractor_model.mxnet`` and
    ``bob.extractor_weights.mxnet`` before the pipeline that uses this
    extractor is run:

    .. code-block:: sh

        $ bob config set bob.extractor_model.mxnet /PATH/TO/MODEL/
        $ bob config set bob.extractor_weights.mxnet /PATH/TO/WEIGHTS/

    Example pretrained model:
    `LResNet100E-IR,ArcFace@ms1m-refine-v2 <https://github.com/deepinsight/insightface>`_

    The extracted features can be combined with different algorithms.

    **Parameters:**

    use_gpu: bool
        Run inference on ``mx.gpu()`` instead of ``mx.cpu()``.
    """

    def __init__(self, use_gpu=False, **kwargs):
        super().__init__(**kwargs)
        # The network is loaded lazily on the first call to ``transform``.
        self.model = None
        self.use_gpu = use_gpu
        internal_path = pkg_resources.resource_filename(
            __name__, os.path.join("data", "resnet"),
        )
        # Prefer an explicitly configured checkpoint path; otherwise fall
        # back to the data shipped with the package.
        checkpoint_path = (
            internal_path
            if rc["bob.bio.face.models.mxnet_resnet"] is None
            else rc["bob.bio.face.models.mxnet_resnet"]
        )
        self.checkpoint_path = checkpoint_path

    def _load_model(self):
        # Deserialize the symbol/weights pair configured through the rc keys.
        ctx = mx.gpu() if self.use_gpu else mx.cpu()
        with warnings.catch_warnings():
            # SymbolBlock.imports emits spurious deprecation warnings.
            warnings.simplefilter("ignore")
            deserialized_net = gluon.nn.SymbolBlock.imports(
                mxnet_resnet_directory, ["data"], mxnet_weight_directory, ctx=ctx
            )
        self.model = deserialized_net

    def transform(self, X):
        """Extract features from the given image.

        **Parameters:**

        X : :py:class:`numpy.ndarray` (floats)
            The image to extract the features from.

        **Returns:**

        feature : 2D, 3D, or 4D :py:class:`numpy.ndarray` (floats)
            The features extracted from the image.
        """
        if self.model is None:
            # BUG FIX: this previously called ``self.load_model()``, which
            # does not exist (the loader is the private ``_load_model``),
            # raising AttributeError on first use.
            self._load_model()
        X = check_array(X, allow_nd=True)
        X = mx.nd.array(X)
        return self.model(X).asnumpy()

    def __getstate__(self):
        # The loaded network is not picklable; drop it so it is re-loaded
        # lazily after unpickling.
        d = self.__dict__.copy()
        d["model"] = None
        return d

    def _more_tags(self):
        return {"stateless": True, "requires_fit": False}
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch>
import bob.bio.base
from bob.bio.face.preprocessor import FaceCrop
from bob.bio.base.transformers.preprocessor import PreprocessorTransformer
import cv2
import numpy as np
from bob.learn.tensorflow.utils.image import to_channels_last
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.utils import check_array
from bob.extension import rc
from functools import partial
import pkg_resources
import os
from PIL import Image
opencv_model_directory = rc["bob.extractor_model.opencv"]
opencv_model_prototxt = rc["bob.extractor_weights.opencv"]
class opencv_model(TransformerMixin, BaseEstimator):
    """Feature extractor wrapping a pretrained Caffe model via OpenCV's DNN API.

    The model and its weights must be configured with the bob configuration
    keys ``bob.extractor_model.opencv`` and ``bob.extractor_weights.opencv``
    before the pipeline that uses this extractor is run:

    .. code-block:: sh

        $ bob config set bob.extractor_model.opencv /PATH/TO/MODEL/
        $ bob config set bob.extractor_weights.opencv /PATH/TO/WEIGHTS/

    The extracted features can be combined with different algorithms.

    .. note::
       This structure can only be used for CAFFE pretrained models.

    **Parameters:**

    use_gpu: bool
        Kept for interface symmetry with the other extractors; the OpenCV
        DNN backend selection is not changed by this flag here.
    """

    def __init__(self, use_gpu=False, **kwargs):
        super().__init__(**kwargs)
        # The network is loaded lazily on the first call to ``transform``.
        self.model = None
        self.use_gpu = use_gpu
        internal_path = pkg_resources.resource_filename(
            __name__, os.path.join("data", "opencv_model"),
        )
        # Prefer an explicitly configured checkpoint path; otherwise fall
        # back to the data shipped with the package.
        checkpoint_path = (
            internal_path
            if rc["bob.bio.face.models.opencv"] is None
            else rc["bob.bio.face.models.opencv"]
        )
        self.checkpoint_path = checkpoint_path

    def _load_model(self):
        # readNetFromCaffe takes (prototxt, caffemodel) in that order.
        net = cv2.dnn.readNetFromCaffe(opencv_model_prototxt, opencv_model_directory)
        self.model = net

    def transform(self, X):
        """Extract features from the given image.

        **Parameters:**

        X : :py:class:`numpy.ndarray` (floats)
            The image to extract the features from.

        **Returns:**

        feature : 2D or 3D :py:class:`numpy.ndarray` (floats)
            The features extracted from the image.
        """
        if self.model is None:
            # BUG FIX: this previously called ``self.load_model()``, which
            # does not exist (the loader is the private ``_load_model``),
            # raising AttributeError on first use.
            self._load_model()
        img = np.array(X)
        self.model.setInput(img)
        return self.model.forward()

    def __getstate__(self):
        # The loaded network is not picklable; drop it so it is re-loaded
        # lazily after unpickling.
        d = self.__dict__.copy()
        d["model"] = None
        return d

    def _more_tags(self):
        return {"stateless": True, "requires_fit": False}
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch>
import torch
from bob.learn.tensorflow.utils.image import to_channels_last
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.utils import check_array
from bob.extension import rc
from functools import partial
import pkg_resources
import os
import numpy as np
import imp
pytorch_model_directory = rc["bob.extractor_model.pytorch"]
pytorch_weight_directory = rc["bob.extractor_weights.pytorch"]
class pytorch_loaded_model(TransformerMixin, BaseEstimator):
    """Feature extractor for PyTorch models whose definition and weights are
    loaded by hand from files.

    The model definition and its weights must be configured with the bob
    configuration keys ``bob.extractor_model.pytorch`` and
    ``bob.extractor_weights.pytorch`` before the pipeline that uses this
    extractor is run:

    .. code-block:: sh

        $ bob config set bob.extractor_model.pytorch /PATH/TO/MODEL/
        $ bob config set bob.extractor_weights.pytorch /PATH/TO/WEIGHTS/

    The extracted features can be combined with different algorithms.

    **Parameters:**

    use_gpu: bool
        Kept for interface symmetry; device placement is decided in
        ``_load_model`` from ``torch.cuda.is_available()``.
    """

    def __init__(self, use_gpu=False, **kwargs):
        super().__init__(**kwargs)
        # The network is loaded lazily on the first call to ``transform``.
        self.model = None
        self.use_gpu = use_gpu
        internal_path = pkg_resources.resource_filename(
            __name__, os.path.join("data", "resnet"),
        )
        # Prefer an explicitly configured checkpoint path; otherwise fall
        # back to the data shipped with the package.
        checkpoint_path = (
            internal_path
            if rc["bob.bio.face.models.pytorchmodel"] is None
            else rc["bob.bio.face.models.pytorchmodel"]
        )
        self.checkpoint_path = checkpoint_path
        self.device = None

    def _load_model(self):
        # load_source is needed for its side effect: it registers the model's
        # source as module ``MainModel`` so torch.load can unpickle the network.
        MainModel = imp.load_source('MainModel', pytorch_model_directory)
        network = torch.load(pytorch_weight_directory)
        network.eval()
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        network.to(self.device)
        self.model = network

    def transform(self, X):
        """Extract features from the given image.

        **Parameters:**

        X : :py:class:`numpy.ndarray` (floats)
            The image to extract the features from.

        **Returns:**

        feature : 2D or 3D :py:class:`numpy.ndarray` (floats)
            The features extracted from the image.
        """
        if self.model is None:
            # BUG FIX: this previously called ``self.load_model()``, which
            # does not exist (the loader is the private ``_load_model``),
            # raising AttributeError on first use.
            self._load_model()
        X = torch.Tensor(X)
        # NOTE(review): the input tensor is not moved to ``self.device``; if
        # the network was placed on CUDA this call will fail — confirm the
        # intended device handling with the pipeline authors.
        return self.model(X).detach().numpy()

    def __getstate__(self):
        # The loaded network is not picklable; drop it so it is re-loaded
        # lazily after unpickling.
        d = self.__dict__.copy()
        d["model"] = None
        return d

    def _more_tags(self):
        return {"stateless": True, "requires_fit": False}
class pytorch_library_model(TransformerMixin, BaseEstimator):
    """Feature extractor running a network instantiated from the PyTorch
    model zoo / a PyTorch library.

    The (pretrained) network is built by the caller and handed in through the
    ``model`` argument, e.g. an ``InceptionResnetV1`` from
    `facenet_pytorch <https://github.com/timesler/facenet-pytorch>`_.
    The extracted features can be combined with different algorithms.

    **Parameters:**

    model:
        A callable PyTorch network used to compute the features.
    use_gpu: bool
        Kept for interface symmetry with the other extractors.
    """

    def __init__(self, model=None, use_gpu=False, **kwargs):
        super().__init__(**kwargs)
        self.model = model
        self.use_gpu = use_gpu
        packaged_default = pkg_resources.resource_filename(
            __name__, os.path.join("data", "resnet"),
        )
        # An rc-configured path wins over the data shipped with the package.
        configured = rc["bob.bio.face.models.pytorchmodel"]
        if configured is None:
            self.checkpoint_path = packaged_default
        else:
            self.checkpoint_path = configured
        self.device = None

    def transform(self, X):
        """Extract features from the given image.

        **Parameters:**

        X : :py:class:`numpy.ndarray` (floats)
            The image to extract the features from.

        **Returns:**

        feature : 2D or 3D :py:class:`numpy.ndarray` (floats)
            The features extracted from the image.
        """
        batch = torch.Tensor(X)
        features = self.model(batch)
        return features.detach().numpy()

    def __getstate__(self):
        # Networks are not picklable; strip the model before serializing.
        state = self.__dict__.copy()
        state["model"] = None
        return state

    def _more_tags(self):
        return {"stateless": True, "requires_fit": False}
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch>
import tensorflow as tf
from bob.extension import rc
from bob.learn.tensorflow.utils.image import to_channels_last
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.utils import check_array
from tensorflow.keras import Sequential
from tensorflow.keras.layers.experimental import preprocessing
from functools import partial
import pkg_resources
import os
import numpy as np
from tensorflow import keras
tf_model_directory = rc["bob.extractor_model.tf"]
class tf_model(TransformerMixin, BaseEstimator):
"""Extracts features using deep face recognition models under TensorFlow Interface.
Users can download the pretrained face recognition models with TensorFlow Interface. The path to downloaded models should be specified before running the extractor (usually before running the pipeline file that includes the extractor). That is, set config of the model to :py:class:`bob.extractor_model.tf`.
.. code-block:: sh
$ bob config set bob.extractor_model.tf /PATH/TO/MODEL/
The extracted features can be combined with different the algorithms.
**Parameters:**
use_gpu: True or False.
"""
def __init__(self, use_gpu=False, **kwargs):
super().__init__(**kwargs)
self.model = None
self.use_gpu = use_gpu
internal_path = pkg_resources.resource_filename(
__name__, os.path.join("data", "resnet"),
)
checkpoint_path = (
internal_path