Commit db921441, authored by Tiago de Freitas Pereira (parent 4962ab4f)
Removed old traces
Merge request !112: Feature extractors
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch>
"""Feature extraction resnet models using mxnet interface"""
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.utils import check_array
import numpy as np
import pkg_resources
import os
import mxnet as mx
from mxnet import gluon
import warnings
from bob.extension import rc
mxnet_resnet_directory = rc["bob.extractor_model.mxnet"]
mxnet_weight_directory = rc["bob.extractor_weights.mxnet"]
class MxNetModel(TransformerMixin, BaseEstimator):
"""Extracts features using deep face recognition models under MxNet Interfaces.
Users can download the pretrained face recognition models with MxNet Interface. The path to downloaded models should be specified before running the extractor (usually before running the pipeline file that includes the extractor). That is, set config of the model frame to :py:class:`bob.extractor_model.mxnet`, and set config of the parameters to :py:class:`bob.extractor_weights.mxnet`.
.. code-block:: sh
$ bob config set bob.extractor_model.mxnet /PATH/TO/MODEL/
$ bob config set bob.extractor_weights.mxnet /PATH/TO/WEIGHTS/
Examples: (Pretrained ResNet models): `LResNet100E-IR,ArcFace@ms1m-refine-v2 <https://github.com/deepinsight/insightface>`_
The extracted features can be combined with different the algorithms.
**Parameters:**
use_gpu: True or False.
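A minimal usage sketch, assuming the class is importable from ``bob.bio.face.extractor``, that the configuration keys above point to a downloaded model, and that this model expects a batch of aligned 112x112 faces in channels-first layout (the size and layout are assumptions of the example, not requirements of this class):

.. code-block:: py

    import numpy as np
    from bob.bio.face.extractor import MxNetModel

    extractor = MxNetModel(use_gpu=False)
    # dummy batch with a single aligned face image
    images = np.random.rand(1, 3, 112, 112).astype("float32")
    features = extractor.transform(images)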
"""
def __init__(self, use_gpu=False, **kwargs):
super().__init__(**kwargs)
self.model = None
self.use_gpu = use_gpu
internal_path = pkg_resources.resource_filename(
__name__, os.path.join("data", "resnet"),
)
checkpoint_path = (
internal_path
if rc["bob.bio.face.models.mxnet_resnet"] is None
else rc["bob.bio.face.models.mxnet_resnet"]
)
self.checkpoint_path = checkpoint_path
def _load_model(self):
ctx = mx.gpu() if self.use_gpu else mx.cpu()
with warnings.catch_warnings():
warnings.simplefilter("ignore")
deserialized_net = gluon.nn.SymbolBlock.imports(
mxnet_resnet_directory, ["data"], mxnet_weight_directory, ctx=ctx
)
self.model = deserialized_net
def transform(self, X):
"""__call__(image) -> feature
Extracts the features from the given image.
**Parameters:**
image : 2D :py:class:`numpy.ndarray` (floats)
The image to extract the features from.
**Returns:**
feature : 2D, 3D, or 4D :py:class:`numpy.ndarray` (floats)
The features extracted from the image.
"""
if self.model is None:
self._load_model()
X = check_array(X, allow_nd=True)
X = mx.nd.array(X)
return self.model(X,).asnumpy()
def __getstate__(self):
# Handling unpicklable objects
d = self.__dict__.copy()
d["model"] = None
return d
def _more_tags(self):
return {"stateless": True, "requires_fit": False}
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch>
import cv2
import numpy as np
from sklearn.base import TransformerMixin, BaseEstimator
from bob.extension import rc
import pkg_resources
import os
# path to the Caffe network definition (.prototxt) and to the trained weights (.caffemodel)
opencv_model_prototxt = rc["bob.extractor_model.opencv"]
opencv_model_weights = rc["bob.extractor_weights.opencv"]
class OpenCVModel(TransformerMixin, BaseEstimator):
"""Extracts features using deep face recognition models under OpenCV Interface
Users can download the pretrained face recognition models with OpenCV Interface. The path to downloaded models should be specified before running the extractor (usually before running the pipeline file that includes the extractor). That is, set config of the model frame to :py:class:`bob.extractor_model.opencv`, and set config of the parameters to :py:class:`bob.extractor_weights.opencv`.
.. code-block:: sh
$ bob config set bob.extractor_model.opencv /PATH/TO/MODEL/
$ bob config set bob.extractor_weights.opencv /PATH/TO/WEIGHTS/
The extracted features can be combined with different the algorithms.
.. note::
This structure only can be used for CAFFE pretrained model.
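A minimal usage sketch, assuming the class is importable from ``bob.bio.face.extractor``, that the configuration keys above point to a downloaded model, and that the input has already been packed into the 4D ``(N, C, H, W)`` blob layout expected by ``cv2.dnn`` (the 224x224 size is an assumption of the example, not a requirement of this class):

.. code-block:: py

    import numpy as np
    from bob.bio.face.extractor import OpenCVModel

    extractor = OpenCVModel()
    # dummy blob with a single face image
    blob = np.random.rand(1, 3, 224, 224).astype("float32")
    features = extractor.transform(blob)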
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.model = None
internal_path = pkg_resources.resource_filename(
__name__, os.path.join("data", "opencv_model"),
)
checkpoint_path = (
internal_path
if rc["bob.bio.face.models.opencv"] is None
else rc["bob.bio.face.models.opencv"]
)
self.checkpoint_path = checkpoint_path
def _load_model(self):
net = cv2.dnn.readNetFromCaffe(opencv_model_prototxt, opencv_model_weights)
self.model = net
def transform(self, X):
"""__call__(image) -> feature
Extracts the features from the given image.
**Parameters:**
image : 2D :py:class:`numpy.ndarray` (floats)
The image to extract the features from.
**Returns:**
feature : 2D or 3D :py:class:`numpy.ndarray` (floats)
The features extracted from the image.
"""
if self.model is None:
self._load_model()
# cv2.dnn expects a float32 blob; scale pixel values to [0, 1]
img = np.asarray(X, dtype="float32")
img = img / 255
self.model.setInput(img)
return self.model.forward()
def __getstate__(self):
# Handling unpicklable objects
d = self.__dict__.copy()
d["model"] = None
return d
def _more_tags(self):
return {"stateless": True, "requires_fit": False}
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch>
import torch
from sklearn.base import TransformerMixin, BaseEstimator
from bob.extension import rc
import pkg_resources
import os
import imp
pytorch_model_directory = rc["bob.extractor_model.pytorch"]
pytorch_weight_directory = rc["bob.extractor_weights.pytorch"]
class PyTorchLoadedModel(TransformerMixin, BaseEstimator):
"""Extracts features using deep face recognition models under PyTorch Interface, especially for the models and weights that need to load by hand.
Users can download the pretrained face recognition models with PyTorch Interface. The path to downloaded models should be specified before running the extractor (usually before running the pipeline file that includes the extractor). That is, set config of the model frame to :py:class:`bob.extractor_model.pytorch`, and set config of the parameters to :py:class:`bob.extractor_weights.pytorch`.
.. code-block:: sh
$ bob config set bob.extractor_model.pytorch /PATH/TO/MODEL/
$ bob config set bob.extractor_weights.pytorch /PATH/TO/WEIGHTS/
The extracted features can be combined with different the algorithms.
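A minimal usage sketch, assuming the class is importable from ``bob.bio.face.extractor``, that the configuration keys above point to a downloaded model, and that this model expects a batch of 3x224x224 faces (the size is an assumption of the example, not a requirement of this class):

.. code-block:: py

    import numpy as np
    from bob.bio.face.extractor import PyTorchLoadedModel

    extractor = PyTorchLoadedModel()
    # dummy batch with a single face image
    images = np.random.rand(1, 3, 224, 224).astype("float32")
    features = extractor.transform(images)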
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.model = None
internal_path = pkg_resources.resource_filename(
__name__, os.path.join("data", "resnet"),
)
checkpoint_path = (
internal_path
if rc["bob.bio.face.models.pytorchmodel"] is None
else rc["bob.bio.face.models.pytorchmodel"]
)
self.checkpoint_path = checkpoint_path
self.device = None
def _load_model(self):
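# Load the network definition file as a module named "MainModel" so that
# torch.load below can unpickle the serialized network object.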
MainModel = imp.load_source("MainModel", pytorch_model_directory)
network = torch.load(pytorch_weight_directory)
network.eval()
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
network.to(self.device)
self.model = network
def transform(self, X):
"""__call__(image) -> feature
Extracts the features from the given image.
**Parameters:**
image : 2D :py:class:`numpy.ndarray` (floats)
The image to extract the features from.
**Returns:**
feature : 2D or 3D :py:class:`numpy.ndarray` (floats)
The features extracted from the image.
"""
if self.model is None:
self._load_model()
X = torch.Tensor(X)
X = X / 255
# move the input to the same device as the model before the forward pass
X = X.to(self.device)
return self.model(X).detach().cpu().numpy()
def __getstate__(self):
# Handling unpicklable objects
d = self.__dict__.copy()
d["model"] = None
return d
def _more_tags(self):
return {"stateless": True, "requires_fit": False}
class PyTorchLibraryModel(TransformerMixin, BaseEstimator):
"""Extracts features using deep face recognition with registered model frames in the PyTorch Library.
Users can import the pretrained face recognition models from PyTorch library. The model should be called in the pipeline. Example: `facenet_pytorch <https://github.com/timesler/facenet-pytorch>`_
The extracted features can be combined with different the algorithms.
**Parameters:**
model: pytorch model calling from library.
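A minimal usage sketch with ``facenet_pytorch``, assuming this class is importable from ``bob.bio.face.extractor`` (the 160x160 input size follows that library's examples and is an assumption here, not a requirement of this class):

.. code-block:: py

    import numpy as np
    from facenet_pytorch import InceptionResnetV1
    from bob.bio.face.extractor import PyTorchLibraryModel

    model = InceptionResnetV1(pretrained="vggface2").eval()
    extractor = PyTorchLibraryModel(model=model)
    # dummy batch with a single face image
    images = np.random.rand(1, 3, 160, 160).astype("float32")
    features = extractor.transform(images)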
"""
def __init__(self, model=None, **kwargs):
super().__init__(**kwargs)
self.model = model
internal_path = pkg_resources.resource_filename(
__name__, os.path.join("data", "resnet"),
)
checkpoint_path = (
internal_path
if rc["bob.bio.face.models.pytorchmodel"] is None
else rc["bob.bio.face.models.pytorchmodel"]
)
self.checkpoint_path = checkpoint_path
self.device = None
def _load_model(self):
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
self.model.to(self.device)
def transform(self, X):
"""__call__(image) -> feature
Extracts the features from the given image.
**Parameters:**
image : 2D :py:class:`numpy.ndarray` (floats)
The image to extract the features from.
**Returns:**
feature : 2D or 3D :py:class:`numpy.ndarray` (floats)
The features extracted from the image.
"""
# the model is provided at construction time, so load-time setup is keyed on the device
if self.device is None:
self._load_model()
X = torch.Tensor(X)
X = X / 255
# move the input to the same device as the model before the forward pass
X = X.to(self.device)
return self.model(X).detach().cpu().numpy()
def __getstate__(self):
# Handling unpicklable objects
d = self.__dict__.copy()
d["model"] = None
return d
def _more_tags(self):
return {"stateless": True, "requires_fit": False}
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch>
import tensorflow as tf
from bob.extension import rc
from bob.learn.tensorflow.utils.image import to_channels_last
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.utils import check_array
from tensorflow.keras import Sequential
from tensorflow.keras.layers.experimental import preprocessing
from functools import partial
import pkg_resources
import os
import numpy as np
from tensorflow import keras
tf_model_directory = rc["bob.extractor_model.tf"]
class TensorFlowModel(TransformerMixin, BaseEstimator):
"""Extracts features using deep face recognition models under TensorFlow Interface.
Users can download the pretrained face recognition models with TensorFlow Interface. The path to downloaded models should be specified before running the extractor (usually before running the pipeline file that includes the extractor). That is, set config of the model to :py:class:`bob.extractor_model.tf`.
.. code-block:: sh
$ bob config set bob.extractor_model.tf /PATH/TO/MODEL/
The extracted features can be combined with different the algorithms.
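A minimal usage sketch, assuming the class is importable from ``bob.bio.face.extractor``, that the configuration key above points to a saved model, and that this model expects 112x112 faces (the size is an assumption of the example); ``transform`` receives channels-first images and converts them to channels-last internally:

.. code-block:: py

    import numpy as np
    from bob.bio.face.extractor import TensorFlowModel

    extractor = TensorFlowModel()
    # dummy batch with a single face image in channels-first layout
    images = np.random.rand(1, 3, 112, 112).astype("float32")
    features = extractor.transform(images)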
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.model = None
internal_path = pkg_resources.resource_filename(
__name__, os.path.join("data", "resnet"),
)
checkpoint_path = (
internal_path
if rc["bob.bio.face.models.tfmodel"] is None
else rc["bob.bio.face.models.tfmodel"]
)
self.checkpoint_path = checkpoint_path
def _load_model(self):
model = tf.keras.models.load_model(tf_model_directory)
self.model = model
def transform(self, X):
"""__call__(image) -> feature
Extracts the features from the given image.
**Parameters:**
image : 2D :py:class:`numpy.ndarray` (floats)
The image to extract the features from.
**Returns:**
feature : 2D or 3D :py:class:`numpy.ndarray` (floats)
The features extracted from the image.
"""
if self.model is None:
self._load_model()
X = check_array(X, allow_nd=True)
X = tf.convert_to_tensor(X)
X = to_channels_last(X)
X = X / 255
predict = self.model.predict(X)
return predict
def __getstate__(self):
# Handling unpicklable objects
d = self.__dict__.copy()
d["model"] = None
return d
def _more_tags(self):
return {"stateless": True, "requires_fit": False}