Commit 9ad9020d authored by Tiago de Freitas Pereira

Updated pytorch models and opencv based models

parent fda2c82d
Merge request !112: Feature extractors
bob/bio/face/config/baseline/afffe.py

import bob.bio.base
from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.face.extractor import PyTorchLoadedModel
-from bob.bio.base.algorithm import Distance
-from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
+from bob.bio.face.embeddings.pytorch import AFFFE_2021
from bob.pipelines import wrap
import scipy.spatial
from bob.bio.base.pipelines.vanilla_biometrics import Distance
from sklearn.pipeline import make_pipeline
from bob.bio.base.pipelines.vanilla_biometrics import VanillaBiometricsPipeline
memory_demanding = False
if "database" in locals():
    annotation_type = database.annotation_type
@@ -22,16 +21,14 @@ else:
    annotation_type = None
    fixed_positions = None
cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
cropped_positions = {"leye": (110, 144), "reye": (110, 96)}
preprocessor_transformer = FaceCrop(
cropped_image_size=(224, 224),
cropped_positions={"leye": (110, 144), "reye": (110, 96)},
cropped_positions=cropped_positions,
color_channel="rgb",
fixed_positions=fixed_positions,
allow_upside_down_normalized_faces=True,
)
@@ -40,20 +37,13 @@ transform_extra_arguments = (
    None
    if (cropped_positions is None or fixed_positions is not None)
    else (("annotations", "annotations"),)
)
-extractor_transformer = PyTorchLoadedModel()
+extractor_transformer = AFFFE_2021()

# Algorithm
algorithm = Distance(
    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
)

# Chain the Transformers together
transformer = make_pipeline(
    wrap(
......
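The remainder of this config is collapsed above. As orientation, here is a minimal sketch of how such a vanilla-biometrics config typically finishes, using only the names already imported in this file; the exact tail is elided, so treat this as an assumption rather than the literal file contents:

# Hedged sketch of the collapsed tail: chain the preprocessor and the
# extractor, then build the pipeline object exposed by the entry point.
transformer = make_pipeline(
    wrap(
        ["sample"],
        preprocessor_transformer,
        transform_extra_arguments=transform_extra_arguments,
    ),
    wrap(["sample"], extractor_transformer),
)
pipeline = VanillaBiometricsPipeline(transformer, algorithm)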
bob/bio/face/config/baseline/vgg16_oxford.py

import bob.bio.base
from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.base.transformers.preprocessor import PreprocessorTransformer
-from bob.bio.face.extractor import OpenCVModel
-from bob.bio.base.extractor import Extractor
-from bob.bio.base.transformers import ExtractorTransformer
-from bob.bio.base.algorithm import Distance
-from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
+from bob.bio.face.embeddings.opencv import VGG16_Oxford
from bob.pipelines import wrap
import scipy.spatial
from bob.bio.base.pipelines.vanilla_biometrics import Distance
from sklearn.pipeline import make_pipeline
from bob.bio.base.pipelines.vanilla_biometrics import VanillaBiometricsPipeline
memory_demanding = False
if "database" in locals():
    annotation_type = database.annotation_type
@@ -25,23 +21,13 @@ else:
    annotation_type = None
    fixed_positions = None
cropped_positions = {"leye": (98, 144), "reye": (98, 76)}
# Preprocessor
cropped_positions = {"leye": (100, 140), "reye": (100, 95)}
preprocessor_transformer = FaceCrop(
cropped_image_size=(224, 224),
cropped_positions={"leye": (98, 144), "reye": (98, 76)},
color_channel="rgb",
fixed_positions=fixed_positions,
)
cropped_positions = {"leye": (98, 144), "reye": (98, 76)}
# Preprocessor
preprocessor_transformer = FaceCrop(
cropped_image_size=(224, 224),
cropped_positions={"leye": (98, 144), "reye": (98, 76)},
cropped_positions=cropped_positions,
color_channel="rgb",
fixed_positions=fixed_positions,
allow_upside_down_normalized_faces=True,
)
@@ -52,10 +38,7 @@ transform_extra_arguments = (

# Extractor
-weights = None  # PATH/TO/WEIGHTS
-config = None  # PATH/TO/CONFIG
-extractor_transformer = OpenCVModel(weights=weights, config=config)
+extractor_transformer = VGG16_Oxford()

# Algorithm
@@ -63,9 +46,6 @@ algorithm = Distance(
    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
)

## Creation of the pipeline
# Chain the Transformers together
transformer = make_pipeline(
    wrap(
......
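Both configs are registered as named resources (see the setup.py hunks further down), so they can be loaded by name; load_resource is the same accessor the test file below uses:

from bob.bio.base import load_resource

# Load the full pipeline registered under a baseline name
# ("afffe" or "vgg16-oxford" after this commit).
pipeline = load_resource("vgg16-oxford", "pipeline")

# Or only the transformer part (cropping + embedding extraction).
transformer = load_resource("vgg16-oxford", "transformer")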
bob/bio/face/embeddings/opencv.py

#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch>
# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>

import bob.bio.base
import numpy as np
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.utils import check_array
import os
from bob.extension.download import get_file

class OpenCVTransformer(TransformerMixin, BaseEstimator):
    """
    Base Transformer using the OpenCV DNN interface.

    .. note::
       This class supports the formats readable by ``cv2.dnn.readNet``:
       Caffe ``.caffemodel``, TensorFlow ``.pb``, Torch ``.t7``/``.net``,
       Darknet ``.weights``, DLDT ``.bin``, and ONNX ``.onnx``.

    Parameters
    ----------
    checkpoint_path : str
        Path to the model checkpoint.

    config : str
        Path to the model configuration file (e.g. ``.json``, ``.prototxt``).
    """

    def __init__(self, checkpoint_path=None, config=None, **kwargs):
        super().__init__(**kwargs)
        self.checkpoint_path = checkpoint_path
        self.config = config
        self.model = None

    def _load_model(self):
        import cv2

        net = cv2.dnn.readNet(self.checkpoint_path, self.config)
        self.model = net
    def transform(self, X):
        """__call__(image) -> feature

        Extracts features from the given image.

        **Parameters:**

        X : 2D :py:class:`numpy.ndarray` (floats)
            The image to extract the features from.

        **Returns:**

        feature : 2D or 3D :py:class:`numpy.ndarray` (floats)
            The features extracted from the image.
        """
        if self.model is None:
            self._load_model()

        # Scale pixel values to [0, 1] and forward through the network.
        img = np.array(X)
        img = img / 255
        self.model.setInput(img)

        return self.model.forward()
    def __getstate__(self):
        # Handling unpicklable objects
        d = self.__dict__.copy()
        d["model"] = None
        return d

    def _more_tags(self):
        return {"stateless": True, "requires_fit": False}

class VGG16_Oxford(OpenCVTransformer):
    """
    Original VGG16 model from the paper: https://www.robots.ox.ac.uk/~vgg/publications/2015/Parkhi15/parkhi15.pdf
    """

    def __init__(self):
        urls = [
            "https://www.robots.ox.ac.uk/~vgg/software/vgg_face/src/vgg_face_caffe.tar.gz",
            "http://bobconda.lab.idiap.ch/public-upload/data/bob/bob.bio.face/master/caffe/vgg_face_caffe.tar.gz",
        ]
        filename = get_file(
            "vgg_face_caffe.tar.gz",
            urls,
            cache_subdir="data/caffe/vgg_face_caffe",
            file_hash="ee707ac6e890bc148cb155adeaad12be",
            extract=True,
        )
        path = os.path.dirname(filename)
        config = os.path.join(path, "vgg_face_caffe", "VGG_FACE_deploy.prototxt")
        checkpoint_path = os.path.join(path, "vgg_face_caffe", "VGG_FACE.caffemodel")

        super(VGG16_Oxford, self).__init__(checkpoint_path, config)
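A short usage sketch for the class above: the first call downloads and caches the Caffe model, and the 224x224 crop matches the FaceCrop settings in the vgg16_oxford config; the NCHW blob layout is an assumption based on the Caffe deploy file:

import numpy as np

extractor = VGG16_Oxford()  # fetches vgg_face_caffe.tar.gz on first use

# A pre-cropped face as an NCHW blob; random data stands in for a real crop.
face = np.random.rand(1, 3, 224, 224).astype(np.float32) * 255
descriptor = extractor.transform(face)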
bob/bio/face/embeddings/pytorch.py

#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Yu Linghu & Xinyi Zhang <yu.linghu@uzh.ch, xinyi.zhang@uzh.ch>
# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>

from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.utils import check_array
import numpy as np
import imp
import os
from bob.extension.download import get_file

class PyTorchModel(TransformerMixin, BaseEstimator):
    """
    Base Transformer using a PyTorch model loaded from a checkpoint.

    Parameters
    ----------
    checkpoint_path : str
        Path to the model checkpoint.

    config : str
        Path to a Python file defining the model architecture.
    """

    def __init__(self, checkpoint_path=None, config=None, **kwargs):
        super().__init__(**kwargs)
        self.checkpoint_path = checkpoint_path
        self.config = config
        self.model = None
    def transform(self, X):
        """__call__(image) -> feature

        Extracts features from the given image.

        **Parameters:**

        X : 2D :py:class:`numpy.ndarray` (floats)
            The image to extract the features from.

        **Returns:**

        feature : 2D or 3D :py:class:`numpy.ndarray` (floats)
            The features extracted from the image.
        """
        import torch

        if self.model is None:
            self._load_model()

        X = check_array(X, allow_nd=True)
        X = torch.Tensor(X)
        # Scale pixel values to [0, 1] before the forward pass.
        X = X / 255

        return self.model(X).detach().numpy()
    def __getstate__(self):
        # Handling unpicklable objects
        d = self.__dict__.copy()
        d["model"] = None
        return d

    def _more_tags(self):
        return {"stateless": True, "requires_fit": False}

class AFFFE_2021(PyTorchModel):
    """
    AFFFE from https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/pytorch/AFFFE-42a53f19.tar.gz
    """

    def __init__(self):
        urls = [
            "https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/pytorch/AFFFE-42a53f19.tar.gz",
            "http://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/pytorch/AFFFE-42a53f19.tar.gz",
        ]
        filename = get_file(
            "AFFFE-42a53f19.tar.gz",
            urls,
            cache_subdir="data/pytorch/AFFFE-42a53f19.tar.gz",
            file_hash="1358bbcda62cb59b85b2418ef1f81e9b",
            extract=True,
        )
        path = os.path.dirname(filename)
        config = os.path.join(path, "AFFFE.py")
        checkpoint_path = os.path.join(path, "AFFFE.pth")

        super(AFFFE_2021, self).__init__(checkpoint_path, config)
    def _load_model(self):
        import torch

        # Load the architecture definition shipped with the checkpoint.
        MainModel = imp.load_source("MainModel", self.config)

        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        # map_location keeps CPU-only machines from failing on GPU-saved weights.
        network = torch.load(self.checkpoint_path, map_location=device)
        network.eval()
        network.to(device)

        self.model = network
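Usage mirrors the OpenCV extractor; the 224x224 crop size comes from the afffe config above, and the NCHW layout is an assumption based on the torch.Tensor forward pass:

import numpy as np

extractor = AFFFE_2021()  # fetches AFFFE.py / AFFFE.pth on first use

face = np.random.rand(1, 3, 224, 224) * 255  # stand-in for a real 224x224 crop
embedding = extractor.transform(face)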
@@ -66,6 +66,7 @@ def run_baseline(baseline, samples_for_training=[], target_scores=None):

    # Regular pipeline
    pipeline = load_resource(baseline, "pipeline")
    scores = pipeline(samples_for_training, biometric_references, probes)
    assert len(scores) == 1
    assert len(scores[0]) == 1
@@ -174,11 +175,14 @@ def test_opencv_pipe():

@pytest.mark.slow
@is_library_available("torch")
-def test_pytorch_pipe_v1():
-    run_baseline("pytorch-pipe-v1", target_scores=None)
+def test_afffe():
+    run_baseline("afffe", target_scores=-0.7397219061544165)


@pytest.mark.slow
-@is_library_available("torch")
-def test_pytorch_pipe_v2():
-    run_baseline("pytorch-pipe-v2", target_scores=None)
+@is_library_available("cv2")
+def test_vgg16_oxford():
+    run_baseline("vgg16-oxford", target_scores=None)
setup.py

@@ -136,12 +136,9 @@ setup(
            "gabor-graph = bob.bio.face.config.baseline.gabor_graph:transformer",
            "lgbphs = bob.bio.face.config.baseline.lgbphs:transformer",
            "dummy = bob.bio.face.config.baseline.dummy:transformer",
-           "mxnet-pipe = bob.bio.face.config.baseline.mxnet_pipe:transformer",
-           "mxnet-tinyface = bob.bio.face.config.baseline.mxnet_tinyface:transformer",
-           "pytorch-pipe-v1 = bob.bio.face.config.baseline.pytorch_pipe_v1:transformer",
-           "pytorch-pipe-v2 = bob.bio.face.config.baseline.pytorch_pipe_v2:transformer",
-           "tf-pipe = bob.bio.face.config.baseline.tf_pipe:transformer",
-           "opencv-pipe = bob.bio.face.config.baseline.opencv_pipe:transformer",
+           "afffe = bob.bio.face.config.baseline.afffe:transformer",
+           "vgg16-oxford = bob.bio.face.config.baseline.vgg16_oxford:transformer",
        ],
        # baselines
        "bob.bio.pipeline": [
@@ -158,12 +155,9 @@ setup(
            "resnet50-msceleb-arcface-2021 = bob.bio.face.config.baseline.resnet50_msceleb_arcface_2021:pipeline",
            "resnet50-vgg2-arcface-2021 = bob.bio.face.config.baseline.resnet50_vgg2_arcface_2021:pipeline",
            "mobilenetv2-msceleb-arcface-2021 = bob.bio.face.config.baseline.mobilenetv2_msceleb_arcface_2021:pipeline",
-           "mxnet-pipe = bob.bio.face.config.baseline.mxnet_pipe:pipeline",
-           "mxnet-tinyface = bob.bio.face.config.baseline.mxnet_tinyface:pipeline",
-           "pytorch-pipe-v1 = bob.bio.face.config.baseline.pytorch_pipe_v1:pipeline",
-           "pytorch-pipe-v2 = bob.bio.face.config.baseline.pytorch_pipe_v2:pipeline",
-           "tf-pipe = bob.bio.face.config.baseline.tf_pipe:pipeline",
-           "opencv-pipe = bob.bio.face.config.baseline.opencv_pipe:pipeline",
+           "afffe = bob.bio.face.config.baseline.afffe:pipeline",
+           "vgg16-oxford = bob.bio.face.config.baseline.vgg16_oxford:pipeline",
        ],
"bob.bio.config": [
"facenet-sanderberg = bob.bio.face.config.baseline.facenet_sanderberg",
......@@ -177,10 +171,8 @@ setup(
"lda = bob.bio.face.config.baseline.lda",
"mxnet-pipe = bob.bio.face.config.baseline.mxnet_pipe",
"mxnet-tinyface = bob.bio.face.config.baseline.mxnet_tinyface",
"pytorch-pipe-v1 = bob.bio.face.config.baseline.pytorch_pipe_v1",
"pytorch-pipe-v2 = bob.bio.face.config.baseline.pytorch_pipe_v2",
"tf-pipe = bob.bio.face.config.baseline.tf_pipe",
"opencv-pipe = bob.bio.face.config.baseline.opencv_pipe",
"afffe = bob.bio.face.config.baseline.afffe",
"vgg16-oxford = bob.bio.face.config.baseline.vgg16_oxford",
"arface = bob.bio.face.config.database.arface",
"atnt = bob.bio.face.config.database.atnt",
"gbu = bob.bio.face.config.database.gbu",
@@ -219,4 +211,4 @@ setup(
        "Programming Language :: Python",
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
    ],
-)
\ No newline at end of file
+)