Commit f55cf333 authored by Tiago de Freitas Pereira

Porting baselines

parent 144f7748
Pipeline #40477 failed with stage in 14 minutes and 19 seconds
from bob.bio.face.embeddings import ArcFace_InsightFaceTF
from bob.bio.face.config.baseline.helpers import embedding_transformer_112x112
from bob.bio.base.pipelines.vanilla_biometrics import Distance, VanillaBiometricsPipeline

if "database" in locals():
@@ -11,3 +13,11 @@ else:
transformer = embedding_transformer_112x112(
    ArcFace_InsightFaceTF(), annotation_type, fixed_positions
)

algorithm = Distance()

pipeline = VanillaBiometricsPipeline(
    transformer,
    algorithm,
)
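For context, a minimal sketch of exercising such a baseline config, mirroring the call pattern used by the tests further below; the random face crop and the eye annotations are placeholder assumptions, not part of the original config:

import numpy as np
from bob.pipelines import Sample, SampleSet

# One reference and one probe sample set, bob's channels-first (C, H, W) layout
data = np.random.rand(3, 112, 112)
annotations = {"leye": (56, 75), "reye": (56, 37)}  # illustrative eye positions
biometric_references = [
    SampleSet(
        [Sample(data, key="1", annotations=annotations)],
        key="1", subject="1", references=["1"],
    )
]
probes = [
    SampleSet(
        [Sample(data, key="2", annotations=annotations)],
        key="2", subject="2", references=["1"],
    )
]
# First argument is the (here empty) list of background-model samples
scores = pipeline([], biometric_references, probes)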
from bob.bio.base.pipelines.vanilla_biometrics import Distance, VanillaBiometricsPipeline, BioAlgorithmLegacy
from bob.bio.face.helpers import face_crop_solver
import math
import numpy as np
import bob.bio.face
from sklearn.pipeline import make_pipeline
from bob.pipelines import wrap
import tempfile
#### SOLVING IF THERE'S ANY DATABASE INFORMATION
if "database" in locals():
    annotation_type = database.annotation_type
    fixed_positions = database.fixed_positions
else:
    annotation_type = None
    fixed_positions = None
####### SOLVING THE FACE CROPPER TO BE USED ##########

# Cropping
CROPPED_IMAGE_HEIGHT = 80
CROPPED_IMAGE_WIDTH = CROPPED_IMAGE_HEIGHT * 4 // 5

# eye positions for frontal images
RIGHT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 - 1)
LEFT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 * 3)

cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
color_channel = "gray"
if annotation_type == "bounding-box":
    transform_extra_arguments = (("annotations", "annotations"),)
    TOP_LEFT_POS = (0, 0)
    BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)

    # Crops the face from the bounding-box annotations, without eye detection
    face_cropper = face_crop_solver(
        cropped_image_size,
        color_channel=color_channel,
        cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS},
        fixed_positions=fixed_positions,
    )

elif annotation_type == "eyes-center":
    transform_extra_arguments = (("annotations", "annotations"),)

    # Crops the face by aligning the annotated eye centers to the fixed
    # frontal eye positions defined above
    face_cropper = face_crop_solver(
        cropped_image_size,
        color_channel=color_channel,
        cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
        fixed_positions=fixed_positions,
    )

else:
    transform_extra_arguments = None
    # Default to a simple resize of the full image
    face_cropper = face_crop_solver(cropped_image_size)
preprocessor = bob.bio.face.preprocessor.INormLBP(
    face_cropper=face_cropper,
    dtype=np.float64,
)
#### FEATURE EXTRACTOR ######
gabor_graph = bob.bio.face.extractor.GridGraph(
    # Gabor parameters
    gabor_sigma=math.sqrt(2.0) * math.pi,
    # what kind of information to extract
    normalize_gabor_jets=True,
    # setup of the fixed grid
    node_distance=(8, 8),
)
transformer = make_pipeline(
    wrap(
        ["sample"],
        preprocessor,
        transform_extra_arguments=transform_extra_arguments,
    ),
    wrap(["sample"], gabor_graph),
)
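The `("annotations", "annotations")` pair tells the sample wrapper to forward each Sample's `annotations` attribute as the `annotations` keyword of the wrapped transform. A minimal sketch of the data flow, with illustrative sample values:

from bob.pipelines import Sample, wrap
import numpy as np

s = Sample(
    np.random.rand(3, 112, 112),
    key="1",
    annotations={"leye": (56, 75), "reye": (56, 37)},
)
wrapped = wrap(
    ["sample"], preprocessor,
    transform_extra_arguments=(("annotations", "annotations"),),
)
# wrapped.transform([s]) effectively calls
# preprocessor.transform([s.data], annotations=[s.annotations])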
gabor_jet = bob.bio.face.algorithm.GaborJet(
    gabor_jet_similarity_type="PhaseDiffPlusCanberra",
    multiple_feature_scoring="max_jet",
    gabor_sigma=math.sqrt(2.0) * math.pi,
)

tempdir = tempfile.TemporaryDirectory()
algorithm = BioAlgorithmLegacy(gabor_jet, base_dir=tempdir.name)

pipeline = VanillaBiometricsPipeline(
    transformer,
    algorithm,
)
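`BioAlgorithmLegacy` scores through the legacy bob.bio.base GaborJet API and needs a writable `base_dir` for its intermediate files; the module-level `TemporaryDirectory` above lives, and is cleaned up, with the interpreter session. A hedged sketch of pointing it at a persistent location instead (the path is illustrative):

import os

work_dir = os.path.expanduser("~/gabor_graph_work")  # illustrative path
os.makedirs(work_dir, exist_ok=True)
algorithm = BioAlgorithmLegacy(gabor_jet, base_dir=work_dir)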
from bob.bio.face.embeddings import InceptionResnetv1_CasiaWebFace
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
from bob.bio.base.pipelines.vanilla_biometrics import Distance, VanillaBiometricsPipeline

if "database" in locals():
@@ -11,3 +13,10 @@ else:
transformer = embedding_transformer_160x160(
    InceptionResnetv1_CasiaWebFace(), annotation_type, fixed_positions
)

algorithm = Distance()

pipeline = VanillaBiometricsPipeline(
    transformer,
    algorithm,
)
from bob.bio.face.embeddings import InceptionResnetv1_MsCeleb
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
from bob.bio.base.pipelines.vanilla_biometrics import Distance, VanillaBiometricsPipeline

if "database" in locals():
@@ -11,3 +13,10 @@ else:
transformer = embedding_transformer_160x160(
    InceptionResnetv1_MsCeleb(), annotation_type, fixed_positions
)

algorithm = Distance()

pipeline = VanillaBiometricsPipeline(
    transformer,
    algorithm,
)
from bob.bio.face.embeddings import InceptionResnetv2_CasiaWebFace
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
from bob.bio.base.pipelines.vanilla_biometrics import Distance, VanillaBiometricsPipeline

if "database" in locals():
@@ -11,3 +13,10 @@ else:
transformer = embedding_transformer_160x160(
    InceptionResnetv2_CasiaWebFace(), annotation_type, fixed_positions
)

algorithm = Distance()

pipeline = VanillaBiometricsPipeline(
    transformer,
    algorithm,
)
from bob.bio.face.embeddings import InceptionResnetv2_MsCeleb
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
from bob.bio.base.pipelines.vanilla_biometrics import Distance, VanillaBiometricsPipeline

if "database" in locals():
@@ -10,4 +12,11 @@ else:
    fixed_positions = None

-transformer = embedding_transformer_160x160(InceptionResnetv2_MsCeleb(), annotation_type, fixed_positions)
\ No newline at end of file
+transformer = embedding_transformer_160x160(InceptionResnetv2_MsCeleb(), annotation_type, fixed_positions)

algorithm = Distance()

pipeline = VanillaBiometricsPipeline(
    transformer,
    algorithm,
)
@@ -129,11 +129,7 @@ class DCTBlocks(TransformerMixin, BaseEstimator):
             return self.dct_features(image)

         if isinstance(X, SampleBatch):
-            extracted = []
-            X = check_array(X, allow_nd=True)
-            for x in X:
-                extracted.append(_extract(x))
-            return extracted
+            return [_extract(x) for x in X]
         else:
             return _extract(X)
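The same replacement recurs in GridGraph and LGBPHS below, presumably because `check_array` coerces the whole batch into a single homogeneous ndarray and therefore fails for batches of differently-sized images, while iterating the `SampleBatch` directly handles each sample on its own. A two-line illustration (shapes are made up):

import numpy as np

batch = [np.zeros((80, 64)), np.zeros((112, 112))]  # ragged batch of images
# sklearn's check_array(batch, allow_nd=True) would reject the unequal
# shapes, whereas a plain comprehension processes each image independently:
features = [img.mean() for img in batch]  # stand-in for _extract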
......
@@ -73,7 +73,6 @@ class GridGraph(TransformerMixin, BaseEstimator):
        first_node=None,  # one or two integral values, or None -> automatically determined
    ):
        self.gabor_directions = gabor_directions
        self.gabor_scales = gabor_scales
        self.gabor_sigma = gabor_sigma
@@ -152,7 +151,7 @@ class GridGraph(TransformerMixin, BaseEstimator):
             return self._aligned_graph

         # check if a new extractor needs to be created
-        if self._last_image_resolution != image.shape:
+        if self._graph is None or self._last_image_resolution != image.shape:
             self._last_image_resolution = image.shape
             if self.first_node is None:
                 # automatically compute the first node
@@ -203,6 +202,8 @@ class GridGraph(TransformerMixin, BaseEstimator):
         """

         def _extract(image):
+            assert image.ndim == 2
+            assert isinstance(image, numpy.ndarray)
             image = image.astype(numpy.float64)
@@ -223,13 +224,9 @@ class GridGraph(TransformerMixin, BaseEstimator):
             return self.__class__.serialize_jets(jets)

         if isinstance(X, SampleBatch):
-            extracted = []
-            X = check_array(X, allow_nd=True)
-            for x in X:
-                extracted.append(_extract(x))
-            return extracted
+            return [_extract(x) for x in X]
         else:
             return _extract(X)

     def write_feature(self, feature, feature_file):
         """Writes the feature extracted by the `__call__` function to the given file.

@@ -293,4 +290,4 @@ class GridGraph(TransformerMixin, BaseEstimator):
         return {"stateless": True, "requires_fit": False}

     def fit(self, X, y=None):
-        return self
\ No newline at end of file
+        return self
@@ -276,11 +276,7 @@ class LGBPHS(TransformerMixin, BaseEstimator):
         if isinstance(X, SampleBatch):
-            extracted = []
-            X = check_array(X, allow_nd=True)
-            for x in X:
-                extracted.append(_extract(x))
-            return extracted
+            return [_extract(x) for x in X]
         else:
             return _extract(X)
......
@@ -44,6 +44,7 @@ class Base(TransformerMixin, BaseEstimator):
         channel : 2D or 3D :py:class:`numpy.ndarray`
             The extracted color channel.
         """
+        if image.ndim == 2:
             if self.channel == "rgb":
                 return bob.ip.color.gray_to_rgb(image)
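The added guard restricts the gray-to-color conversion to 2D inputs, so an image that already carries color channels is no longer pushed through `gray_to_rgb`. For reference, assuming bob.ip.color is installed:

import numpy as np
import bob.ip.color

gray = np.random.rand(112, 112)
rgb = bob.ip.color.gray_to_rgb(gray)  # shape (3, 112, 112), channels-first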
......
@@ -22,7 +22,8 @@ import bob.ip.base
 import numpy

 from .Base import Base
 from .utils import load_cropper
 from sklearn.utils import check_array
+from bob.pipelines.sample import SampleBatch


 class INormLBP(Base):
     """Performs I-Norm LBP on the given image"""
@@ -116,22 +117,20 @@ class INormLBP(Base):
             The cropped and photometrically enhanced face.
         """

-        def _crop(image, annotations):
+        def _crop(image, annotations=None):
             image = self.color_channel(image)
             if self.cropper is not None:
                 if not isinstance(self.cropper, bob.bio.face.preprocessor.FaceCrop):
                     self.cropper = self.cropper()
-                image = self.cropper.crop_face(image, annotations)
+                image = self.cropper.transform(image, annotations=annotations)
             image = self.lbp_extractor(image)
             return self.data_type(image)

-        if isinstance(annotations, list):
-            cropped_images = []
-            for img, annot in zip(X, annotations):
-                cropped_images.append(_crop(img, annot))
-            return cropped_images
+        if isinstance(X, SampleBatch):
+            if annotations is None:
+                return [_crop(data) for data in X]
+            else:
+                return [_crop(data, annot) for data, annot in zip(X, annotations)]
         else:
             return _crop(X, annotations)
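A minimal call sketch for the reworked transform; the constructor arguments are assumptions (`load_cropper` maps `face_cropper=None` to no cropping, and `dtype` appears in the gabor_graph config above):

import numpy as np
import bob.bio.face

# no cropping, gray input; the dtype cast happens in data_type()
inorm = bob.bio.face.preprocessor.INormLBP(face_cropper=None, dtype=np.float64)
image = np.random.rand(112, 112)
enhanced = inorm.transform(image)  # single-image path, annotations default to None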
......
@@ -31,7 +31,7 @@ class Scale(TransformerMixin, BaseEstimator):
     def fit(self, X, y=None):
         return self

-    def transform(self, X):
+    def transform(self, X, annotations=None):
         """
         Resize an image given a shape

@@ -44,16 +44,19 @@ class Scale(TransformerMixin, BaseEstimator):
         target_img_size: tuple
             Target image size
         """

         def _resize(x):
             return resize(x, self.target_img_size, anti_aliasing=True)

         X = check_array(X, allow_nd=True)

-        if X.ndim <= 3 and X.ndim >= 4:
+        if X.ndim < 2 or X.ndim > 4:
             raise ValueError(f"Invalid image shape {X.shape}")

+        if X.ndim == 2:
+            return _resize(X)
+
         if X.ndim == 3:
             # Checking if it's bob format CxHxW
             if X.shape[0] == 3:
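The old guard `X.ndim <= 3 and X.ndim >= 4` can never be true, so invalid shapes were silently accepted; the replacement rejects anything that is not 2D, 3D, or 4D and adds an explicit 2D path. Per image, `_resize` is just skimage's resize (the target size here is illustrative):

from skimage.transform import resize
import numpy as np

img = np.random.rand(112, 112)
out = resize(img, (64, 64), anti_aliasing=True)  # what _resize does per image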
......
@@ -10,16 +10,11 @@ def load_cropper(face_cropper):
         cropper = None
     elif isinstance(face_cropper, six.string_types):
         cropper = bob.bio.base.load_resource(face_cropper, "preprocessor")
-    elif isinstance(face_cropper, (FaceCrop, FaceDetect)):
-        cropper = face_cropper
     else:
-        raise ValueError("The given face cropper type is not understood")
+        cropper = face_cropper

+    assert (
+        cropper is None
+        or isinstance(cropper, (FaceCrop, FaceDetect))
+        or isinstance(cropper, functools.partial)
+    )
     return cropper
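The relaxed branch accepts any value and defers validation to the assert, which notably now admits a `functools.partial`. A hedged usage sketch; the import path is inferred from the relative imports above, and the FaceCrop parameters are illustrative:

import functools
from bob.bio.face.preprocessor import FaceCrop
from bob.bio.face.preprocessor.utils import load_cropper  # assumed module path

cropper = load_cropper(
    functools.partial(
        FaceCrop,
        cropped_image_size=(80, 64),
        cropped_positions={"leye": (16, 48), "reye": (16, 15)},
    )
)
# the partial passes the assert and can be instantiated lazily by callers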
......
 from bob.extension.config import load
 import pkg_resources
 import numpy as np
-from bob.pipelines import Sample, SampleSet
+from bob.pipelines import Sample, SampleSet, DelayedSample
 from bob.bio.base import load_resource
+from bob.bio.base.pipelines.vanilla_biometrics import checkpoint_vanilla_biometrics, dask_vanilla_biometrics
+import tempfile
+import os
+import bob.io.base
+import functools
+import copy


-def get_fake_sample_set(
-    face_size=(160, 160), eyes={"leye": (46, 107), "reye": (46, 53)}
-):
-    data = np.random.rand(3, 400, 400)
-    annotations = {"leye": (115, 267), "reye": (115, 132)}
+images = dict()
+images["bioref"] = (
+    pkg_resources.resource_filename("bob.bio.face.test", "data/testimage.jpg"),
+    {"reye": (131, 176), "leye": (222, 170)},
+)
+images["probe"] = (
+    pkg_resources.resource_filename("bob.bio.face.test", "data/ada.png"),
+    {"reye": (440, 207), "leye": (546, 207)},
+)
+
+
+def get_fake_sample_set(face_size=(160, 160), purpose="bioref"):
+    data = images[purpose][0]
+    annotations = images[purpose][1]
+    key = "1" if purpose == "bioref" else "2"
     return [
         SampleSet(
-            [Sample(data, key="1", annotations=annotations)],
-            key="1",
-            subject="1",
+            [
+                DelayedSample(
+                    load=functools.partial(bob.io.base.load, data),
+                    key=key,
+                    annotations=annotations,
+                )
+            ],
+            key=key,
+            subject=key,
             references=["1"],
         )
     ]
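The fixture now builds `DelayedSample`s, which defer file I/O until `.data` is first accessed; a minimal illustration of that behaviour, using the same test resource as above:

import functools
import pkg_resources
import bob.io.base
from bob.pipelines import DelayedSample

path = pkg_resources.resource_filename("bob.bio.face.test", "data/testimage.jpg")
sample = DelayedSample(load=functools.partial(bob.io.base.load, path), key="1")
image = sample.data  # bob.io.base.load(path) runs here, not at construction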
-def test_facenet_baseline():
-    biometric_references = get_fake_sample_set()
-    probes = get_fake_sample_set()
+def run_baseline(baseline):
+    biometric_references = get_fake_sample_set(purpose="bioref")
+    probes = get_fake_sample_set(purpose="probe")

     # Regular pipeline
-    pipeline = load_resource("facenet_sanderberg", "baseline")
+    pipeline = load_resource(baseline, "baseline")
     scores = pipeline([], biometric_references, probes)
-    assert len(scores)==1
-    assert len(scores[0])==1
+    assert len(scores) == 1
+    assert len(scores[0]) == 1

-    # Regular with
+    # CHECKPOINTING
     with tempfile.TemporaryDirectory() as d:
-        # fake_sample = get_fake_sample()
-        # transformed_sample = transformer.transform([fake_sample])[0]
-        # transformed_data = transformed_sample.data
-        # assert transformed_sample.data.size == 128
+        checkpoint_pipeline = checkpoint_vanilla_biometrics(copy.deepcopy(pipeline), base_dir=d)
+        checkpoint_scores = checkpoint_pipeline([], biometric_references, probes)
+        assert len(checkpoint_scores) == 1
+        assert len(checkpoint_scores[0]) == 1
+        assert np.isclose(scores[0][0].data, checkpoint_scores[0][0].data)
+
+        dirs = os.listdir(d)
+        assert "biometric_references" in dirs
+        assert "samplewrapper-1" in dirs
+        assert "samplewrapper-2" in dirs
+        assert "scores" in dirs

+    # DASK
+    with tempfile.TemporaryDirectory() as d:
+        dask_pipeline = dask_vanilla_biometrics(checkpoint_vanilla_biometrics(copy.deepcopy(pipeline), base_dir=d))
+        dask_scores = dask_pipeline([], biometric_references, probes)
+        dask_scores = dask_scores.compute(scheduler="single-threaded")
+        assert len(dask_scores) == 1
+        assert len(dask_scores[0]) == 1
+        assert np.isclose(scores[0][0].data, dask_scores[0][0].data)
+
+        dirs = os.listdir(d)
+        assert "biometric_references" in dirs
+        assert "samplewrapper-1" in dirs
+        assert "samplewrapper-2" in dirs
+        assert "scores" in dirs


-def test_inception_resnetv2_msceleb():
-    transformer = load_resource("inception_resnetv2_msceleb", "baseline")
-    fake_sample = get_fake_sample()
-    transformed_sample = transformer.transform([fake_sample])[0]
-    transformed_data = transformed_sample.data
-    assert transformed_sample.data.size == 128
+def test_facenet_baseline():
+    run_baseline("facenet_sanderberg")


-def test_inception_resnetv2_casiawebface():
-    transformer = load_resource("inception_resnetv2_casiawebface", "baseline")
-    fake_sample = get_fake_sample()
-    transformed_sample = transformer.transform([fake_sample])[0]
-    transformed_data = transformed_sample.data
-    assert transformed_sample.data.size == 128
+def test_inception_resnetv2_msceleb():
+    run_baseline("inception_resnetv2_msceleb")


-def test_inception_resnetv1_msceleb():
-    transformer = load_resource("inception_resnetv1_msceleb", "baseline")
-    fake_sample = get_fake_sample()
-    transformed_sample = transformer.transform([fake_sample])[0]
-    transformed_data = transformed_sample.data
-    assert transformed_sample.data.size == 128
+def test_inception_resnetv2_casiawebface():
+    run_baseline("inception_resnetv2_casiawebface")


-def test_inception_resnetv1_casiawebface():
-    transformer = load_resource("inception_resnetv1_casiawebface", "baseline")
-    fake_sample = get_fake_sample()
-    transformed_sample = transformer.transform([fake_sample])[0]
-    transformed_data = transformed_sample.data
-    assert transformed_sample.data.size == 128
+def test_inception_resnetv1_msceleb():
+    run_baseline("inception_resnetv1_msceleb")


+def test_inception_resnetv1_casiawebface():
+    run_baseline("inception_resnetv1_casiawebface")


 def test_arcface_insight_tf():
     import tensorflow as tf
     tf.compat.v1.reset_default_graph()

-    transformer = load_resource("arcface_insight_tf", "baseline")
-    fake_sample = get_fake_sample()
-    transformed_sample = transformer.transform([fake_sample])[0]
-    transformed_data = transformed_sample.data
-    assert transformed_sample.data.size == 512
+    run_baseline("arcface_insight_tf")


+def test_gabor_graph():
+    run_baseline("gabor_graph")
\ No newline at end of file
@@ -11,8 +11,8 @@ def get_fake_sample(face_size=(160, 160), eyes={"leye": (46, 107), "reye": (46, 53)}):
     return Sample(data, key="1", annotations=annotations)


-def test_facenet_baseline():
-    transformer = load_resource("facenet_sanderberg", "baseline")
+def test_facenet():
+    transformer = load_resource("facenet_sanderberg", "transformer")
     fake_sample = get_fake_sample()

@@ -22,7 +22,7 @@ def test_facenet_baseline():
 def test_inception_resnetv2_msceleb():
-    transformer = load_resource("inception_resnetv2_msceleb", "baseline")
+    transformer = load_resource("inception_resnetv2_msceleb", "transformer")
     fake_sample = get_fake_sample()

@@ -32,7 +32,7 @@ def test_inception_resnetv2_msceleb():
 def test_inception_resnetv2_casiawebface():
-    transformer = load_resource("inception_resnetv2_casiawebface", "baseline")
+    transformer = load_resource("inception_resnetv2_casiawebface", "transformer")
     fake_sample = get_fake_sample()

@@ -42,7 +42,7 @@ def test_inception_resnetv2_casiawebface():
 def test_inception_resnetv1_msceleb():
-    transformer = load_resource("inception_resnetv1_msceleb", "baseline")
+    transformer = load_resource("inception_resnetv1_msceleb", "transformer")
     fake_sample = get_fake_sample()

@@ -52,7 +52,7 @@ def test_inception_resnetv1_msceleb():
 def test_inception_resnetv1_casiawebface():
-    transformer = load_resource("inception_resnetv1_casiawebface", "baseline")
+    transformer = load_resource("inception_resnetv1_casiawebface", "transformer")
     fake_sample = get_fake_sample()

@@ -64,10 +64,21 @@ def test_inception_resnetv1_casiawebface():
 def test_arcface_insight_tf():
     import tensorflow as tf