Commit 6d9351d7 authored by Tiago de Freitas Pereira

Merge branch 'redoing-pipelines' into 'dask-pipelines'

Adding some baselines as transformers

See merge request !66
parents 1697a446 8a6cca79
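The configs below re-express the legacy baselines as scikit-learn style transformer pipelines. As a hedged sketch (flags and resource names are illustrative, not taken from this diff), such a config would be run through the vanilla-biometrics CLI:

    bob bio pipelines vanilla-biometrics -d <database> -p <pipeline-config> -o <output-dir>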
@@ -4,7 +4,6 @@ from . import algorithm
 from . import script
 from . import database
 from . import annotator
-from . import baseline
 from . import test
 
 __all__ = [_ for _ in dir() if not _.startswith('_')]
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
"""
Defining some face recognition baselines
"""
from bob.bio.base.baseline import Baseline
lda = Baseline(name="lda",
               preprocessors={'default': 'face-crop-eyes', 'atnt': 'base'},
               extractor='eigenface',
               algorithm='lda')

plda = Baseline(name="plda",
                preprocessors={'default': 'face-crop-eyes', 'atnt': 'base'},
                extractor='linearize',
                algorithm='pca+plda')

gabor_graph = Baseline(name="gabor_graph",
                       preprocessors={'default': 'inorm-lbp-crop', 'atnt': 'inorm-lbp'},
                       extractor='grid-graph',
                       algorithm='gabor-jet')

lgbphs = Baseline(name="lgbphs",
                  preprocessors={'default': 'tan-triggs-crop', 'atnt': 'tan-triggs'},
                  extractor='lgbphs',
                  algorithm='histogram')

gmm = Baseline(name="gmm",
               preprocessors={'default': 'tan-triggs-crop', 'atnt': 'tan-triggs'},
               extractor='dct-blocks',
               algorithm='gmm')

isv = Baseline(name="isv",
               preprocessors={'default': 'tan-triggs-crop', 'atnt': 'tan-triggs'},
               extractor='dct-blocks',
               algorithm='isv')

ivector = Baseline(name="ivector",
                   preprocessors={'default': 'tan-triggs-crop', 'atnt': 'tan-triggs'},
                   extractor='dct-blocks',
                   algorithm='ivector-cosine')

bic = Baseline(name="bic",
               preprocessors={'default': 'face-crop-eyes', 'atnt': 'base'},
               extractor='grid-graph',
               algorithm='bic-jets')
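# Hedged usage sketch (the runner command comes from the legacy baseline
# framework; resource names are illustrative): a Baseline above is selected
# by name, e.g.
#   bob bio baseline gabor_graph atnt
# which resolves the 'atnt' entry of `preprocessors` and chains
# preprocessor -> extractor -> algorithm.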
#!/usr/bin/env python
import bob.bio.base
import bob.ip.gabor
similarity_function = bob.ip.gabor.Similarity("PhaseDiffPlusCanberra", bob.ip.gabor.Transform())
def gabor_jet_similarities(f1, f2):
    """Computes the similarity vector between two Gabor graph features"""
    assert len(f1) == len(f2)
    return [similarity_function(f1[i], f2[i]) for i in range(len(f1))]
algorithm = bob.bio.base.algorithm.BIC(
    # measure to compare two features in input space
    comparison_function=gabor_jet_similarities,
    # load and save functions
    read_function=bob.ip.gabor.load_jets,
    write_function=bob.ip.gabor.save_jets,
    # Limit the number of training pairs
    maximum_training_pair_count=1000000,
    # Dimensions of intrapersonal and extrapersonal subspaces
    subspace_dimensions=(20, 20),
    multiple_model_scoring='max'
)
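# For reference (not part of this config): BIC models the per-node similarity
# vectors produced by gabor_jet_similarities as two classes, intrapersonal
# (same identity) and extrapersonal (different identities), and scores a
# probe by comparing the likelihoods under the two classes.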
#!/usr/bin/env python
import bob.bio.face
import math
algorithm = bob.bio.face.algorithm.GaborJet(
    # Gabor jet comparison
    gabor_jet_similarity_type='PhaseDiffPlusCanberra',
    multiple_feature_scoring='max_jet',
    # Gabor wavelet setup
    gabor_sigma=math.sqrt(2.) * math.pi,
)
#!/usr/bin/env python
import bob.bio.face
import bob.math
algorithm = bob.bio.face.algorithm.Histogram(
    distance_function=bob.math.histogram_intersection,
    is_distance_function=False
)
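# For reference, histogram intersection is a similarity measure, not a
# distance (hence is_distance_function=False). A minimal NumPy sketch of the
# same measure, assuming equal-length 1D histograms:
#   import numpy as np
#   def histogram_intersection(h1, h2):
#       return np.minimum(h1, h2).sum()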
from bob.bio.face.embeddings import ArcFace_InsightFaceTF
from bob.bio.face.config.baseline.helpers import embedding_transformer_112x112
from bob.bio.base.pipelines.vanilla_biometrics import Distance, VanillaBiometricsPipeline
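# When this file is chained after a database config on the command line, the
# `database` object is already present in this file's globals; otherwise we
# fall back to no annotations and no fixed positions.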
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
else:
annotation_type = None
fixed_positions = None
transformer = embedding_transformer_112x112(ArcFace_InsightFaceTF(), annotation_type, fixed_positions)
algorithm = Distance()
pipeline = VanillaBiometricsPipeline(
transformer,
algorithm
)
from bob.bio.face.embeddings import FaceNetSanderberg
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
)
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
else:
annotation_type = None
fixed_positions = None
transformer = embedding_transformer_160x160(
FaceNetSanderberg(), annotation_type, fixed_positions
)
algorithm = Distance()
pipeline = VanillaBiometricsPipeline(transformer, algorithm)
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
BioAlgorithmLegacy,
)
from bob.bio.face.config.baseline.helpers import crop_80x64
import math
import numpy as np
import bob.bio.face
from sklearn.pipeline import make_pipeline
from bob.pipelines import wrap
import tempfile
#### SOLVING IF THERE'S ANY DATABASE INFORMATION
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
else:
annotation_type = None
fixed_positions = None
####### SOLVING THE FACE CROPPER TO BE USED ##########
# Cropping
face_cropper, transform_extra_arguments = crop_80x64(
    annotation_type, fixed_positions, color_channel="gray"
)

preprocessor = bob.bio.face.preprocessor.INormLBP(
    face_cropper=face_cropper, dtype=np.float64
)

#### FEATURE EXTRACTOR ######

# legacy objects need to be wrapped with legacy transformers
from bob.bio.base.transformers import ExtractorTransformer

gabor_graph = ExtractorTransformer(
    bob.bio.face.extractor.GridGraph(
        # Gabor parameters
        gabor_sigma=math.sqrt(2.0) * math.pi,
        # what kind of information to extract
        normalize_gabor_jets=True,
        # setup of the fixed grid
        node_distance=(8, 8),
    )
)

transformer = make_pipeline(
    wrap(
        ["sample"], preprocessor, transform_extra_arguments=transform_extra_arguments,
    ),
    wrap(["sample"], gabor_graph),
)

gabor_jet = bob.bio.face.algorithm.GaborJet(
    gabor_jet_similarity_type="PhaseDiffPlusCanberra",
    multiple_feature_scoring="max_jet",
    gabor_sigma=math.sqrt(2.0) * math.pi,
)
tempdir = tempfile.TemporaryDirectory()
algorithm = BioAlgorithmLegacy(gabor_jet, base_dir=tempdir.name)
pipeline = VanillaBiometricsPipeline(transformer, algorithm)
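# Note: BioAlgorithmLegacy adapts the legacy GaborJet enroll/score interface
# to the vanilla-biometrics pipeline; base_dir points at a throwaway
# temporary directory used for its checkpoints.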
import bob.bio.face
from sklearn.pipeline import make_pipeline
from bob.bio.base.wrappers import wrap_sample_preprocessor
from bob.pipelines import wrap
from bob.bio.face.helpers import face_crop_solver
def embedding_transformer_160x160(embedding, annotation_type, fixed_positions):
    r"""
    Creates a pipeline composed of a FaceCropper and an embedding extractor.
    This transformer is suited for FaceNet-based architectures.

    .. warning::
       This will resize images to :math:`160 \times 160`
    """
    # This is the size of the image that this model expects
    CROPPED_IMAGE_HEIGHT = 160
    CROPPED_IMAGE_WIDTH = 160
    cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
    color_channel = "rgb"

    #### SOLVING THE FACE CROPPER TO BE USED
    if annotation_type == "bounding-box":
        transform_extra_arguments = (("annotations", "annotations"),)
        TOP_LEFT_POS = (0, 0)
        BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)

        # Crops the face given a bounding box, without eye detection
        face_cropper = face_crop_solver(
            cropped_image_size,
            color_channel=color_channel,
            cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS},
            fixed_positions=fixed_positions,
        )
    elif annotation_type == "eyes-center":
        transform_extra_arguments = (("annotations", "annotations"),)
        # eye positions for frontal images
        RIGHT_EYE_POS = (46, 53)
        LEFT_EYE_POS = (46, 107)

        # Crops the face using the eye-center annotations
        face_cropper = face_crop_solver(
            cropped_image_size,
            color_channel=color_channel,
            cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
            fixed_positions=fixed_positions,
        )
    else:
        transform_extra_arguments = None
        # DEFAULT TO FACE SIMPLE RESIZE
        face_cropper = face_crop_solver(cropped_image_size)

    transformer = make_pipeline(
        wrap(
            ["sample"],
            face_cropper,
            transform_extra_arguments=transform_extra_arguments,
        ),
        wrap(["sample"], embedding),
    )

    return transformer
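# Minimal usage sketch (embedding class and inputs are illustrative):
#   from bob.bio.face.embeddings import FaceNetSanderberg
#   transformer = embedding_transformer_160x160(FaceNetSanderberg(), "eyes-center", None)
#   embeddings = transformer.transform(samples)  # `samples`: bob.pipelines samples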
def embedding_transformer_112x112(embedding, annotation_type, fixed_positions):
    r"""
    Creates a pipeline composed of a FaceCropper and an embedding extractor.
    This transformer is suited for ArcFace/InsightFace based architectures.

    .. warning::
       This will resize images to :math:`112 \times 112`
    """
    # This is the size of the image that this model expects
    CROPPED_IMAGE_HEIGHT = 112
    CROPPED_IMAGE_WIDTH = 112
    cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
    color_channel = "rgb"

    #### SOLVING THE FACE CROPPER TO BE USED
    if annotation_type == "bounding-box":
        transform_extra_arguments = (("annotations", "annotations"),)
        TOP_LEFT_POS = (0, 0)
        BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)

        # Crops the face given a bounding box, without eye detection
        face_cropper = face_crop_solver(
            cropped_image_size,
            color_channel=color_channel,
            cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS},
            fixed_positions=fixed_positions,
        )
    elif annotation_type == "eyes-center":
        transform_extra_arguments = (("annotations", "annotations"),)
        # eye positions for frontal images
        RIGHT_EYE_POS = (32, 34)
        LEFT_EYE_POS = (32, 77)

        # Crops the face using the eye-center annotations
        face_cropper = face_crop_solver(
            cropped_image_size,
            color_channel=color_channel,
            cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
            fixed_positions=fixed_positions,
        )
    else:
        transform_extra_arguments = None
        # DEFAULT TO FACE SIMPLE RESIZE
        face_cropper = face_crop_solver(cropped_image_size)

    transformer = make_pipeline(
        wrap(
            ["sample"],
            face_cropper,
            transform_extra_arguments=transform_extra_arguments,
        ),
        wrap(["sample"], embedding),
    )

    return transformer
def crop_80x64(annotation_type, fixed_positions=None, color_channel="gray"):
    r"""
    Crops a face to :math:`80 \times 64`

    Parameters
    ----------

    annotation_type: str
        Type of annotations. Possible values are: `bounding-box`, `eyes-center` and None

    fixed_positions: tuple
        A tuple containing the annotations. This is used in case your input is already registered
        with fixed positions (eyes or bounding box)

    color_channel: str
        The color channel the cropper should output (e.g. "gray")

    Returns
    -------

    face_cropper:
        A face cropper to be used

    transform_extra_arguments:
        The parameters to the transformer
    """
    # Cropping
    CROPPED_IMAGE_HEIGHT = 80
    CROPPED_IMAGE_WIDTH = CROPPED_IMAGE_HEIGHT * 4 // 5

    # eye positions for frontal images
    RIGHT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 - 1)
    LEFT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 * 3)

    cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)

    if annotation_type == "bounding-box":
        transform_extra_arguments = (("annotations", "annotations"),)
        TOP_LEFT_POS = (0, 0)
        BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)

        # Crops the face given a bounding box, without eye detection
        face_cropper = face_crop_solver(
            cropped_image_size,
            color_channel=color_channel,
            cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS},
            fixed_positions=fixed_positions,
        )
    elif annotation_type == "eyes-center":
        transform_extra_arguments = (("annotations", "annotations"),)

        # Crops the face using the eye-center positions defined above
        face_cropper = face_crop_solver(
            cropped_image_size,
            color_channel=color_channel,
            cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
            fixed_positions=fixed_positions,
        )
    else:
        transform_extra_arguments = None
        # DEFAULT TO FACE SIMPLE RESIZE
        face_cropper = face_crop_solver(cropped_image_size)

    return face_cropper, transform_extra_arguments
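# Usage sketch, mirroring how the gabor_graph config consumes this helper:
#   face_cropper, transform_extra_arguments = crop_80x64(
#       annotation_type, fixed_positions, color_channel="gray"
#   )
#   wrapped = wrap(["sample"], face_cropper,
#                  transform_extra_arguments=transform_extra_arguments)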
from bob.bio.face.embeddings import InceptionResnetv1_CasiaWebFace
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
from bob.bio.base.pipelines.vanilla_biometrics import Distance, VanillaBiometricsPipeline
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
else:
annotation_type = None
fixed_positions = None
transformer = embedding_transformer_160x160(InceptionResnetv1_CasiaWebFace(), annotation_type, fixed_positions)
algorithm = Distance()
pipeline = VanillaBiometricsPipeline(
transformer,
algorithm
)
from bob.bio.face.embeddings import InceptionResnetv1_MsCeleb
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
from bob.bio.base.pipelines.vanilla_biometrics import Distance, VanillaBiometricsPipeline
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
else:
annotation_type = None
fixed_positions = None
transformer = embedding_transformer_160x160(InceptionResnetv1_MsCeleb(), annotation_type, fixed_positions)
algorithm = Distance()
pipeline = VanillaBiometricsPipeline(
transformer,
algorithm
)
from bob.bio.face.embeddings import InceptionResnetv2_CasiaWebFace
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
from bob.bio.base.pipelines.vanilla_biometrics import Distance, VanillaBiometricsPipeline
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
else:
annotation_type = None
fixed_positions = None
transformer = embedding_transformer_160x160(InceptionResnetv2_CasiaWebFace(), annotation_type, fixed_positions)
algorithm = Distance()
pipeline = VanillaBiometricsPipeline(
transformer,
algorithm
)
from bob.bio.face.embeddings import InceptionResnetv2_MsCeleb
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
from bob.bio.base.pipelines.vanilla_biometrics import Distance, VanillaBiometricsPipeline
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
else:
annotation_type = None
fixed_positions = None
transformer = embedding_transformer_160x160(InceptionResnetv2_MsCeleb(), annotation_type, fixed_positions)
algorithm = Distance()
pipeline = VanillaBiometricsPipeline(
transformer,
algorithm
)
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
BioAlgorithmLegacy,
)
from bob.bio.face.config.baseline.helpers import crop_80x64
import numpy as np
import bob.bio.base
import bob.bio.face
from sklearn.pipeline import make_pipeline
from bob.pipelines import wrap
import tempfile
from bob.bio.base.transformers import AlgorithmTransformer
from bob.pipelines.transformers import SampleLinearize
import os
#### SOLVING IF THERE'S ANY DATABASE INFORMATION
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
else:
annotation_type = None
fixed_positions = None
####### SOLVING THE FACE CROPPER TO BE USED ##########
# Cropping
face_cropper, transform_extra_arguments = crop_80x64(
annotation_type, fixed_positions, color_channel="gray"
)
preprocessor = bob.bio.face.preprocessor.TanTriggs(
    face_cropper=face_cropper, dtype=np.float64
)

#### FEATURE EXTRACTOR ######
tempdir = tempfile.TemporaryDirectory()

lda = bob.bio.base.algorithm.LDA(use_pinv=True, pca_subspace_dimension=0.90)

lda_transformer = AlgorithmTransformer(
    lda, projector_file=os.path.join(tempdir.name, "Projector.hdf5")
)

transformer = make_pipeline(
    wrap(
        ["sample"], preprocessor, transform_extra_arguments=transform_extra_arguments,
    ),
    SampleLinearize(),
    wrap(["sample"], lda_transformer),
)

### BIOMETRIC ALGORITHM
algorithm = BioAlgorithmLegacy(
    lda,
    base_dir=tempdir.name,
    projector_file=os.path.join(tempdir.name, "Projector.hdf5"),
)
pipeline = VanillaBiometricsPipeline(transformer, algorithm)
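# Note: AlgorithmTransformer trains the LDA projector during fit and
# checkpoints it to Projector.hdf5; BioAlgorithmLegacy points at the same
# file so enrollment and scoring reuse that projection.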
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
BioAlgorithmLegacy,
)
from bob.bio.face.config.baseline.helpers import crop_80x64
import math