Commit 195e72ae authored by Amir MOHAMMADI

Merge branch 'dask-pipelines'

parents 501942e8 a1c2d24a
Pipeline #44912 failed with stages in 1 minute and 39 seconds
@@ -13,3 +13,4 @@ sphinx
dist
record.txt
build/
bob/bio/face/embeddings/data
@@ -4,7 +4,6 @@ from . import algorithm
from . import script
from . import database
from . import annotator
from . import baseline
from . import test
......
@@ -296,7 +296,6 @@ class GaborJet(Algorithm):
return graph_scoring(local_scores)
def score_for_multiple_models(self, models, probe):
self._check_feature(probe)
        for model in models:
            for m in model:
                self._check_feature(m)
......
@@ -155,4 +155,10 @@ class Histogram (Algorithm):
def read_feature(*args, **kwargs): raise NotImplementedError("This function is not implemented and should not be called.")
def train_enroller(*args, **kwargs): raise NotImplementedError("This function is not implemented and should not be called.")
def load_enroller(*args, **kwargs): pass
def score_for_multiple_models(*args, **kwargs): raise NotImplementedError("This function is not implemented and should not be called.")
def score_for_multiple_models(self, models, probe):
self._check_feature(probe, self._is_sparse(probe))
    scores = [self.score(m, probe) for m in models]
return scores
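# Hedged sketch: downstream code typically fuses this per-model score list,
# e.g. with a 'max' strategy; the values below are stand-ins, not real
# histogram scores.
scores = [0.62, 0.71, 0.58]  # as returned by score_for_multiple_models
fused = max(scores)          # 0.71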
@@ -5,9 +5,6 @@ import bob.bio.face.preprocessor # import for documentation
class Base(bob.bio.base.annotator.Annotator):
"""Base class for all face annotators"""
def __init__(self, **kwargs):
super(Base, self).__init__(**kwargs)
def annotate(self, sample, **kwargs):
"""Annotates an image and returns annotations in a dictionary. All
annotator should return at least the ``topleft`` and ``bottomright``
......
__all__ = [_ for _ in dir() if not _.startswith('_')]
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
"""
Defining some face recognition baselines
"""
from bob.bio.base.baseline import Baseline
eigenface = Baseline(name="eigenface",
preprocessors={'default': 'face-crop-eyes', 'atnt': 'base'},
extractor='linearize',
algorithm='pca')
lda = Baseline(name="lda",
preprocessors={'default': 'face-crop-eyes', 'atnt': 'base'},
extractor='eigenface',
algorithm='lda')
plda = Baseline(name="plda",
preprocessors={'default': 'face-crop-eyes', 'atnt': 'base'},
extractor='linearize',
algorithm='pca+plda')
gabor_graph = Baseline(name="gabor_graph",
preprocessors={'default': 'inorm-lbp-crop', 'atnt': 'inorm-lbp'},
extractor='grid-graph',
algorithm='gabor-jet')
lgbphs = Baseline(name="lgbphs",
preprocessors={'default': 'tan-triggs-crop', 'atnt': 'tan-triggs'},
extractor='lgbphs',
algorithm='histogram')
gmm = Baseline(name="gmm",
preprocessors={'default': 'tan-triggs-crop', 'atnt': 'tan-triggs'},
extractor='dct-blocks',
algorithm='gmm')
isv = Baseline(name="isv",
preprocessors={'default': 'tan-triggs-crop', 'atnt': 'tan-triggs'},
extractor='dct-blocks',
algorithm='isv')
ivector = Baseline(name="gmm",
preprocessors={'default': 'tan-triggs-crop', 'atnt': 'tan-triggs'},
extractor='dct-blocks',
algorithm='ivector-cosine')
bic = Baseline(name="bic",
preprocessors={'default': 'face-crop-eyes', 'atnt': 'base'},
extractor='grid-graph',
algorithm='bic-jets')
#!/usr/bin/env python
import bob.bio.base
import bob.ip.gabor
similarity_function = bob.ip.gabor.Similarity("PhaseDiffPlusCanberra", bob.ip.gabor.Transform())
def gabor_jet_similarities(f1, f2):
"""Computes the similarity vector between two Gabor graph features"""
assert len(f1) == len(f2)
return [similarity_function(f1[i], f2[i]) for i in range(len(f1))]
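# For intuition, a hedged sketch: the per-node similarity vector is later
# fused into a single score (e.g. an 'average' strategy); the values below
# are stand-ins, not real Gabor jets.
local_scores = [0.80, 0.90, 0.75]  # stand-in for gabor_jet_similarities(f1, f2)
score = sum(local_scores) / len(local_scores)  # 0.8166...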
algorithm = bob.bio.base.algorithm.BIC(
# measure to compare two features in input space
comparison_function = gabor_jet_similarities,
# load and save functions
read_function = bob.ip.gabor.load_jets,
write_function = bob.ip.gabor.save_jets,
# Limit the number of training pairs
maximum_training_pair_count = 1000000,
# Dimensions of intrapersonal and extrapersonal subspaces
subspace_dimensions = (20, 20),
multiple_model_scoring = 'max'
)
#!/usr/bin/env python
import bob.bio.face
import math
algorithm = bob.bio.face.algorithm.GaborJet(
# Gabor jet comparison
gabor_jet_similarity_type = 'PhaseDiffPlusCanberra',
multiple_feature_scoring = 'max_jet',
# Gabor wavelet setup
gabor_sigma = math.sqrt(2.) * math.pi,
)
#!/usr/bin/env python
import bob.bio.face
import bob.math
algorithm = bob.bio.face.algorithm.Histogram(
distance_function = bob.math.histogram_intersection,
is_distance_function = False
)
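# Since bob.math.histogram_intersection is a similarity (larger means more
# alike), is_distance_function is False above. A minimal numpy sketch of the
# measure itself, for illustration only (not the bob.math implementation):
import numpy as np

def histogram_intersection(h1, h2):
    # Sum of element-wise minima; identical normalized histograms give 1.0.
    return np.minimum(h1, h2).sum()

print(histogram_intersection(np.array([0.2, 0.5, 0.3]),
                             np.array([0.1, 0.6, 0.3])))  # 0.9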
from bob.bio.face.embeddings import ArcFace_InsightFaceTF
from bob.bio.face.config.baseline.helpers import embedding_transformer_112x112
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
)
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
else:
annotation_type = None
fixed_positions = None
def load(annotation_type, fixed_positions=None):
transformer = embedding_transformer_112x112(
ArcFace_InsightFaceTF(), annotation_type, fixed_positions
)
algorithm = Distance()
return VanillaBiometricsPipeline(transformer, algorithm)
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
\ No newline at end of file
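# Hedged usage sketch: when chain-loaded after a database config, `database`
# supplies annotation_type; without one, the same pipeline can be rebuilt by
# hand for a given annotation type (hypothetical call, eye-annotated data):
eyes_pipeline = load("eyes-center")
eyes_transformer = eyes_pipeline.transformer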
from bob.pipelines import wrap
from sklearn.pipeline import make_pipeline
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
)
from bob.pipelines.transformers import SampleLinearize
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
else:
annotation_type = None
fixed_positions = None
import bob.ip.color
from sklearn.base import TransformerMixin, BaseEstimator
class ToGray(TransformerMixin, BaseEstimator):
    """Toy transformer: converts RGB images to gray and keeps only a 10x10 crop."""
    def transform(self, X, annotations=None):
        return [bob.ip.color.rgb_to_gray(data)[0:10, 0:10] for data in X]
def _more_tags(self):
return {"stateless": True, "requires_fit": False}
def fit(self, X, y=None):
return self
def load(annotation_type, fixed_positions=None):
transform_extra_arguments = (("annotations", "annotations"),)
transformer = make_pipeline(
wrap(
["sample"],
ToGray(),
transform_extra_arguments=transform_extra_arguments,
),
SampleLinearize(),
)
algorithm = Distance()
return VanillaBiometricsPipeline(transformer, algorithm)
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
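# A hedged sketch of how the sample-wrapped ToGray consumes data:
# wrap(["sample"], ...) makes the transformer operate on bob.pipelines
# Sample objects, and transform_extra_arguments forwards each sample's
# `annotations` attribute as the `annotations` keyword (assuming the
# bob.pipelines Sample API of this era).
import numpy as np
from bob.pipelines import Sample

image = np.random.randint(0, 255, size=(3, 112, 112), dtype=np.uint8)
sample = Sample(image, key="subject1/img1", annotations=None)

gray = wrap(["sample"], ToGray(),
            transform_extra_arguments=(("annotations", "annotations"),))
print(gray.transform([sample])[0].data.shape)  # (10, 10) after the toy crop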
from bob.bio.face.embeddings import FaceNetSanderberg
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
)
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
else:
annotation_type = None
fixed_positions = None
def load(annotation_type, fixed_positions=None):
transformer = embedding_transformer_160x160(
FaceNetSanderberg(), annotation_type, fixed_positions
)
algorithm = Distance()
return VanillaBiometricsPipeline(transformer, algorithm)
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
BioAlgorithmLegacy,
)
from bob.bio.face.config.baseline.helpers import crop_80x64
import math
import numpy as np
import bob.bio.face
from sklearn.pipeline import make_pipeline
from bob.pipelines import wrap
import tempfile
import os
import logging
logger = logging.getLogger(__name__)
#### USE THE DATABASE'S ANNOTATION INFORMATION, IF A DATABASE IS DEFINED ####
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
else:
annotation_type = None
fixed_positions = None
def load(annotation_type, fixed_positions=None):
####### SELECTING THE FACE CROPPER TO BE USED ##########
# Cropping
face_cropper, transform_extra_arguments = crop_80x64(
annotation_type, fixed_positions, color_channel="gray"
)
preprocessor = bob.bio.face.preprocessor.INormLBP(
face_cropper=face_cropper, dtype=np.float64
)
#### FEATURE EXTRACTOR ######
# legacy objects need to be wrapped with legacy transformers
from bob.bio.base.transformers import ExtractorTransformer
gabor_graph = ExtractorTransformer(
bob.bio.face.extractor.GridGraph(
# Gabor parameters
gabor_sigma=math.sqrt(2.0) * math.pi,
# what kind of information to extract
normalize_gabor_jets=True,
# setup of the fixed grid
node_distance=(8, 8),
)
)
transformer = make_pipeline(
wrap(
["sample"], preprocessor, transform_extra_arguments=transform_extra_arguments,
),
wrap(["sample"], gabor_graph),
)
gabor_jet = bob.bio.face.algorithm.GaborJet(
gabor_jet_similarity_type="PhaseDiffPlusCanberra",
multiple_feature_scoring="max_jet",
gabor_sigma=math.sqrt(2.0) * math.pi,
)
# Set default temporary directory
default_temp = os.path.join("/idiap","temp",os.environ["USER"])
if os.path.exists(default_temp):
tempdir = os.path.join(default_temp, "bob_bio_base_tmp")
else:
# if /idiap/temp/<USER> does not exist, use /tmp/tmpxxxxxxxx
tempdir = tempfile.TemporaryDirectory().name
algorithm = BioAlgorithmLegacy(gabor_jet, base_dir=tempdir)
return VanillaBiometricsPipeline(transformer, algorithm)
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
import bob.bio.face
from sklearn.pipeline import make_pipeline
from bob.bio.base.wrappers import wrap_sample_preprocessor
from bob.pipelines import wrap
from bob.bio.face.helpers import face_crop_solver
import numpy as np
def embedding_transformer_160x160(embedding, annotation_type, fixed_positions):
"""
Creates a pipeline composed by and FaceCropper and an Embedding extractor.
This transformer is suited for Facenet based architectures
.. warning::
This will resize images to :math:`160 \times 160`
"""
# This is the size of the image that this model expects
CROPPED_IMAGE_HEIGHT = 160
CROPPED_IMAGE_WIDTH = 160
cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
color_channel = "rgb"
#### SELECTING THE FACE CROPPER TO BE USED
if annotation_type == "bounding-box":
transform_extra_arguments = (("annotations", "annotations"),)
TOP_LEFT_POS = (0, 0)
BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
# Crops the face using the bounding-box annotations (no eye positions needed)
face_cropper = face_crop_solver(
cropped_image_size,
color_channel=color_channel,
cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS},
fixed_positions=fixed_positions,
)
elif annotation_type == "eyes-center":
transform_extra_arguments = (("annotations", "annotations"),)
# eye positions for frontal images
RIGHT_EYE_POS = (46, 53)
LEFT_EYE_POS = (46, 107)
# Crops the face using the eye annotations
face_cropper = face_crop_solver(
cropped_image_size,
color_channel=color_channel,
cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
fixed_positions=fixed_positions,
)
else:
transform_extra_arguments = None
# DEFAULT TO FACE SIMPLE RESIZE
face_cropper = face_crop_solver(cropped_image_size)
transformer = make_pipeline(
wrap(
["sample"],
face_cropper,
transform_extra_arguments=transform_extra_arguments,
),
wrap(["sample"], embedding),
)
return transformer
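# Hedged usage sketch: any sklearn-compatible estimator can stand in for
# `embedding` (real configs pass e.g. FaceNetSanderberg()); here a
# hypothetical flattening stand-in, for illustration only.
from sklearn.preprocessing import FunctionTransformer

fake_embedding = FunctionTransformer(
    lambda X: [np.asarray(x).ravel() for x in X]
)
transformer_160 = embedding_transformer_160x160(fake_embedding, "eyes-center", None)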
def embedding_transformer_112x112(embedding, annotation_type, fixed_positions):
"""
Creates a pipeline composed by and FaceCropper and an Embedding extractor.
This transformer is suited for Facenet based architectures
.. warning::
This will resize images to :math:`112 \times 112`
"""
# This is the size of the image that this model expects
CROPPED_IMAGE_HEIGHT = 112
CROPPED_IMAGE_WIDTH = 112
cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
color_channel = "rgb"
#### SELECTING THE FACE CROPPER TO BE USED
if annotation_type == "bounding-box":
transform_extra_arguments = (("annotations", "annotations"),)
TOP_LEFT_POS = (0, 0)
BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
# Crops the face using the bounding-box annotations (no eye positions needed)
face_cropper = face_crop_solver(
cropped_image_size,
color_channel=color_channel,
cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS},
fixed_positions=fixed_positions,
)
elif annotation_type == "eyes-center":
transform_extra_arguments = (("annotations", "annotations"),)
# eye positions for frontal images
RIGHT_EYE_POS = (32, 34)
LEFT_EYE_POS = (32, 77)
# Crops the face using the eye annotations
face_cropper = face_crop_solver(
cropped_image_size,
color_channel=color_channel,
cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
fixed_positions=fixed_positions,
)
else:
transform_extra_arguments = None
# DEFAULT TO FACE SIMPLE RESIZE
face_cropper = face_crop_solver(cropped_image_size)
transformer = make_pipeline(
wrap(
["sample"],
face_cropper,
transform_extra_arguments=transform_extra_arguments,
),
wrap(["sample"], embedding),
)
return transformer
def crop_80x64(annotation_type, fixed_positions=None, color_channel="gray"):
"""
Crops a face to :math:`80 \times 64`
Parameters
----------
annotation_type: str
Type of annotations. Possible values are: `bounding-box`, `eyes-center` and None
fixed_positions: tuple
A tuple containing the annotations. This is used in case your input is already registered
with fixed positions (eyes or bounding box)
    color_channel: str
        Color channel to be used (e.g. ``"gray"``).
Returns
-------
face_cropper:
A face cropper to be used
transform_extra_arguments:
The parameters to the transformer
"""
# Cropping
CROPPED_IMAGE_HEIGHT = 80
CROPPED_IMAGE_WIDTH = CROPPED_IMAGE_HEIGHT * 4 // 5
# eye positions for frontal images
RIGHT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 - 1)
LEFT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 * 3)
    cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
    dtype = np.float64
if annotation_type == "bounding-box":
transform_extra_arguments = (("annotations", "annotations"),)
TOP_LEFT_POS = (0, 0)
BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
# Crops the face using the bounding-box annotations (no eye positions needed)
face_cropper = face_crop_solver(
cropped_image_size,
color_channel=color_channel,
cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS},
fixed_positions=fixed_positions,
dtype=dtype
)
elif annotation_type == "eyes-center":
transform_extra_arguments = (("annotations", "annotations"),)
# Crops the face using the eye annotations (positions defined above)
face_cropper = face_crop_solver(
cropped_image_size,
color_channel=color_channel,
cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
fixed_positions=fixed_positions,
dtype=dtype
)
else:
transform_extra_arguments = None
# DEFAULT TO FACE SIMPLE RESIZE
face_cropper = face_crop_solver(cropped_image_size)
return face_cropper, transform_extra_arguments
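# Hedged usage sketch, mirroring the gabor_graph config above: the returned
# cropper and extra-arguments tuple plug directly into a sample-wrapped
# pipeline stage.
cropper, extra_args = crop_80x64("eyes-center", color_channel="gray")
cropper_stage = wrap(
    ["sample"], cropper, transform_extra_arguments=extra_args,
)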
from bob.bio.face.embeddings import InceptionResnetv1_CasiaWebFace
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
)
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
else:
annotation_type = None
fixed_positions = None
def load(annotation_type, fixed_positions=None):
transformer = embedding_transformer_160x160(
InceptionResnetv1_CasiaWebFace(), annotation_type, fixed_positions
)
algorithm = Distance()
return VanillaBiometricsPipeline(transformer, algorithm)
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
\ No newline at end of file
from bob.bio.face.embeddings import InceptionResnetv1_MsCeleb
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
)
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
else:
annotation_type = None
fixed_positions = None