Commit 0fc3ec84 authored by Tiago de Freitas Pereira

Merge branch 'tf2-pipelines' into 'master'

TF2 pipelines

See merge request !77
parents b2843c75 f964d887
Pipeline #45165 failed in 55 seconds
from bob.bio.face.embeddings import FaceNetSanderberg
from bob.bio.face.embeddings import FaceNetSanderberg_20170512_110547
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
@@ -17,7 +17,7 @@ else:
def load(annotation_type, fixed_positions=None):
transformer = embedding_transformer_160x160(
FaceNetSanderberg(), annotation_type, fixed_positions
FaceNetSanderberg_20170512_110547(), annotation_type, fixed_positions
)
algorithm = Distance()
......
@@ -24,15 +24,14 @@ else:
annotation_type = None
fixed_positions = None
def load(annotation_type, fixed_positions=None):
####### SOLVING THE FACE CROPPER TO BE USED ##########
def get_cropper(annotation_type, fixed_positions=None):
# Cropping
face_cropper, transform_extra_arguments = crop_80x64(
annotation_type, fixed_positions, color_channel="gray"
)
return face_cropper, transform_extra_arguments
def get_pipeline(face_cropper, transform_extra_arguments):
preprocessor = bob.bio.face.preprocessor.INormLBP(
face_cropper=face_cropper, dtype=np.float64
)
@@ -79,6 +78,11 @@ def load(annotation_type, fixed_positions=None):
algorithm = BioAlgorithmLegacy(gabor_jet, base_dir=tempdir)
return VanillaBiometricsPipeline(transformer, algorithm)
def load(annotation_type, fixed_positions=None):
####### SOLVING THE FACE CROPPER TO BE USED ##########
face_cropper, transform_extra_arguments = get_cropper(annotation_type, fixed_positions)
return get_pipeline(face_cropper, transform_extra_arguments)
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
@@ -5,55 +5,131 @@ from bob.pipelines import wrap
from bob.bio.face.helpers import face_crop_solver
import numpy as np
def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
"""
Computes the default cropped positions for the FaceCropper used with Facenet-like
Embedding extractors, proportionally to the target image size
def embedding_transformer_160x160(embedding, annotation_type, fixed_positions):
Parameters
----------
cropped_image_size : tuple
A tuple (HEIGHT, WIDTH) describing the target size of the cropped image.
annotation_type: str
Type of annotations. Possible values are: `bounding-box`, `eyes-center`, `left-profile`,
`right-profile` and None
Returns
-------
cropped_positions:
The dictionary of cropped positions that will be fed to the FaceCropper.
"""
Creates a pipeline composed of a FaceCropper and an Embedding extractor.
This transformer is suited for Facenet-based architectures
.. warning::
This will resize images to :math:`160 \times 160`
CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH = cropped_image_size
if annotation_type == "bounding-box":
TOP_LEFT_POS = (0, 0)
BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS}
elif annotation_type == "eyes-center":
RIGHT_EYE_POS = (round(2/7*CROPPED_IMAGE_HEIGHT), round(1/3*CROPPED_IMAGE_WIDTH))
LEFT_EYE_POS = (round(2/7*CROPPED_IMAGE_HEIGHT), round(2/3*CROPPED_IMAGE_WIDTH))
cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS}
elif annotation_type == "left-profile":
EYE_POS = (round(2/7*CROPPED_IMAGE_HEIGHT), round(3/8*CROPPED_IMAGE_WIDTH))
MOUTH_POS = (round(5/7*CROPPED_IMAGE_HEIGHT), round(3/8*CROPPED_IMAGE_WIDTH))
cropped_positions={'leye': EYE_POS, 'mouth': MOUTH_POS}
elif annotation_type == "right-profile":
EYE_POS = (round(2/7*CROPPED_IMAGE_HEIGHT), round(5/8*CROPPED_IMAGE_WIDTH))
MOUTH_POS = (round(5/7*CROPPED_IMAGE_HEIGHT), round(5/8*CROPPED_IMAGE_WIDTH))
cropped_positions={'reye': EYE_POS, 'mouth': MOUTH_POS}
else:
cropped_positions = None
return cropped_positions
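# Illustration only (not part of the diff), assuming the proportional rules above:
# for a 160x160 Facenet-style crop with eye-center annotations they reproduce the
# eye positions that used to be hard-coded in embedding_transformer_160x160.
positions = embedding_transformer_default_cropping((160, 160), "eyes-center")
# round(2/7 * 160) == 46, round(1/3 * 160) == 53, round(2/3 * 160) == 107, hence
# positions == {"leye": (46, 107), "reye": (46, 53)}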
def legacy_default_cropping(cropped_image_size, annotation_type):
"""
Computes the default cropped positions for the FaceCropper used with legacy extractors,
proportionally to the target image size
# This is the size of the image that this model expects
CROPPED_IMAGE_HEIGHT = 160
CROPPED_IMAGE_WIDTH = 160
cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
color_channel = "rgb"
#### SOLVING THE FACE CROPPER TO BE USED
Parameters
----------
cropped_image_size : tuple
A tuple (HEIGHT, WIDTH) describing the target size of the cropped image.
annotation_type: str
Type of annotations. Possible values are: `bounding-box`, `eyes-center`, `left-profile`,
`right-profile` and None
Returns
-------
cropped_positions:
The dictionary of cropped positions that will be fed to the FaceCropper.
"""
CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH = cropped_image_size
if annotation_type == "bounding-box":
transform_extra_arguments = (("annotations", "annotations"),)
TOP_LEFT_POS = (0, 0)
BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
# Detects the face and crops it without eye detection
face_cropper = face_crop_solver(
cropped_image_size,
color_channel=color_channel,
cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS},
fixed_positions=fixed_positions,
)
cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS}
elif annotation_type == "eyes-center":
transform_extra_arguments = (("annotations", "annotations"),)
# eye positions for frontal images
RIGHT_EYE_POS = (46, 53)
LEFT_EYE_POS = (46, 107)
# Detects the face and crops it without eye detection
face_cropper = face_crop_solver(
RIGHT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 - 1)
LEFT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 * 3)
cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS}
elif annotation_type == "left-profile":
# Main reference https://gitlab.idiap.ch/bob/bob.chapter.FRICE/-/blob/master/bob/chapter/FRICE/script/pose.py
EYE_POS = (CROPPED_IMAGE_HEIGHT//5, CROPPED_IMAGE_WIDTH // 7 * 3 - 2)
MOUTH_POS = (CROPPED_IMAGE_HEIGHT//3 * 2, CROPPED_IMAGE_WIDTH // 7 * 3 - 2)
cropped_positions={'leye': EYE_POS, 'mouth': MOUTH_POS}
elif annotation_type == "right-profile":
# Main reference https://gitlab.idiap.ch/bob/bob.chapter.FRICE/-/blob/master/bob/chapter/FRICE/script/pose.py
EYE_POS = (CROPPED_IMAGE_HEIGHT//5, CROPPED_IMAGE_WIDTH // 7 * 4 + 2)
MOUTH_POS = (CROPPED_IMAGE_HEIGHT//3 * 2, CROPPED_IMAGE_WIDTH // 7 * 4 + 2)
cropped_positions={'reye': EYE_POS, 'mouth': MOUTH_POS}
else:
cropped_positions = None
return cropped_positions
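# Illustration only (not part of the diff): for the legacy 80x64 geometry used by
# crop_80x64 below, the integer rules above yield the classic Gabor/LBP eye positions.
positions = legacy_default_cropping((80, 64), "eyes-center")
# 80 // 5 == 16, 64 // 4 - 1 == 15, 64 // 4 * 3 == 48, hence
# positions == {"leye": (16, 48), "reye": (16, 15)}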
def embedding_transformer(cropped_image_size, embedding, annotation_type, cropped_positions, fixed_positions=None):
"""
Creates a pipeline composed of a FaceCropper and an Embedding extractor.
This transformer is suited for Facenet-based architectures
.. warning::
This will resize images to the requested `cropped_image_size`
"""
color_channel = "rgb"
face_cropper = face_crop_solver(
cropped_image_size,
color_channel=color_channel,
cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
cropped_positions=cropped_positions,
fixed_positions=fixed_positions,
)
else:
transform_extra_arguments = None
# DEFAULT TO FACE SIMPLE RESIZE
face_cropper = face_crop_solver(cropped_image_size)
transform_extra_arguments = None if cropped_positions is None else (("annotations", "annotations"),)
transformer = make_pipeline(
wrap(
@@ -66,66 +142,39 @@ def embedding_transformer_160x160(embedding, annotation_type, fixed_positions):
return transformer
def embedding_transformer_112x112(embedding, annotation_type, fixed_positions):
def embedding_transformer_160x160(embedding, annotation_type, fixed_positions):
"""
Creates a pipeline composed of a FaceCropper and an Embedding extractor.
This transformer is suited for Facenet-based architectures
.. warning::
This will resize images to :math:`112 \times 112`
This will resize images to :math:`160 \times 160`
"""
cropped_positions = embedding_transformer_default_cropping((160, 160), annotation_type)
# This is the size of the image that this model expects
CROPPED_IMAGE_HEIGHT = 112
CROPPED_IMAGE_WIDTH = 112
cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
color_channel = "rgb"
return embedding_transformer((160, 160), embedding, annotation_type, cropped_positions, fixed_positions)
#### SOLVING THE FACE CROPPER TO BE USED
if annotation_type == "bounding-box":
transform_extra_arguments = (("annotations", "annotations"),)
TOP_LEFT_POS = (0, 0)
BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
# Detects the face and crops it without eye detection
face_cropper = face_crop_solver(
cropped_image_size,
color_channel=color_channel,
cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS},
fixed_positions=fixed_positions,
)
elif annotation_type == "eyes-center":
transform_extra_arguments = (("annotations", "annotations"),)
# eye positions for frontal images
RIGHT_EYE_POS = (32, 34)
LEFT_EYE_POS = (32, 77)
# Detects the face and crops it without eye detection
face_cropper = face_crop_solver(
cropped_image_size,
color_channel=color_channel,
cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
fixed_positions=fixed_positions,
)
def embedding_transformer_112x112(embedding, annotation_type, fixed_positions):
"""
Creates a pipeline composed of a FaceCropper and an Embedding extractor.
This transformer is suited for Facenet-based architectures
.. warning::
This will resize images to :math:`112 \times 112`
"""
cropped_image_size = (112, 112)
if annotation_type == "eyes-center":
# Hard-coded eye positions kept for backward compatibility
cropped_positions = {'leye': (32, 77), 'reye': (32, 34)}
else:
transform_extra_arguments = None
# DEFAULT TO FACE SIMPLE RESIZE
face_cropper = face_crop_solver(cropped_image_size)
transformer = make_pipeline(
wrap(
["sample"],
face_cropper,
transform_extra_arguments=transform_extra_arguments,
),
wrap(["sample"], embedding),
)
# Will use default
cropped_positions = embedding_transformer_default_cropping(cropped_image_size, annotation_type)
return transformer
return embedding_transformer(cropped_image_size, embedding, annotation_type, cropped_positions, fixed_positions)
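# Usage sketch (not part of the diff): this is how the size-specific wrappers are
# consumed by the baseline configs in this merge request, e.g. with the Sanderberg
# FaceNet model, which expects 160x160 crops.
from bob.bio.face.embeddings import FaceNetSanderberg_20170512_110547

transformer = embedding_transformer_160x160(
    FaceNetSanderberg_20170512_110547(), annotation_type="eyes-center", fixed_positions=None
)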
def crop_80x64(annotation_type, fixed_positions=None, color_channel="gray"):
@@ -156,49 +205,25 @@ def crop_80x64(annotation_type, fixed_positions=None, color_channel="gray"):
The parameters to the transformer
"""
color_channel = color_channel
dtype = np.float64
# Cropping
CROPPED_IMAGE_HEIGHT = 80
CROPPED_IMAGE_WIDTH = CROPPED_IMAGE_HEIGHT * 4 // 5
# eye positions for frontal images
RIGHT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 - 1)
LEFT_EYE_POS = (CROPPED_IMAGE_HEIGHT // 5, CROPPED_IMAGE_WIDTH // 4 * 3)
cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
color_channel = color_channel
dtype = np.float64
if annotation_type == "bounding-box":
transform_extra_arguments = (("annotations", "annotations"),)
TOP_LEFT_POS = (0, 0)
BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
cropped_positions = legacy_default_cropping(cropped_image_size, annotation_type)
# Detects the face and crops it without eye detection
face_cropper = face_crop_solver(
cropped_image_size,
color_channel=color_channel,
cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS},
fixed_positions=fixed_positions,
dtype=dtype
)
elif annotation_type == "eyes-center":
transform_extra_arguments = (("annotations", "annotations"),)
# eye positions for frontal images
# Detects the face and crops it without eye detection
face_cropper = face_crop_solver(
face_cropper = face_crop_solver(
cropped_image_size,
color_channel=color_channel,
cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
cropped_positions=cropped_positions,
fixed_positions=fixed_positions,
dtype=dtype
)
else:
transform_extra_arguments = None
# DEFAULT TO FACE SIMPLE RESIZE
face_cropper = face_crop_solver(cropped_image_size)
transform_extra_arguments = None if cropped_positions is None else (("annotations", "annotations"),)
return face_cropper, transform_extra_arguments
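# Usage sketch (not part of the diff): crop_80x64 returns both the cropper and the
# transform_extra_arguments tuple needed to forward annotations when the cropper is
# wrapped as a sample transformer.
face_cropper, transform_extra_arguments = crop_80x64(
    "eyes-center", fixed_positions=None, color_channel="gray"
)
wrapped_cropper = wrap(
    ["sample"], face_cropper, transform_extra_arguments=transform_extra_arguments
)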
from bob.bio.face.embeddings import InceptionResnetv1_CasiaWebFace
from bob.bio.face.embeddings import InceptionResnetv1_Casia_CenterLoss_2018
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
@@ -15,7 +15,7 @@ else:
def load(annotation_type, fixed_positions=None):
transformer = embedding_transformer_160x160(
InceptionResnetv1_CasiaWebFace(), annotation_type, fixed_positions
InceptionResnetv1_Casia_CenterLoss_2018(), annotation_type, fixed_positions
)
algorithm = Distance()
......
from bob.bio.face.embeddings import InceptionResnetv1_MsCeleb
from bob.bio.face.embeddings import InceptionResnetv1_MsCeleb_CenterLoss_2018
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
@@ -16,7 +16,7 @@ else:
def load(annotation_type, fixed_positions=None):
transformer = embedding_transformer_160x160(
InceptionResnetv1_MsCeleb(), annotation_type, fixed_positions
InceptionResnetv1_MsCeleb_CenterLoss_2018(), annotation_type, fixed_positions
)
algorithm = Distance()
......
from bob.bio.face.embeddings import InceptionResnetv2_CasiaWebFace
from bob.bio.face.embeddings import InceptionResnetv2_Casia_CenterLoss_2018
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
@@ -16,7 +16,7 @@ else:
def load(annotation_type, fixed_positions=None):
transformer = embedding_transformer_160x160(
InceptionResnetv2_CasiaWebFace(), annotation_type, fixed_positions
InceptionResnetv2_Casia_CenterLoss_2018(), annotation_type, fixed_positions
)
algorithm = Distance()
......
from bob.bio.face.embeddings import InceptionResnetv2_MsCeleb
from bob.bio.face.embeddings import InceptionResnetv2_MsCeleb_CenterLoss_2018
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
@@ -16,7 +16,7 @@ else:
def load(annotation_type, fixed_positions=None):
transformer = embedding_transformer_160x160(
InceptionResnetv2_MsCeleb(), annotation_type, fixed_positions
InceptionResnetv2_MsCeleb_CenterLoss_2018(), annotation_type, fixed_positions
)
algorithm = Distance()
......
@@ -21,15 +21,14 @@ else:
annotation_type = None
fixed_positions = None
def load(annotation_type, fixed_positions=None):
####### SOLVING THE FACE CROPPER TO BE USED ##########
def get_cropper(annotation_type, fixed_positions=None):
# Cropping
face_cropper, transform_extra_arguments = crop_80x64(
annotation_type, fixed_positions, color_channel="gray"
)
return face_cropper, transform_extra_arguments
def get_pipeline(face_cropper, transform_extra_arguments):
preprocessor = bob.bio.face.preprocessor.TanTriggs(
face_cropper=face_cropper, dtype=np.float64
)
@@ -56,7 +55,6 @@ def load(annotation_type, fixed_positions=None):
)
### BIOMETRIC ALGORITHM
histogram = bob.bio.face.algorithm.Histogram(
distance_function = bob.math.histogram_intersection,
@@ -69,5 +67,10 @@ def load(annotation_type, fixed_positions=None):
return VanillaBiometricsPipeline(transformer, algorithm)
def load(annotation_type, fixed_positions=None):
####### SOLVING THE FACE CROPPER TO BE USED ##########
face_cropper, transform_extra_arguments = get_cropper(annotation_type, fixed_positions)
return get_pipeline(face_cropper, transform_extra_arguments)
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
\ No newline at end of file
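# Note (not part of the diff): splitting load() into get_cropper() and get_pipeline()
# lets variant baselines reuse the same pipeline while overriding only the cropping,
# e.g. with fixed eye positions (the dictionary below is a hypothetical example):
face_cropper, transform_extra_arguments = get_cropper(
    "eyes-center", fixed_positions={"reye": (16, 15), "leye": (16, 48)}
)
pipeline = get_pipeline(face_cropper, transform_extra_arguments)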
from bob.extension import rc
from bob.bio.face.embeddings.tf2_inception_resnet import InceptionResnetv2
from bob.bio.face.preprocessor import FaceCrop
from bob.bio.face.config.baseline.helpers import (
embedding_transformer_default_cropping,
embedding_transformer
)
from sklearn.pipeline import make_pipeline
from bob.pipelines.wrappers import wrap
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
)
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
else:
annotation_type = None
fixed_positions = None
def load(annotation_type, fixed_positions=None):
CROPPED_IMAGE_SIZE = (160, 160)
CROPPED_POSITIONS = embedding_transformer_default_cropping(CROPPED_IMAGE_SIZE,
annotation_type=annotation_type)
extractor_path = rc['bob.bio.face.tf2.casia-webface-inception-v2']
embedding = InceptionResnetv2(checkpoint_path=extractor_path)
transformer = embedding_transformer(CROPPED_IMAGE_SIZE,
embedding,
annotation_type,
CROPPED_POSITIONS,
fixed_positions)
algorithm = Distance()
return VanillaBiometricsPipeline(transformer, algorithm)
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
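# Sketch (not part of the diff): the rc lookup above can be bypassed by pointing the
# extractor at a checkpoint directly; "/path/to/checkpoint" is a placeholder path.
embedding = InceptionResnetv2(checkpoint_path="/path/to/checkpoint")
transformer = embedding_transformer(
    (160, 160),
    embedding,
    annotation_type,
    embedding_transformer_default_cropping((160, 160), annotation_type),
    fixed_positions,
)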
from .facenet_sanderberg import FaceNetSanderberg
from .idiap_inception_resnet import (
InceptionResnetv2_MsCeleb,
InceptionResnetv2_CasiaWebFace,
InceptionResnetv1_MsCeleb,
InceptionResnetv1_CasiaWebFace
)
import os
import bob.extension.download
def download_model(model_path, urls, zip_file="model.tar.gz"):
"""
Downloads and unzips a model from one of the given URLs.
Parameters
----------
model_path: str
Path where the model is supposed to be stored
urls: list
List of URLs from which the model can be downloaded
from .arface import ArcFace_InsightFaceTF
zip_file: str
File name after the download
"""
if not os.path.exists(model_path):
os.makedirs(model_path, exist_ok=True)
zip_file = os.path.join(model_path, zip_file)
bob.extension.download.download_and_unzip(urls, zip_file)
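# Usage sketch (hypothetical URL and destination path, for illustration only):
download_model(
    model_path="/tmp/bob/facenet-sanderberg-20170512-110547",
    urls=["https://www.example.com/facenet-20170512-110547.tar.gz"],
    zip_file="model.tar.gz",
)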
from .tf2_inception_resnet import (
InceptionResnet,
InceptionResnetv2_MsCeleb_CenterLoss_2018,
InceptionResnetv2_Casia_CenterLoss_2018,
InceptionResnetv1_MsCeleb_CenterLoss_2018,
InceptionResnetv1_Casia_CenterLoss_2018,
FaceNetSanderberg_20170512_110547
)
# gets sphinx autodoc done right - don't remove it
def __appropriate__(*args):
@@ -26,11 +51,11 @@ def __appropriate__(*args):
__appropriate__(
FaceNetSanderberg,
InceptionResnetv2_MsCeleb,
InceptionResnetv2_CasiaWebFace,
InceptionResnetv1_MsCeleb,
InceptionResnetv1_CasiaWebFace,
ArcFace_InsightFaceTF
InceptionResnet,
InceptionResnetv2_MsCeleb_CenterLoss_2018,
InceptionResnetv1_MsCeleb_CenterLoss_2018,
InceptionResnetv2_Casia_CenterLoss_2018,
InceptionResnetv1_Casia_CenterLoss_2018,
FaceNetSanderberg_20170512_110547
)
__all__ = [_ for _ in dir() if not _.startswith("_")]
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
"""
Wrapper for the free FaceNet variant:
https://github.com/davidsandberg/facenet
Model 20170512-110547
"""
from __future__ import division
from sklearn.base import TransformerMixin, BaseEstimator
import os
import re
import logging
import numpy as np
from bob.ip.color import gray_to_rgb
from bob.io.image import to_matplotlib
from bob.extension import rc
import bob.extension.download
import bob.io.base
from sklearn.utils import check_array
logger = logging.getLogger(__name__)
FACENET_MODELPATH_KEY = "bob.bio.face.facenet_sanderberg_modelpath"
def prewhiten(img):
# Normalizes the image to zero mean and roughly unit standard deviation; the standard
# deviation is floored at 1/sqrt(img.size) so nearly-constant images are not blown up.
mean = np.mean(img)
std = np.std(img)
std_adj = np.maximum(std, 1.0 / np.sqrt(img.size))
y = np.multiply(np.subtract(img, mean), 1 / std_adj)
return y
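# Quick check (illustration only): prewhitening brings any input image to roughly
# zero mean and unit standard deviation before it is fed to the FaceNet graph.
img = np.random.randint(0, 256, size=(160, 160, 3)).astype("float64")
out = prewhiten(img)
# out.mean() is ~0.0 and out.std() is ~1.0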
def get_model_filenames(model_dir):