Commit 3ba8d3cb authored by Laurent COLBOIS

Refactor DNN baselines

parent 6572614c
1 merge request: !119 Refactor baseline config helpers
Showing changed files with 184 additions and 146 deletions
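
In short: every DNN baseline stops calling a size-specific helper (embedding_transformer_112x112 / embedding_transformer_160x160) and instead spells out its cropping explicitly, either with hard-coded eye positions or via the new dnn_default_cropping, before passing everything to the generic embedding_transformer. A minimal before/after sketch of the pattern (names are taken from the hunks below; annotation_type, fixed_positions and memory_demanding come from lookup_config_from_database, as in the configs themselves):

# Before: crop size and positions are hidden inside a size-specific helper
transformer = embedding_transformer_112x112(
    ArcFaceInsightFace(memory_demanding=memory_demanding),
    annotation_type,
    fixed_positions,
)

# After: the baseline defines its crop and hands it to the generic helper
cropped_image_size = (112, 112)
cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
transformer = embedding_transformer(
    cropped_image_size=cropped_image_size,
    embedding=ArcFaceInsightFace(memory_demanding=memory_demanding),
    cropped_positions=cropped_positions,
    fixed_positions=fixed_positions,
    color_channel="rgb",
)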
 from bob.bio.face.embeddings.mxnet_models import ArcFaceInsightFace
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_112x112,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -12,10 +13,22 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_112x112(
-        ArcFaceInsightFace(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (112, 112)
+    if annotation_type == "eyes-center":
+        # Hard coding eye positions for backward consistency
+        cropped_positions = {
+            "leye": (55, 81),
+            "reye": (55, 42),
+        }
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=ArcFaceInsightFace(memory_demanding=memory_demanding),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
     )
......
@@ -2,9 +2,11 @@ from bob.bio.face.embeddings.tf2_inception_resnet import (
     FaceNetSanderberg_20170512_110547,
 )
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_160x160,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
     VanillaBiometricsPipeline,
@@ -14,12 +16,20 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 def load(annotation_type, fixed_positions=None):
+    # DEFINE CROPPING
+    cropped_image_size = (160, 160)
+    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
-    transformer = embedding_transformer_160x160(
-        FaceNetSanderberg_20170512_110547(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # ASSEMBLE TRANSFORMER
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=FaceNetSanderberg_20170512_110547(memory_demanding=memory_demanding),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
     )

     algorithm = Distance()
     return VanillaBiometricsPipeline(transformer, algorithm)
......
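
A hedged usage note (not part of the commit): each baseline module exposes the same load() entry point, so the resulting pipeline can be obtained directly from it; "eyes-center" below is only an example annotation type.

# Hypothetical usage of a refactored baseline config
pipeline = load(annotation_type="eyes-center", fixed_positions=None)
# -> VanillaBiometricsPipeline(cropper + embedding transformer, Distance())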
@@ -27,10 +27,10 @@ def lookup_config_from_database():
     return annotation_type, fixed_positions, memory_demanding

-def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
+def dnn_default_cropping(cropped_image_size, annotation_type):
     """
-    Computes the default cropped positions for the FaceCropper used with Facenet-like
-    Embedding extractors, proportionally to the target image size
+    Computes the default cropped positions for the FaceCropper used with Neural-Net based
+    extractors, proportionally to the target image size

     Parameters
@@ -51,8 +51,7 @@ def embedding_transformer_default_cropping(cropped_image_size, annotation_type):
     """
     if isinstance(annotation_type, list):
         return [
-            embedding_transformer_default_cropping(cropped_image_size, item)
-            for item in annotation_type
+            dnn_default_cropping(cropped_image_size, item) for item in annotation_type
         ]

     CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH = cropped_image_size
@@ -175,7 +174,6 @@ def legacy_default_cropping(cropped_image_size, annotation_type):
 def make_cropper(
     cropped_image_size,
-    annotation_type,
     cropped_positions,
     fixed_positions=None,
     color_channel="rgb",
@@ -183,12 +181,12 @@
 ):
     face_cropper = face_crop_solver(
-        cropped_image_size,
-        color_channel=color_channel,
+        cropped_image_size=cropped_image_size,
         cropped_positions=cropped_positions,
         fixed_positions=fixed_positions,
-        dtype="float64",
         annotator=annotator,
+        color_channel=color_channel,
+        dtype="float64",
     )

     transform_extra_arguments = (
@@ -203,10 +201,10 @@
 def embedding_transformer(
     cropped_image_size,
     embedding,
-    annotation_type,
     cropped_positions,
     fixed_positions=None,
     color_channel="rgb",
+    annotator=None,
 ):
     """
     Creates a pipeline composed by and FaceCropper and an Embedding extractor.
@@ -216,18 +214,12 @@
        This will resize images to the requested `image_size`
     """
-    face_cropper = face_crop_solver(
-        cropped_image_size,
-        color_channel=color_channel,
+    face_cropper, transform_extra_arguments = make_cropper(
+        cropped_image_size=cropped_image_size,
         cropped_positions=cropped_positions,
         fixed_positions=fixed_positions,
-        dtype="float64",
-    )
-
-    transform_extra_arguments = (
-        None
-        if (cropped_positions is None or fixed_positions is not None)
-        else (("annotations", "annotations"),)
+        color_channel=color_channel,
+        annotator=annotator,
     )

     transformer = make_pipeline(
@@ -242,66 +234,6 @@
     return transformer

-def embedding_transformer_160x160(
-    embedding, annotation_type, fixed_positions, color_channel="rgb"
-):
-    """
-    Creates a pipeline composed by and FaceCropper and an Embedding extractor.
-    This transformer is suited for Facenet based architectures
-
-    .. warning::
-       This will resize images to :math:`160 \times 160`
-    """
-    cropped_positions = embedding_transformer_default_cropping(
-        (160, 160), annotation_type
-    )
-
-    return embedding_transformer(
-        (160, 160),
-        embedding,
-        annotation_type,
-        cropped_positions,
-        fixed_positions,
-        color_channel=color_channel,
-    )
-
-def embedding_transformer_112x112(
-    embedding, annotation_type, fixed_positions, color_channel="rgb"
-):
-    """
-    Creates a pipeline composed by and FaceCropper and an Embedding extractor.
-    This transformer is suited for Facenet based architectures
-
-    .. warning::
-       This will resize images to :math:`112 \times 112`
-    """
-    cropped_image_size = (112, 112)
-    if annotation_type == "eyes-center":
-        # Hard coding eye positions for backward consistency
-        cropped_positions = {
-            "leye": (55, 81),
-            "reye": (55, 42),
-        }
-    else:
-        # Will use default
-        cropped_positions = embedding_transformer_default_cropping(
-            cropped_image_size, annotation_type
-        )
-
-    return embedding_transformer(
-        cropped_image_size,
-        embedding,
-        annotation_type,
-        cropped_positions,
-        fixed_positions,
-        color_channel=color_channel,
-    )

 def embedding_transformer_224x224(
     embedding, annotation_type, fixed_positions, color_channel="rgb"
 ):
@@ -319,15 +251,12 @@ def embedding_transformer_224x224(
         cropped_positions = {"leye": (65, 150), "reye": (65, 77)}
     else:
         # Will use default
-        cropped_positions = embedding_transformer_default_cropping(
-            cropped_image_size, annotation_type
-        )
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)

     return embedding_transformer(
-        cropped_image_size,
-        embedding,
-        annotation_type,
-        cropped_positions,
-        fixed_positions,
+        cropped_image_size=cropped_image_size,
+        embedding=embedding,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
         color_channel=color_channel,
     )
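
For reference, a short sketch of the refactored helpers in isolation (an illustration under stated assumptions, not code from this commit; it relies only on the signatures visible in the hunks above):

from bob.bio.face.config.baseline.helpers import (
    dnn_default_cropping,
    embedding_transformer,
)

# A single annotation type yields one cropped-positions dict; a list of
# annotation types is broadcast element-wise (the isinstance(..., list) branch).
positions = dnn_default_cropping((160, 160), "eyes-center")
positions_list = dnn_default_cropping((160, 160), ["eyes-center", "eyes-center"])
assert isinstance(positions_list, list) and len(positions_list) == 2

# embedding_transformer no longer takes annotation_type: the cropping is passed
# in explicitly, and annotator/color_channel are forwarded to make_cropper.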
@@ -2,8 +2,9 @@ from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv1_Casia_CenterLoss_2018,
 )
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_160x160,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -14,10 +15,20 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_160x160(
-        InceptionResnetv1_Casia_CenterLoss_2018(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (160, 160)
+    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    # ASSEMBLE TRANSFORMER
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=InceptionResnetv1_Casia_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
     )

     algorithm = Distance()
......
@@ -2,8 +2,9 @@ from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv1_MsCeleb_CenterLoss_2018,
 )
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_160x160,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -14,10 +15,20 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_160x160(
-        InceptionResnetv1_MsCeleb_CenterLoss_2018(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (160, 160)
+    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    # ASSEMBLE TRANSFORMER
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=InceptionResnetv1_MsCeleb_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
    )

     algorithm = Distance()
......
@@ -2,8 +2,9 @@ from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv2_Casia_CenterLoss_2018,
 )
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_160x160,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -14,10 +15,20 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_160x160(
-        InceptionResnetv2_Casia_CenterLoss_2018(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (160, 160)
+    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    # ASSEMBLE TRANSFORMER
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=InceptionResnetv2_Casia_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
     )

     algorithm = Distance()
......
@@ -2,8 +2,9 @@ from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv2_MsCeleb_CenterLoss_2018,
 )
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_160x160,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -14,10 +15,20 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_160x160(
-        InceptionResnetv2_MsCeleb_CenterLoss_2018(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (160, 160)
+    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    # ASSEMBLE TRANSFORMER
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=InceptionResnetv2_MsCeleb_CenterLoss_2018(
+            memory_demanding=memory_demanding
+        ),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
     )

     algorithm = Distance()
......
 from bob.bio.face.embeddings.mobilenet_v2 import MobileNetv2_MsCeleb_ArcFace_2021
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_112x112,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -12,10 +13,23 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_112x112(
-        MobileNetv2_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (112, 112)
+    if annotation_type == "eyes-center":
+        # Hard coding eye positions for backward consistency
+        cropped_positions = {
+            "leye": (55, 81),
+            "reye": (55, 42),
+        }
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=MobileNetv2_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
     )

     algorithm = Distance()
......
 from bob.bio.face.embeddings.resnet50 import Resnet50_MsCeleb_ArcFace_2021
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_112x112,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
     VanillaBiometricsPipeline,
@@ -12,10 +14,23 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_112x112(
-        Resnet50_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (112, 112)
+    if annotation_type == "eyes-center":
+        # Hard coding eye positions for backward consistency
+        cropped_positions = {
+            "leye": (55, 81),
+            "reye": (55, 42),
+        }
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=Resnet50_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
     )

     algorithm = Distance()
......
 from bob.bio.face.embeddings.resnet50 import Resnet50_VGG2_ArcFace_2021
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_112x112,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from bob.bio.base.pipelines.vanilla_biometrics import (
     Distance,
@@ -12,10 +13,23 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 def load(annotation_type, fixed_positions=None):
-    transformer = embedding_transformer_112x112(
-        Resnet50_VGG2_ArcFace_2021(memory_demanding=memory_demanding),
-        annotation_type,
-        fixed_positions,
+    # DEFINE CROPPING
+    cropped_image_size = (112, 112)
+    if annotation_type == "eyes-center":
+        # Hard coding eye positions for backward consistency
+        cropped_positions = {
+            "leye": (55, 81),
+            "reye": (55, 42),
+        }
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=Resnet50_VGG2_ArcFace_2021(memory_demanding=memory_demanding),
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
     )

     algorithm = Distance()
......
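
All of the 112x112 baselines above (ArcFace, MobileNet, ResNet-50) share the same hard-coded eyes-center positions. A hedged sketch of what make_cropper does with them, using only the keyword signature shown in the helpers diff; reading the coordinates as (row, column) follows Bob's usual (y, x) convention and is an assumption here:

from bob.bio.face.config.baseline.helpers import make_cropper

face_cropper, transform_extra_arguments = make_cropper(
    cropped_image_size=(112, 112),
    cropped_positions={"leye": (55, 81), "reye": (55, 42)},  # (row, column) in the crop
    fixed_positions=None,
    color_channel="rgb",
    annotator="mtcnn",
)
# With cropped_positions set and fixed_positions None, annotations are forwarded
# to the cropper: transform_extra_arguments == (("annotations", "annotations"),)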
@@ -2,9 +2,9 @@ from bob.extension import rc
 from bob.bio.face.embeddings.tf2_inception_resnet import InceptionResnetv2
 from bob.bio.face.preprocessor import FaceCrop
 from bob.bio.face.config.baseline.helpers import (
-    embedding_transformer_default_cropping,
-    embedding_transformer,
     lookup_config_from_database,
+    dnn_default_cropping,
+    embedding_transformer,
 )
 from sklearn.pipeline import make_pipeline
@@ -18,23 +18,22 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
 def load(annotation_type, fixed_positions=None):
-    CROPPED_IMAGE_SIZE = (160, 160)
-    CROPPED_POSITIONS = embedding_transformer_default_cropping(
-        CROPPED_IMAGE_SIZE, annotation_type=annotation_type
-    )
+    # DEFINE CROPPING
+    cropped_image_size = (160, 160)
+    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)

     extractor_path = rc["bob.bio.face.tf2.casia-webface-inception-v2"]

     embedding = InceptionResnetv2(
         checkpoint_path=extractor_path, memory_demanding=memory_demanding
     )

+    # ASSEMBLE TRANSFORMER
     transformer = embedding_transformer(
-        CROPPED_IMAGE_SIZE,
-        embedding,
-        annotation_type,
-        CROPPED_POSITIONS,
-        fixed_positions,
+        cropped_image_size=cropped_image_size,
+        embedding=embedding,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
         color_channel="rgb",
+        annotator="mtcnn",
     )

     algorithm = Distance()
......
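
Unlike the other baselines, this config resolves its checkpoint through Bob's global configuration (from bob.extension import rc). A hedged sketch of that lookup outside the config file; the resource key comes from the diff, while the path handling and memory_demanding value are illustrative only:

from bob.extension import rc
from bob.bio.face.embeddings.tf2_inception_resnet import InceptionResnetv2

# The resource key must point at a downloaded checkpoint (set once per machine,
# typically via the bob config command-line tool).
extractor_path = rc["bob.bio.face.tf2.casia-webface-inception-v2"]
embedding = InceptionResnetv2(
    checkpoint_path=extractor_path, memory_demanding=False
)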