Skip to content
Snippets Groups Projects
Commit cc4cc182 authored by Laurent COLBOIS's avatar Laurent COLBOIS
Browse files

Refactoring DNN baselines which only differ by the specific extractor

parent 87a4224d
No related branches found
No related tags found
1 merge request!119Refactor baseline config helpers
Pipeline #50766 passed
Showing
with 97 additions and 255 deletions
# Baseline configuration: ArcFace embedding from InsightFace (MXNet backend).
from bob.bio.face.embeddings.mxnet_models import ArcFaceInsightFace
from bob.bio.face.config.baseline.helpers import lookup_config_from_database
from bob.bio.face.config.baseline.templates import arcface_baseline

# Pull annotation type, fixed positions and the memory-demanding flag from the
# database config loaded earlier on the bob config chain (defaults if absent).
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()


def load(annotation_type, fixed_positions=None):
    """Return an ArcFace vanilla-biometrics pipeline for this baseline.

    Parameters
    ----------
    annotation_type : str
        The annotation scheme of the target database (e.g. ``eyes-center``).
    fixed_positions : dict or None
        Optional fixed landmark positions overriding per-sample annotations.
    """
    return arcface_baseline(
        embedding=ArcFaceInsightFace(memory_demanding=memory_demanding),
        annotation_type=annotation_type,
        fixed_positions=fixed_positions,
    )


pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
# Baseline configuration: FaceNet (Sanderberg, checkpoint 20170512-110547).
from bob.bio.face.embeddings.tf2_inception_resnet import (
    FaceNetSanderberg_20170512_110547,
)
from bob.bio.face.config.baseline.helpers import lookup_config_from_database
from bob.bio.face.config.baseline.templates import facenet_baseline

# Database-provided settings (annotation scheme, fixed landmarks, memory flag).
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()


def load(annotation_type, fixed_positions=None):
    """Return a FaceNet-style vanilla-biometrics pipeline for this baseline.

    The shared ``facenet_baseline`` template handles cropping and the
    embedding transformer; only the extractor differs between baselines.
    """
    return facenet_baseline(
        embedding=FaceNetSanderberg_20170512_110547(
            memory_demanding=memory_demanding
        ),
        annotation_type=annotation_type,
        fixed_positions=fixed_positions,
    )


pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
import bob.bio.face
from sklearn.pipeline import make_pipeline from sklearn.pipeline import make_pipeline
from bob.bio.base.wrappers import wrap_sample_preprocessor
from bob.pipelines import wrap from bob.pipelines import wrap
from bob.bio.face.helpers import face_crop_solver from bob.bio.face.helpers import face_crop_solver
import numpy as np import numpy as np
......
# Baseline configuration: Inception-ResNet v1 trained on CASIA (center loss, 2018).
from bob.bio.face.embeddings.tf2_inception_resnet import (
    InceptionResnetv1_Casia_CenterLoss_2018,
)
from bob.bio.face.config.baseline.helpers import lookup_config_from_database
from bob.bio.face.config.baseline.templates import facenet_baseline

# Database-provided settings (annotation scheme, fixed landmarks, memory flag).
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()


def load(annotation_type, fixed_positions=None):
    """Return a FaceNet-style vanilla-biometrics pipeline for this baseline."""
    return facenet_baseline(
        embedding=InceptionResnetv1_Casia_CenterLoss_2018(
            memory_demanding=memory_demanding
        ),
        annotation_type=annotation_type,
        fixed_positions=fixed_positions,
    )


pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
# Baseline configuration: Inception-ResNet v1 trained on MS-Celeb (center loss, 2018).
from bob.bio.face.embeddings.tf2_inception_resnet import (
    InceptionResnetv1_MsCeleb_CenterLoss_2018,
)
from bob.bio.face.config.baseline.helpers import lookup_config_from_database
from bob.bio.face.config.baseline.templates import facenet_baseline

# Database-provided settings (annotation scheme, fixed landmarks, memory flag).
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()


def load(annotation_type, fixed_positions=None):
    """Return a FaceNet-style vanilla-biometrics pipeline for this baseline."""
    return facenet_baseline(
        embedding=InceptionResnetv1_MsCeleb_CenterLoss_2018(
            memory_demanding=memory_demanding
        ),
        annotation_type=annotation_type,
        fixed_positions=fixed_positions,
    )


pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
# Baseline configuration: Inception-ResNet v2 trained on CASIA (center loss, 2018).
from bob.bio.face.embeddings.tf2_inception_resnet import (
    InceptionResnetv2_Casia_CenterLoss_2018,
)
from bob.bio.face.config.baseline.helpers import lookup_config_from_database
from bob.bio.face.config.baseline.templates import facenet_baseline

# Database-provided settings (annotation scheme, fixed landmarks, memory flag).
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()


def load(annotation_type, fixed_positions=None):
    """Return a FaceNet-style vanilla-biometrics pipeline for this baseline."""
    return facenet_baseline(
        embedding=InceptionResnetv2_Casia_CenterLoss_2018(
            memory_demanding=memory_demanding
        ),
        annotation_type=annotation_type,
        fixed_positions=fixed_positions,
    )


pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
# Baseline configuration: Inception-ResNet v2 trained on MS-Celeb (center loss, 2018).
from bob.bio.face.embeddings.tf2_inception_resnet import (
    InceptionResnetv2_MsCeleb_CenterLoss_2018,
)
from bob.bio.face.config.baseline.helpers import lookup_config_from_database
from bob.bio.face.config.baseline.templates import facenet_baseline

# Database-provided settings (annotation scheme, fixed landmarks, memory flag).
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()


def load(annotation_type, fixed_positions=None):
    """Return a FaceNet-style vanilla-biometrics pipeline for this baseline."""
    return facenet_baseline(
        embedding=InceptionResnetv2_MsCeleb_CenterLoss_2018(
            memory_demanding=memory_demanding
        ),
        annotation_type=annotation_type,
        fixed_positions=fixed_positions,
    )


pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
# Baseline configuration: MobileNet v2 trained on MS-Celeb with ArcFace loss (2021).
from bob.bio.face.embeddings.mobilenet_v2 import MobileNetv2_MsCeleb_ArcFace_2021
from bob.bio.face.config.baseline.helpers import lookup_config_from_database
from bob.bio.face.config.baseline.templates import arcface_baseline

# Database-provided settings (annotation scheme, fixed landmarks, memory flag).
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()


def load(annotation_type, fixed_positions=None):
    """Return an ArcFace vanilla-biometrics pipeline for this baseline."""
    return arcface_baseline(
        embedding=MobileNetv2_MsCeleb_ArcFace_2021(
            memory_demanding=memory_demanding
        ),
        annotation_type=annotation_type,
        fixed_positions=fixed_positions,
    )


pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
# Baseline configuration: ResNet-50 trained on MS-Celeb with ArcFace loss (2021).
from bob.bio.face.embeddings.resnet50 import Resnet50_MsCeleb_ArcFace_2021
from bob.bio.face.config.baseline.helpers import lookup_config_from_database
from bob.bio.face.config.baseline.templates import arcface_baseline

# Database-provided settings (annotation scheme, fixed landmarks, memory flag).
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()


def load(annotation_type, fixed_positions=None):
    """Return an ArcFace vanilla-biometrics pipeline for this baseline."""
    return arcface_baseline(
        embedding=Resnet50_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
        annotation_type=annotation_type,
        fixed_positions=fixed_positions,
    )


pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
# Baseline configuration: ResNet-50 trained on VGGFace2 with ArcFace loss (2021).
from bob.bio.face.embeddings.resnet50 import Resnet50_VGG2_ArcFace_2021
from bob.bio.face.config.baseline.helpers import lookup_config_from_database
from bob.bio.face.config.baseline.templates import arcface_baseline

# Database-provided settings (annotation scheme, fixed landmarks, memory flag).
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()


def load(annotation_type, fixed_positions=None):
    """Return an ArcFace vanilla-biometrics pipeline for this baseline."""
    return arcface_baseline(
        embedding=Resnet50_VGG2_ArcFace_2021(memory_demanding=memory_demanding),
        annotation_type=annotation_type,
        fixed_positions=fixed_positions,
    )


pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
from bob.bio.face.config.baseline.helpers import (
dnn_default_cropping,
embedding_transformer,
)
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
)
def arcface_baseline(embedding, annotation_type, fixed_positions=None):
    """Assemble a vanilla-biometrics pipeline around an ArcFace-style embedding.

    Crops faces to 112x112 (the input size ArcFace models expect), feeds them
    through *embedding*, and compares templates with a distance algorithm.

    Parameters
    ----------
    embedding
        The face-embedding transformer to plug into the pipeline.
    annotation_type : str
        Annotation scheme of the target database (e.g. ``eyes-center``).
    fixed_positions : dict or None
        Optional fixed landmark positions overriding per-sample annotations.
    """
    cropped_image_size = (112, 112)

    if annotation_type == "eyes-center":
        # Hard-coded eye positions, kept for backward consistency with the
        # pre-refactoring baselines.
        cropped_positions = {"leye": (55, 81), "reye": (55, 42)}
    else:
        cropped_positions = dnn_default_cropping(
            cropped_image_size, annotation_type
        )

    transformer = embedding_transformer(
        cropped_image_size=cropped_image_size,
        embedding=embedding,
        cropped_positions=cropped_positions,
        fixed_positions=fixed_positions,
        color_channel="rgb",
    )
    return VanillaBiometricsPipeline(transformer, Distance())
def facenet_baseline(embedding, annotation_type, fixed_positions=None):
    """Assemble a vanilla-biometrics pipeline around a FaceNet-style embedding.

    Crops faces to 160x160 using the default DNN cropping for the given
    annotation scheme, runs them through *embedding* (with an MTCNN
    annotator), and compares templates with a distance algorithm.

    Parameters
    ----------
    embedding
        The face-embedding transformer to plug into the pipeline.
    annotation_type : str
        Annotation scheme of the target database.
    fixed_positions : dict or None
        Optional fixed landmark positions overriding per-sample annotations.
    """
    cropped_image_size = (160, 160)
    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)

    transformer = embedding_transformer(
        cropped_image_size=cropped_image_size,
        embedding=embedding,
        cropped_positions=cropped_positions,
        fixed_positions=fixed_positions,
        color_channel="rgb",
        annotator="mtcnn",
    )
    return VanillaBiometricsPipeline(transformer, Distance())
# Baseline configuration: Inception-ResNet v2 with a user-supplied CASIA-WebFace
# checkpoint (path read from the bob global configuration).
from bob.extension import rc
from bob.bio.face.embeddings.tf2_inception_resnet import InceptionResnetv2
from bob.bio.face.config.baseline.helpers import lookup_config_from_database
from bob.bio.face.config.baseline.templates import facenet_baseline

# Database-provided settings (annotation scheme, fixed landmarks, memory flag).
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()


def load(annotation_type, fixed_positions=None):
    """Return a FaceNet-style vanilla-biometrics pipeline for this baseline.

    The extractor checkpoint path is resolved from the bob configuration key
    ``bob.bio.face.tf2.casia-webface-inception-v2``.
    """
    extractor_path = rc["bob.bio.face.tf2.casia-webface-inception-v2"]
    embedding = InceptionResnetv2(
        checkpoint_path=extractor_path, memory_demanding=memory_demanding
    )
    return facenet_baseline(
        embedding=embedding,
        annotation_type=annotation_type,
        fixed_positions=fixed_positions,
    )


pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment