Commit cc4cc182 authored by Laurent COLBOIS

Refactoring DNN baselines which only differ by the specific extractor

parent 87a4224d
1 merge request: !119 Refactor baseline config helpers
Pipeline #50766 passed
Showing with 97 additions and 255 deletions
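
Each DNN baseline config below used to define its own cropping and assemble its transformer and pipeline inline; after this refactor a config only supplies its embedding extractor and delegates the rest to a shared template. The common shape of the refactored configs, as a composite sketch of the diffs that follow (the extractor is a placeholder):

from bob.bio.face.config.baseline.helpers import lookup_config_from_database
from bob.bio.face.config.baseline.templates import arcface_baseline  # or facenet_baseline

annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()


def load(annotation_type, fixed_positions=None):
    return arcface_baseline(
        embedding=...,  # the only per-baseline difference, e.g. ArcFaceInsightFace(...)
        annotation_type=annotation_type,
        fixed_positions=fixed_positions,
    )


pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer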
Baseline config: ArcFaceInsightFace

 from bob.bio.face.embeddings.mxnet_models import ArcFaceInsightFace
-from bob.bio.face.config.baseline.helpers import (
-    lookup_config_from_database,
-    dnn_default_cropping,
-    embedding_transformer,
-)
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
+from bob.bio.face.config.baseline.helpers import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import arcface_baseline

 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()


 def load(annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (112, 112)
-    if annotation_type == "eyes-center":
-        # Hard coding eye positions for backward consistency
-        cropped_positions = {
-            "leye": (55, 81),
-            "reye": (55, 42),
-        }
-    else:
-        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
-
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
+    return arcface_baseline(
         embedding=ArcFaceInsightFace(memory_demanding=memory_demanding),
-        cropped_positions=cropped_positions,
+        annotation_type=annotation_type,
         fixed_positions=fixed_positions,
-        color_channel="rgb",
     )
-
-    algorithm = Distance()
-    return VanillaBiometricsPipeline(transformer, algorithm)


 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
Baseline config: FaceNetSanderberg_20170512_110547

 from bob.bio.face.embeddings.tf2_inception_resnet import (
     FaceNetSanderberg_20170512_110547,
 )
-from bob.bio.face.config.baseline.helpers import (
-    lookup_config_from_database,
-    dnn_default_cropping,
-    embedding_transformer,
-)
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
+from bob.bio.face.config.baseline.helpers import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import facenet_baseline

 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()


 def load(annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (160, 160)
-    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
-
-    # ASSEMBLE TRANSFORMER
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
+    return facenet_baseline(
         embedding=FaceNetSanderberg_20170512_110547(memory_demanding=memory_demanding),
-        cropped_positions=cropped_positions,
+        annotation_type=annotation_type,
         fixed_positions=fixed_positions,
-        color_channel="rgb",
-        annotator="mtcnn",
     )
-
-    algorithm = Distance()
-    return VanillaBiometricsPipeline(transformer, algorithm)


 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
Unchanged context from another config file (the rest of this diff is collapsed):

 import bob.bio.face
 from sklearn.pipeline import make_pipeline
 from bob.bio.base.wrappers import wrap_sample_preprocessor
 from bob.pipelines import wrap
 from bob.bio.face.helpers import face_crop_solver
 import numpy as np
 ......
Baseline config: InceptionResnetv1_Casia_CenterLoss_2018

 from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv1_Casia_CenterLoss_2018,
 )
-from bob.bio.face.config.baseline.helpers import (
-    lookup_config_from_database,
-    dnn_default_cropping,
-    embedding_transformer,
-)
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
+from bob.bio.face.config.baseline.helpers import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import facenet_baseline

 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()


 def load(annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (160, 160)
-    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
-
-    # ASSEMBLE TRANSFORMER
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
+    return facenet_baseline(
         embedding=InceptionResnetv1_Casia_CenterLoss_2018(
             memory_demanding=memory_demanding
         ),
-        cropped_positions=cropped_positions,
+        annotation_type=annotation_type,
         fixed_positions=fixed_positions,
-        color_channel="rgb",
-        annotator="mtcnn",
     )
-
-    algorithm = Distance()
-    return VanillaBiometricsPipeline(transformer, algorithm)


 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
Baseline config: InceptionResnetv1_MsCeleb_CenterLoss_2018

 from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv1_MsCeleb_CenterLoss_2018,
 )
-from bob.bio.face.config.baseline.helpers import (
-    lookup_config_from_database,
-    dnn_default_cropping,
-    embedding_transformer,
-)
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
+from bob.bio.face.config.baseline.helpers import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import facenet_baseline

 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()


 def load(annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (160, 160)
-    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
-
-    # ASSEMBLE TRANSFORMER
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
+    return facenet_baseline(
         embedding=InceptionResnetv1_MsCeleb_CenterLoss_2018(
             memory_demanding=memory_demanding
         ),
-        cropped_positions=cropped_positions,
+        annotation_type=annotation_type,
         fixed_positions=fixed_positions,
-        color_channel="rgb",
-        annotator="mtcnn",
     )
-
-    algorithm = Distance()
-    return VanillaBiometricsPipeline(transformer, algorithm)


 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
Baseline config: InceptionResnetv2_Casia_CenterLoss_2018

 from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv2_Casia_CenterLoss_2018,
 )
-from bob.bio.face.config.baseline.helpers import (
-    lookup_config_from_database,
-    dnn_default_cropping,
-    embedding_transformer,
-)
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
+from bob.bio.face.config.baseline.helpers import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import facenet_baseline

 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()


 def load(annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (160, 160)
-    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
-
-    # ASSEMBLE TRANSFORMER
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
+    return facenet_baseline(
         embedding=InceptionResnetv2_Casia_CenterLoss_2018(
             memory_demanding=memory_demanding
         ),
-        cropped_positions=cropped_positions,
+        annotation_type=annotation_type,
         fixed_positions=fixed_positions,
-        color_channel="rgb",
-        annotator="mtcnn",
     )
-
-    algorithm = Distance()
-    return VanillaBiometricsPipeline(transformer, algorithm)


 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
Baseline config: InceptionResnetv2_MsCeleb_CenterLoss_2018

 from bob.bio.face.embeddings.tf2_inception_resnet import (
     InceptionResnetv2_MsCeleb_CenterLoss_2018,
 )
-from bob.bio.face.config.baseline.helpers import (
-    lookup_config_from_database,
-    dnn_default_cropping,
-    embedding_transformer,
-)
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
+from bob.bio.face.config.baseline.helpers import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import facenet_baseline

 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()


 def load(annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (160, 160)
-    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
-
-    # ASSEMBLE TRANSFORMER
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
+    return facenet_baseline(
         embedding=InceptionResnetv2_MsCeleb_CenterLoss_2018(
             memory_demanding=memory_demanding
         ),
-        cropped_positions=cropped_positions,
+        annotation_type=annotation_type,
         fixed_positions=fixed_positions,
-        color_channel="rgb",
-        annotator="mtcnn",
     )
-
-    algorithm = Distance()
-    return VanillaBiometricsPipeline(transformer, algorithm)


 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
Baseline config: MobileNetv2_MsCeleb_ArcFace_2021

 from bob.bio.face.embeddings.mobilenet_v2 import MobileNetv2_MsCeleb_ArcFace_2021
-from bob.bio.face.config.baseline.helpers import (
-    lookup_config_from_database,
-    dnn_default_cropping,
-    embedding_transformer,
-)
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
+from bob.bio.face.config.baseline.helpers import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import arcface_baseline

 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()


 def load(annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (112, 112)
-    if annotation_type == "eyes-center":
-        # Hard coding eye positions for backward consistency
-        cropped_positions = {
-            "leye": (55, 81),
-            "reye": (55, 42),
-        }
-    else:
-        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
-
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
+    return arcface_baseline(
         embedding=MobileNetv2_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
-        cropped_positions=cropped_positions,
+        annotation_type=annotation_type,
         fixed_positions=fixed_positions,
-        color_channel="rgb",
     )
-
-    algorithm = Distance()
-    return VanillaBiometricsPipeline(transformer, algorithm)


 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
Baseline config: Resnet50_MsCeleb_ArcFace_2021

 from bob.bio.face.embeddings.resnet50 import Resnet50_MsCeleb_ArcFace_2021
-from bob.bio.face.config.baseline.helpers import (
-    lookup_config_from_database,
-    dnn_default_cropping,
-    embedding_transformer,
-)
+from bob.bio.face.config.baseline.helpers import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import arcface_baseline
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)

 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()


 def load(annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (112, 112)
-    if annotation_type == "eyes-center":
-        # Hard coding eye positions for backward consistency
-        cropped_positions = {
-            "leye": (55, 81),
-            "reye": (55, 42),
-        }
-    else:
-        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
-
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
+    return arcface_baseline(
         embedding=Resnet50_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
-        cropped_positions=cropped_positions,
+        annotation_type=annotation_type,
         fixed_positions=fixed_positions,
-        color_channel="rgb",
     )
-
-    algorithm = Distance()
-    return VanillaBiometricsPipeline(transformer, algorithm)


 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
Baseline config: Resnet50_VGG2_ArcFace_2021

 from bob.bio.face.embeddings.resnet50 import Resnet50_VGG2_ArcFace_2021
-from bob.bio.face.config.baseline.helpers import (
-    lookup_config_from_database,
-    dnn_default_cropping,
-    embedding_transformer,
-)
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
+from bob.bio.face.config.baseline.helpers import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import arcface_baseline

 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()


 def load(annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (112, 112)
-    if annotation_type == "eyes-center":
-        # Hard coding eye positions for backward consistency
-        cropped_positions = {
-            "leye": (55, 81),
-            "reye": (55, 42),
-        }
-    else:
-        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
-
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
+    return arcface_baseline(
         embedding=Resnet50_VGG2_ArcFace_2021(memory_demanding=memory_demanding),
-        cropped_positions=cropped_positions,
+        annotation_type=annotation_type,
         fixed_positions=fixed_positions,
-        color_channel="rgb",
     )
-
-    algorithm = Distance()
-    return VanillaBiometricsPipeline(transformer, algorithm)


 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
New file: bob.bio.face.config.baseline.templates (the shared baseline templates)

+from bob.bio.face.config.baseline.helpers import (
+    dnn_default_cropping,
+    embedding_transformer,
+)
+from bob.bio.base.pipelines.vanilla_biometrics import (
+    Distance,
+    VanillaBiometricsPipeline,
+)
+
+
+def arcface_baseline(embedding, annotation_type, fixed_positions=None):
+    # DEFINE CROPPING
+    cropped_image_size = (112, 112)
+    if annotation_type == "eyes-center":
+        # Hard coding eye positions for backward consistency
+        cropped_positions = {
+            "leye": (55, 81),
+            "reye": (55, 42),
+        }
+    else:
+        cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=embedding,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+    )
+
+    algorithm = Distance()
+    return VanillaBiometricsPipeline(transformer, algorithm)
+
+
+def facenet_baseline(embedding, annotation_type, fixed_positions=None):
+    # DEFINE CROPPING
+    cropped_image_size = (160, 160)
+    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
+
+    # ASSEMBLE TRANSFORMER
+    transformer = embedding_transformer(
+        cropped_image_size=cropped_image_size,
+        embedding=embedding,
+        cropped_positions=cropped_positions,
+        fixed_positions=fixed_positions,
+        color_channel="rgb",
+        annotator="mtcnn",
+    )
+
+    algorithm = Distance()
+    return VanillaBiometricsPipeline(transformer, algorithm)
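
As a usage note, the two templates can also be called directly to build a pipeline outside a config file; a minimal sketch using one of the extractors refactored in this commit (everything else comes from the new templates module above):

from bob.bio.face.embeddings.mxnet_models import ArcFaceInsightFace
from bob.bio.face.config.baseline.templates import arcface_baseline

# Builds the full VanillaBiometricsPipeline: cropping + embedding extractor + Distance.
pipeline = arcface_baseline(
    embedding=ArcFaceInsightFace(memory_demanding=False),
    annotation_type="eyes-center",  # selects the hard-coded eye-position branch
)
transformer = pipeline.transformer  # the preprocessing + embedding transformer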
Baseline config: InceptionResnetv2 loaded from a checkpoint path (bob.extension rc)

 from bob.extension import rc
 from bob.bio.face.embeddings.tf2_inception_resnet import InceptionResnetv2
-from bob.bio.face.preprocessor import FaceCrop
-from bob.bio.face.config.baseline.helpers import (
-    lookup_config_from_database,
-    dnn_default_cropping,
-    embedding_transformer,
-)
-from sklearn.pipeline import make_pipeline
-from bob.pipelines.wrappers import wrap
-from bob.bio.base.pipelines.vanilla_biometrics import (
-    Distance,
-    VanillaBiometricsPipeline,
-)
+from bob.bio.face.config.baseline.helpers import lookup_config_from_database
+from bob.bio.face.config.baseline.templates import facenet_baseline

 annotation_type, fixed_positions, memory_demanding = lookup_config_from_database()


 def load(annotation_type, fixed_positions=None):
-    # DEFINE CROPPING
-    cropped_image_size = (160, 160)
-    cropped_positions = dnn_default_cropping(cropped_image_size, annotation_type)
-
     extractor_path = rc["bob.bio.face.tf2.casia-webface-inception-v2"]
     embedding = InceptionResnetv2(
         checkpoint_path=extractor_path, memory_demanding=memory_demanding
     )
-
-    # ASSEMBLE TRANSFORMER
-    transformer = embedding_transformer(
-        cropped_image_size=cropped_image_size,
+    return facenet_baseline(
         embedding=embedding,
-        cropped_positions=cropped_positions,
+        annotation_type=annotation_type,
         fixed_positions=fixed_positions,
-        color_channel="rgb",
-        annotator="mtcnn",
     )
-
-    algorithm = Distance()
-    return VanillaBiometricsPipeline(transformer, algorithm)


 pipeline = load(annotation_type, fixed_positions)
 transformer = pipeline.transformer
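
This last baseline reads its checkpoint location from bob.extension's global configuration (the rc dictionary, backed by the ~/.bobrc file). A hedged sketch of pointing it at a local checkpoint before loading the config; the path is a placeholder, and for a persistent setting the bob.extension `bob config set` CLI would be the usual route, assuming it is available in this environment:

from bob.extension import rc

# In-process override only; this does not write ~/.bobrc.
rc["bob.bio.face.tf2.casia-webface-inception-v2"] = "/path/to/inception-resnet-v2-checkpoint"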