Commit 172e1259 authored by Tiago de Freitas Pereira's avatar Tiago de Freitas Pereira
Browse files

Organizing the baselines

parent db921441
Pipeline #51195 failed at a stage after running
for 1 minute and 26 seconds
import bob.bio.base
from bob.bio.face.preprocessor import FaceCrop
from bob.bio.face.embeddings.pytorch import AFFFE_2021
from bob.pipelines import wrap
import scipy.spatial
from bob.bio.base.pipelines.vanilla_biometrics import Distance
from sklearn.pipeline import make_pipeline
from bob.pipelines import wrap
from bob.bio.base.pipelines.vanilla_biometrics import VanillaBiometricsPipeline
from bob.bio.face.embeddings.pytorch import afffe_baseline
from bob.bio.face.utils import lookup_config_from_database
# Default: assume the extractor fits comfortably in memory.
memory_demanding = False
# When this config is chained after a database config, bob's config
# resolution places the ``database`` object in this module's namespace;
# inherit its annotation/cropping settings in that case.
if "database" in locals():
    annotation_type = database.annotation_type
    fixed_positions = database.fixed_positions
    # Some database implementations do not expose ``memory_demanding``.
    memory_demanding = (
        database.memory_demanding if hasattr(database, "memory_demanding") else False
    )
else:
    annotation_type = None
    fixed_positions = None
cropped_positions = {"leye": (110, 144), "reye": (110, 96)}
preprocessor_transformer = FaceCrop(
cropped_image_size=(224, 224),
cropped_positions=cropped_positions,
color_channel="rgb",
fixed_positions=fixed_positions,
allow_upside_down_normalized_faces=True,
annotation_type, fixed_positions, _ = lookup_config_from_database(
locals().get("database")
)
# Forward per-sample annotations to the cropper only when cropping is
# annotation-driven; with fixed positions (or no crop positions at all)
# the extra argument is not needed.
transform_extra_arguments = (
    None
    if (cropped_positions is None or fixed_positions is not None)
    else (("annotations", "annotations"),)
)
# AFFFE (2021) PyTorch embedding extractor.
extractor_transformer = AFFFE_2021()
def load(annotation_type, fixed_positions=None):
    """Build the AFFFE (2021) vanilla-biometrics pipeline.

    Delegates entirely to :func:`afffe_baseline`; ``fixed_positions``
    may stay ``None`` when the database supplies per-sample annotations.
    """
    pipeline = afffe_baseline(annotation_type, fixed_positions)
    return pipeline
# Algorithm: cosine distance between embeddings (flagged as a distance,
# not a similarity).
algorithm = Distance(
    distance_function=scipy.spatial.distance.cosine, is_distance_function=True
)
# Chain the Transformers together: sample-wrapped cropper then extractor.
transformer = make_pipeline(
    wrap(
        ["sample"],
        preprocessor_transformer,
        transform_extra_arguments=transform_extra_arguments,
    ),
    wrap(["sample"], extractor_transformer)
    # Add more transformers here if needed
)
# NOTE(review): this assignment is immediately overwritten two lines
# below — looks like merge residue; confirm which pipeline is intended.
pipeline = load(annotation_type, fixed_positions)
# Assemble the Vanilla Biometric pipeline and execute
pipeline = VanillaBiometricsPipeline(transformer, algorithm)
transformer = pipeline.transformer
from bob.bio.face.embeddings.mxnet import ArcFaceInsightFace_LResNet100
from bob.bio.face.config.baseline.helpers import embedding_transformer_112x112
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
)
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
memory_demanding = (
database.memory_demanding if hasattr(database, "memory_demanding") else False
)
else:
annotation_type = None
fixed_positions = None
memory_demanding = False
from bob.bio.face.embeddings.mxnet import arcface_baseline
from bob.bio.face.utils import lookup_config_from_database
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
locals().get("database")
......@@ -23,19 +7,12 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
def load(annotation_type, fixed_positions=None):
transformer = embedding_transformer_112x112(
ArcFaceInsightFace_LResNet100(memory_demanding=memory_demanding),
annotation_type,
fixed_positions,
color_channel="rgb",
)
return arcface_baseline(
embedding=ArcFaceInsightFace(memory_demanding=memory_demanding),
embedding=ArcFaceInsightFace_LResNet100(memory_demanding=memory_demanding),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
)
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
from bob.bio.face.embeddings.tf2_inception_resnet import (
FaceNetSanderberg_20170512_110547,
)
from bob.bio.face.embeddings.tensorflow import facenet_sanderberg_20170512_110547
from bob.bio.face.utils import lookup_config_from_database
from bob.bio.face.config.baseline.templates import facenet_baseline
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
locals().get("database")
......@@ -10,12 +7,9 @@ annotation_type, fixed_positions, memory_demanding = lookup_config_from_database
def load(annotation_type, fixed_positions=None):
return facenet_baseline(
embedding=FaceNetSanderberg_20170512_110547(memory_demanding=memory_demanding),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
return facenet_sanderberg_20170512_110547(
annotation_type, fixed_positions, memory_demanding
)
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
from bob.bio.face.embeddings.tf2_inception_resnet import (
InceptionResnetv1_Casia_CenterLoss_2018,
)
from bob.bio.face.embeddings.tensorflow import inception_resnet_v1_casia_centerloss_2018
from bob.bio.face.utils import lookup_config_from_database
from bob.bio.face.config.baseline.templates import facenet_baseline
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
locals().get("database")
)
def load(annotation_type, fixed_positions=None):
return facenet_baseline(
embedding=InceptionResnetv1_Casia_CenterLoss_2018(
memory_demanding=memory_demanding
),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
def load(annotation_type, fixed_positions=None, memory_demanding=None):
    """Assemble the Inception-ResNet-v1 CASIA center-loss (2018) baseline."""
    params = (annotation_type, fixed_positions, memory_demanding)
    return inception_resnet_v1_casia_centerloss_2018(*params)
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
pipeline = load(annotation_type, fixed_positions, memory_demanding)
from bob.bio.face.embeddings.tf2_inception_resnet import (
InceptionResnetv1_MsCeleb_CenterLoss_2018,
from bob.bio.face.embeddings.tensorflow import (
inception_resnet_v1_msceleb_centerloss_2018,
)
from bob.bio.face.utils import lookup_config_from_database
from bob.bio.face.config.baseline.templates import facenet_baseline
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
locals().get("database")
)
def load(annotation_type, fixed_positions=None):
return facenet_baseline(
embedding=InceptionResnetv1_MsCeleb_CenterLoss_2018(
memory_demanding=memory_demanding
),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
def load(annotation_type, fixed_positions=None, memory_demanding=None):
    """Assemble the Inception-ResNet-v1 MS-Celeb center-loss (2018) baseline."""
    baseline_args = (annotation_type, fixed_positions, memory_demanding)
    return inception_resnet_v1_msceleb_centerloss_2018(*baseline_args)
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
pipeline = load(annotation_type, fixed_positions, memory_demanding)
from bob.bio.face.embeddings.tf2_inception_resnet import (
InceptionResnetv2_Casia_CenterLoss_2018,
)
from bob.bio.face.embeddings.tensorflow import inception_resnet_v2_casia_centerloss_2018
from bob.bio.face.utils import lookup_config_from_database
from bob.bio.face.config.baseline.templates import facenet_baseline
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
locals().get("database")
)
def load(annotation_type, fixed_positions=None):
return facenet_baseline(
embedding=InceptionResnetv2_Casia_CenterLoss_2018(
memory_demanding=memory_demanding
),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
def load(annotation_type, fixed_positions=None, memory_demanding=None):
    """Assemble the Inception-ResNet-v2 CASIA center-loss (2018) baseline."""
    pipeline = inception_resnet_v2_casia_centerloss_2018(
        annotation_type,
        fixed_positions,
        memory_demanding,
    )
    return pipeline
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
pipeline = load(annotation_type, fixed_positions, memory_demanding)
from bob.bio.face.embeddings.tf2_inception_resnet import (
InceptionResnetv2_MsCeleb_CenterLoss_2018,
from bob.bio.face.embeddings.tensorflow import (
inception_resnet_v2_msceleb_centerloss_2018,
)
from bob.bio.face.utils import lookup_config_from_database
from bob.bio.face.config.baseline.templates import facenet_baseline
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
locals().get("database")
)
def load(annotation_type, fixed_positions=None):
return facenet_baseline(
embedding=InceptionResnetv2_MsCeleb_CenterLoss_2018(
memory_demanding=memory_demanding
),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
def load(annotation_type, fixed_positions=None, memory_demanding=None):
    """Assemble the Inception-ResNet-v2 MS-Celeb center-loss (2018) baseline."""
    pipeline = inception_resnet_v2_msceleb_centerloss_2018(
        annotation_type,
        fixed_positions,
        memory_demanding,
    )
    return pipeline
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
pipeline = load(annotation_type, fixed_positions, memory_demanding)
from bob.bio.face.embeddings.mobilenet_v2 import MobileNetv2_MsCeleb_ArcFace_2021
from bob.bio.face.embeddings.tensorflow import mobilenetv2_msceleb_arcface_2021
from bob.bio.face.utils import lookup_config_from_database
from bob.bio.face.config.baseline.templates import arcface_baseline
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
locals().get("database")
)
def load(annotation_type, fixed_positions=None):
return arcface_baseline(
embedding=MobileNetv2_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
def load(annotation_type, fixed_positions=None, memory_demanding=None):
    """Assemble the MobileNet-v2 MS-Celeb ArcFace (2021) baseline pipeline."""
    args = (annotation_type, fixed_positions, memory_demanding)
    return mobilenetv2_msceleb_arcface_2021(*args)
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
pipeline = load(annotation_type, fixed_positions, memory_demanding)
import bob.bio.base
from bob.bio.face.preprocessor import FaceCrop
from bob.bio.face.extractor import MxNetModel
from bob.bio.base.algorithm import Distance
from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
import scipy.spatial
from bob.bio.base.pipelines.vanilla_biometrics import Distance
from sklearn.pipeline import make_pipeline
from bob.pipelines import wrap
from bob.bio.base.pipelines.vanilla_biometrics import VanillaBiometricsPipeline
memory_demanding = False
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
memory_demanding = (
database.memory_demanding if hasattr(database, "memory_demanding") else False
)
else:
annotation_type = None
fixed_positions = None
cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
preprocessor_transformer = FaceCrop(
cropped_image_size=(112, 112),
cropped_positions={"leye": (49, 72), "reye": (49, 38)},
color_channel="rgb",
fixed_positions=fixed_positions,
)
transform_extra_arguments = (
None
if (cropped_positions is None or fixed_positions is not None)
else (("annotations", "annotations"),)
)
extractor_transformer = MxNetModel()
algorithm = Distance(
distance_function=scipy.spatial.distance.cosine, is_distance_function=True
)
# Chain the Transformers together
transformer = make_pipeline(
wrap(
["sample"],
preprocessor_transformer,
transform_extra_arguments=transform_extra_arguments,
),
wrap(["sample"], extractor_transformer)
# Add more transformers here if needed
)
# Assemble the Vanilla Biometric pipeline and execute
pipeline = VanillaBiometricsPipeline(transformer, algorithm)
transformer = pipeline.transformer
......@@ -14,19 +14,24 @@ from bob.bio.base.pipelines.vanilla_biometrics import VanillaBiometricsPipeline
annotator_transformer = BobIpTinyface()
preprocessor_transformer = FaceCrop(cropped_image_size=(112,112), cropped_positions={'leye':(49,72), 'reye':(49,38)}, color_channel='rgb',annotator=annotator_transformer)
preprocessor_transformer = FaceCrop(
cropped_image_size=(112, 112),
cropped_positions={"leye": (49, 72), "reye": (49, 38)},
color_channel="rgb",
annotator=annotator_transformer,
)
extractor_transformer = MxNetModel()
algorithm = Distance(distance_function = scipy.spatial.distance.cosine,is_distance_function = True)
algorithm = Distance(
distance_function=scipy.spatial.distance.cosine, is_distance_function=True
)
transformer = make_pipeline(
wrap(["sample"], preprocessor_transformer),
wrap(["sample"], extractor_transformer)
wrap(["sample"], preprocessor_transformer), wrap(["sample"], extractor_transformer)
)
pipeline = VanillaBiometricsPipeline(transformer, algorithm)
transformer = pipeline.transformer
import bob.bio.base
from bob.bio.face.preprocessor import FaceCrop
from bob.bio.face.extractor import PyTorchLibraryModel
from facenet_pytorch import InceptionResnetV1
from bob.bio.base.algorithm import Distance
from bob.bio.base.pipelines.vanilla_biometrics.legacy import BioAlgorithmLegacy
import scipy.spatial
from bob.bio.base.pipelines.vanilla_biometrics import Distance
from sklearn.pipeline import make_pipeline
from bob.pipelines import wrap
from bob.bio.base.pipelines.vanilla_biometrics import VanillaBiometricsPipeline
memory_demanding = False
if "database" in locals():
annotation_type = database.annotation_type
fixed_positions = database.fixed_positions
memory_demanding = (
database.memory_demanding if hasattr(database, "memory_demanding") else False
)
else:
annotation_type = None
fixed_positions = None
cropped_positions = {"leye": (49, 72), "reye": (49, 38)}
cropped_positions = {"leye": (110, 144), "reye": (110, 96)}
preprocessor_transformer = FaceCrop(
cropped_image_size=(224, 224),
cropped_positions={"leye": (110, 144), "reye": (110, 96)},
color_channel="rgb",
fixed_positions=fixed_positions,
)
transform_extra_arguments = (
None
if (cropped_positions is None or fixed_positions is not None)
else (("annotations", "annotations"),)
)
transform_extra_arguments = (
None
if (cropped_positions is None or fixed_positions is not None)
else (("annotations", "annotations"),)
)
model = InceptionResnetV1(pretrained="vggface2").eval()
extractor_transformer = PyTorchLibraryModel(model=model)
algorithm = Distance(
distance_function=scipy.spatial.distance.cosine, is_distance_function=True
)
# Chain the Transformers together
transformer = make_pipeline(
wrap(
["sample"],
preprocessor_transformer,
transform_extra_arguments=transform_extra_arguments,
),
wrap(["sample"], extractor_transformer)
# Add more transformers here if needed
)
# Assemble the Vanilla Biometric pipeline and execute
pipeline = VanillaBiometricsPipeline(transformer, algorithm)
transformer = pipeline.transformer
from bob.bio.face.embeddings.resnet50 import Resnet50_MsCeleb_ArcFace_2021
from bob.bio.face.embeddings.tensorflow import resnet50_msceleb_arcface_2021
from bob.bio.face.utils import lookup_config_from_database
from bob.bio.face.config.baseline.templates import arcface_baseline
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
locals().get("database")
)
def load(annotation_type, fixed_positions=None):
return arcface_baseline(
embedding=Resnet50_MsCeleb_ArcFace_2021(memory_demanding=memory_demanding),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
def load(annotation_type, fixed_positions=None, memory_demanding=None):
    """Assemble the ResNet-50 MS-Celeb ArcFace (2021) baseline pipeline."""
    pipeline = resnet50_msceleb_arcface_2021(
        annotation_type,
        fixed_positions,
        memory_demanding,
    )
    return pipeline
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
pipeline = load(annotation_type, fixed_positions, memory_demanding)
from bob.bio.face.embeddings.resnet50 import Resnet50_VGG2_ArcFace_2021
from bob.bio.face.embeddings.tensorflow import resnet50_vgg2_arcface_2021
from bob.bio.face.utils import lookup_config_from_database
from bob.bio.face.config.baseline.templates import arcface_baseline
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
locals().get("database")
)
def load(annotation_type, fixed_positions=None):
return arcface_baseline(
embedding=Resnet50_VGG2_ArcFace_2021(memory_demanding=memory_demanding),
annotation_type=annotation_type,
fixed_positions=fixed_positions,
def load(annotation_type, fixed_positions=None, memory_demanding=None):
    """Assemble the ResNet-50 VGGFace2 ArcFace (2021) baseline pipeline."""
    baseline_args = (annotation_type, fixed_positions, memory_demanding)
    return resnet50_vgg2_arcface_2021(*baseline_args)
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer
pipeline = load(annotation_type, fixed_positions, memory_demanding)
from bob.bio.face.utils import (
dnn_default_cropping,
embedding_transformer,
)
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
VanillaBiometricsPipeline,
)
def arcface_baseline(embedding, annotation_type, fixed_positions=None):
    """Build a vanilla-biometrics pipeline around an ArcFace-style embedding.

    The face is cropped to 112x112 (RGB, MTCNN annotator) and compared
    with the default :class:`Distance` algorithm.
    """
    crop_size = (112, 112)
    if annotation_type == "eyes-center":
        # Hard-coded eye landmarks kept for backward consistency.
        positions = {"leye": (55, 81), "reye": (55, 42)}
    else:
        positions = dnn_default_cropping(crop_size, annotation_type)

    return VanillaBiometricsPipeline(
        embedding_transformer(
            cropped_image_size=crop_size,
            embedding=embedding,
            cropped_positions=positions,
            fixed_positions=fixed_positions,
            color_channel="rgb",
            annotator="mtcnn",
        ),
        Distance(),
    )
def facenet_baseline(embedding, annotation_type, fixed_positions=None):
    """Build a vanilla-biometrics pipeline around a FaceNet-style embedding.

    The face is cropped to 160x160 (RGB, MTCNN annotator) with positions
    derived from ``annotation_type``, and compared with the default
    :class:`Distance` algorithm.
    """
    crop_size = (160, 160)
    positions = dnn_default_cropping(crop_size, annotation_type)

    # ASSEMBLE TRANSFORMER
    embedder = embedding_transformer(
        cropped_image_size=crop_size,
        embedding=embedding,
        cropped_positions=positions,
        fixed_positions=fixed_positions,
        color_channel="rgb",
        annotator="mtcnn",
    )
    return VanillaBiometricsPipeline(embedder, Distance())
from bob.extension import rc
from bob.bio.face.embeddings.tf2_inception_resnet import InceptionResnetv2
from bob.bio.face.utils import lookup_config_from_database
from bob.bio.face.config.baseline.templates import facenet_baseline
annotation_type, fixed_positions, memory_demanding = lookup_config_from_database(
locals().get("database")
)
def load(annotation_type, fixed_positions=None):
    """Build the FaceNet-style pipeline backed by the CASIA-WebFace
    Inception-ResNet-v2 checkpoint configured via ``bob.extension.rc``.

    NOTE(review): reads the module-level ``memory_demanding`` flag —
    confirm it is defined before this function is called.
    """
    checkpoint = rc["bob.bio.face.tf2.casia-webface-inception-v2"]
    net = InceptionResnetv2(
        checkpoint_path=checkpoint,
        memory_demanding=memory_demanding,
    )
    return facenet_baseline(
        embedding=net,
        annotation_type=annotation_type,
        fixed_positions=fixed_positions,
    )
pipeline = load(annotation_type, fixed_positions)
transformer = pipeline.transformer