Created proper baselines

parent d93b03ff
# config/baseline/facenet_sanderberg.py
from bob.bio.face.embeddings import FaceNetSanderberg
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160

# Pick up annotation information from the database if one is already defined
# in the configuration context; otherwise fall back to defaults.
if "database" in locals():
    annotation_type = database.annotation_type
    fixed_positions = database.fixed_positions
else:
    annotation_type = None
    fixed_positions = None
transformer = embedding_transformer_160x160(
    FaceNetSanderberg(), annotation_type, fixed_positions
)
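For context, the tests further down consume these baseline configs by loading them as bob configuration files and reading their transformer attribute. A minimal sketch of that usage, assuming load comes from bob.extension.config (the loader the tests' load([...]) call appears to be):

import pkg_resources
from bob.extension.config import load  # assumed loader, matching the tests below

# Resolve the packaged config file and load it as a configuration module.
config_name = pkg_resources.resource_filename(
    "bob.bio.face", "config/baseline/facenet_sanderberg.py"
)
transformer = load([config_name]).transformer  # scikit-learn pipeline over Samples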
# config/baseline/helpers.py
from sklearn.pipeline import make_pipeline
from bob.pipelines import wrap
from bob.bio.face.helpers import face_crop_solver
def embedding_transformer_160x160(embedding, annotation_type, fixed_positions):
    r"""
    Creates a pipeline composed of a FaceCropper and an embedding extractor.
    This transformer is suited for FaceNet-based architectures.

    .. warning::
       This will resize images to :math:`160 \times 160`.
    """
    # This is the image size this model expects
    CROPPED_IMAGE_HEIGHT = 160
    CROPPED_IMAGE_WIDTH = 160
    cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
    color_channel = "rgb"
    # Solve which face cropper to use, based on the annotation type
    if annotation_type == "bounding-box":
        transform_extra_arguments = (("annotations", "annotations"),)
        TOP_LEFT_POS = (0, 0)
        BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)

        # Crops the face using the bounding-box annotations, without eye detection
        face_cropper = face_crop_solver(
            cropped_image_size,
            color_channel=color_channel,
            cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS},
            fixed_positions=fixed_positions,
        )

    elif annotation_type == "eyes-center":
        transform_extra_arguments = (("annotations", "annotations"),)
        # Target eye positions for frontal images, as (row, column)
        RIGHT_EYE_POS = (46, 53)
        LEFT_EYE_POS = (46, 107)

        # Crops the face by aligning the annotated eye positions
        face_cropper = face_crop_solver(
            cropped_image_size,
            color_channel=color_channel,
            cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
            fixed_positions=fixed_positions,
        )

    else:
        transform_extra_arguments = None
        # Default to a simple resize of the whole image
        face_cropper = face_crop_solver(cropped_image_size)
    transformer = make_pipeline(
        wrap(
            ["sample"],
            face_cropper,
            transform_extra_arguments=transform_extra_arguments,
        ),
        wrap(["sample"], embedding),
    )

    return transformer
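To make the data flow concrete, here is a minimal, hedged sketch of how this transformer consumes bob.pipelines Samples. The image shape, key, and annotation coordinates are made up for illustration; transform_extra_arguments is what forwards each sample's annotations attribute to the cropper's transform call:

import numpy
from bob.pipelines import Sample
from bob.bio.face.embeddings import FaceNetSanderberg

# Bob images are channels-first (C x H x W); annotations are (row, col) points.
image = numpy.random.rand(3, 400, 400)
sample = Sample(image, key="subject-1", annotations={"reye": (115, 132), "leye": (115, 267)})

pipeline = embedding_transformer_160x160(FaceNetSanderberg(), "eyes-center", None)
embeddings = pipeline.transform([sample])  # Samples whose .data is the embedding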
# config/baseline/inception_resnetv1_casiawebface.py
from bob.bio.face.embeddings import InceptionResnetv1_CasiaWebFace
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160

if "database" in locals():
    annotation_type = database.annotation_type
    fixed_positions = database.fixed_positions
else:
    annotation_type = None
    fixed_positions = None

transformer = embedding_transformer_160x160(
    InceptionResnetv1_CasiaWebFace(), annotation_type, fixed_positions
)
# config/baseline/inception_resnetv1_msceleb.py
from bob.bio.face.embeddings import InceptionResnetv1_MsCeleb
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160

if "database" in locals():
    annotation_type = database.annotation_type
    fixed_positions = database.fixed_positions
else:
    annotation_type = None
    fixed_positions = None

transformer = embedding_transformer_160x160(
    InceptionResnetv1_MsCeleb(), annotation_type, fixed_positions
)
# config/baseline/inception_resnetv2_casiawebface.py
from bob.bio.face.embeddings import InceptionResnetv2_CasiaWebFace
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160

if "database" in locals():
    annotation_type = database.annotation_type
    fixed_positions = database.fixed_positions
else:
    annotation_type = None
    fixed_positions = None

transformer = embedding_transformer_160x160(
    InceptionResnetv2_CasiaWebFace(), annotation_type, fixed_positions
)
# config/baseline/inception_resnetv2_msceleb.py
from bob.bio.face.embeddings import InceptionResnetv2_MsCeleb
from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160

if "database" in locals():
    annotation_type = database.annotation_type
    fixed_positions = database.fixed_positions
else:
    annotation_type = None
    fixed_positions = None

transformer = embedding_transformer_160x160(
    InceptionResnetv2_MsCeleb(), annotation_type, fixed_positions
)
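The four Inception configs above differ only in the embedding class; everything else is the same template. A hypothetical one-liner, not part of the package, that captures the shared pattern:

from bob.bio.face.config.baseline.helpers import embedding_transformer_160x160

def make_160x160_baseline(embedding_cls, annotation_type=None, fixed_positions=None):
    # Same recipe the configs above repeat, parameterized by the embedding class.
    return embedding_transformer_160x160(embedding_cls(), annotation_type, fixed_positions)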
#!/usr/bin/env python
# Mobio database configurations for the vanilla-biometrics pipeline
from bob.bio.face.database import MobioBioDatabase
from bob.bio.base.pipelines.vanilla_biometrics import DatabaseConnector
from bob.extension import rc
database = DatabaseConnector(
    MobioBioDatabase(
        original_directory=rc["bob.db.mobio.directory"],
        annotation_directory=rc["bob.db.mobio.annotation_directory"],
        original_extension=".png",
        protocol="mobile0-male",
    )
)
database.allow_scoring_with_all_biometric_references = True
mobio_image_directory = rc["bob.db.mobio.directory"]
mobio_annotation_directory = rc["bob.db.mobio.annotation_directory"]
allow_scoring_with_all_biometric_references = True
annotation_type = "eyes-center"
fixed_positions = None
mobio_image = DatabaseConnector(
    MobioBioDatabase(
        original_directory=mobio_image_directory,
        original_extension=".png",
        annotation_directory=mobio_annotation_directory,
        annotation_type="eyecenter",
        protocol="male",
        models_depend_on_protocol=True,
    ),
    allow_scoring_with_all_biometric_references=allow_scoring_with_all_biometric_references,
    annotation_type=annotation_type,
    fixed_positions=fixed_positions,
)
mobio_male = DatabaseConnector(
    MobioBioDatabase(
        original_directory=mobio_image_directory,
        original_extension=".png",
        annotation_directory=mobio_annotation_directory,
        annotation_type="eyecenter",
        protocol="male",
        models_depend_on_protocol=True,
        all_files_options={"gender": "male"},
        extractor_training_options={"gender": "male"},
        projector_training_options={"gender": "male"},
        enroller_training_options={"gender": "male"},
        z_probe_options={"gender": "male"},
    ),
    allow_scoring_with_all_biometric_references=allow_scoring_with_all_biometric_references,
    annotation_type=annotation_type,
    fixed_positions=fixed_positions,
)
mobio_female = DatabaseConnector(
    MobioBioDatabase(
        original_directory=mobio_image_directory,
        original_extension=".png",
        annotation_directory=mobio_annotation_directory,
        annotation_type="eyecenter",
        protocol="female",
        models_depend_on_protocol=True,
        all_files_options={"gender": "female"},
        extractor_training_options={"gender": "female"},
        projector_training_options={"gender": "female"},
        enroller_training_options={"gender": "female"},
        z_probe_options={"gender": "female"},
    ),
    allow_scoring_with_all_biometric_references=allow_scoring_with_all_biometric_references,
    annotation_type=annotation_type,
    fixed_positions=fixed_positions,
)
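As a rough illustration of why the raw MobioBioDatabase objects are now wrapped: DatabaseConnector exposes the sample-based interface the vanilla-biometrics pipeline queries. A hedged sketch of that interface, with method names assumed from bob.bio.base's vanilla-biometrics protocol:

# Training, enrollment, and probing samples as the pipeline would request them.
training_samples = mobio_male.background_model_samples()
references = mobio_male.references(group="dev")  # SampleSets to enroll
probes = mobio_male.probes(group="dev")          # SampleSets to score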
def test_facenet_baseline():
    # Config loading follows the same pattern as the other tests below.
    config_name = pkg_resources.resource_filename(
        "bob.bio.face", "config/baseline/facenet_sanderberg.py"
    )
    transformer = load([config_name]).transformer

    fake_sample = get_fake_sample()
    transformed_sample = transformer.transform([fake_sample])[0]
    transformed_data = transformed_sample.data
    assert transformed_data.size == 128
def test_inception_resnetv2_msceleb():
    config_name = pkg_resources.resource_filename(
        "bob.bio.face", "config/baseline/inception_resnetv2_msceleb.py"
    )
    transformer = load([config_name]).transformer

    fake_sample = get_fake_sample()
    transformed_sample = transformer.transform([fake_sample])[0]
    transformed_data = transformed_sample.data
    assert transformed_data.size == 128
def test_inception_resnetv2_casiawebface():
    config_name = pkg_resources.resource_filename(
        "bob.bio.face", "config/baseline/inception_resnetv2_casiawebface.py"
    )
    transformer = load([config_name]).transformer

    fake_sample = get_fake_sample()
    transformed_sample = transformer.transform([fake_sample])[0]
    transformed_data = transformed_sample.data
    assert transformed_data.size == 128
def test_inception_resnetv1_msceleb():
    config_name = pkg_resources.resource_filename(
        "bob.bio.face", "config/baseline/inception_resnetv1_msceleb.py"
    )
    transformer = load([config_name]).transformer

    fake_sample = get_fake_sample()
    transformed_sample = transformer.transform([fake_sample])[0]
    transformed_data = transformed_sample.data
    assert transformed_data.size == 128
def test_inception_resnetv1_casiawebface():
    config_name = pkg_resources.resource_filename(
        "bob.bio.face", "config/baseline/inception_resnetv1_casiawebface.py"
    )
    transformer = load([config_name]).transformer

    fake_sample = get_fake_sample()
    transformed_sample = transformer.transform([fake_sample])[0]
    transformed_data = transformed_sample.data
    assert transformed_data.size == 128
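The tests rely on a get_fake_sample() helper that this diff does not show. A hypothetical sketch of what it would need to provide, assuming a channels-first image with eye annotations; the names and values here are illustrative, not the package's actual helper:

import numpy
from bob.pipelines import Sample

def get_fake_sample():
    # A random 3x400x400 "image" with plausible (row, col) eye annotations.
    data = numpy.random.rand(3, 400, 400)
    annotations = {"reye": (115, 132), "leye": (115, 267)}
    return Sample(data, key="fake-sample", annotations=annotations)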