Commit 2e4dd79a authored by Tiago de Freitas Pereira's avatar Tiago de Freitas Pereira
Browse files

Added arcface

parent fa53a59d
Pipeline #40428 failed with stage
in 13 minutes and 51 seconds
......@@ -64,3 +64,64 @@ def embedding_transformer_160x160(embedding, annotation_type, fixed_positions):
)
return transformer
def embedding_transformer_112x112(embedding, annotation_type, fixed_positions):
    r"""
    Creates a pipeline composed by a FaceCropper and an Embedding extractor.
    This transformer is suited for ArcFace/InsightFace based architectures.

    .. warning::
       This will resize images to :math:`112 \times 112`

    Parameters
    ----------
    embedding : object
        Sample-wise transformer that computes the embedding from the cropped face.
    annotation_type : str
        Type of the annotations available: ``"bounding-box"``, ``"eyes-center"``,
        or anything else (falls back to a simple resize crop).
    fixed_positions : dict or None
        Fixed annotation positions forwarded to the face cropper.

    Returns
    -------
    sklearn.pipeline.Pipeline
        Pipeline of ``face_cropper`` followed by ``embedding``, both wrapped
        as sample transformers.
    """
    # This is the size of the image that this model expects
    CROPPED_IMAGE_HEIGHT = 112
    CROPPED_IMAGE_WIDTH = 112
    cropped_image_size = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)
    color_channel = "rgb"

    #### SOLVING THE FACE CROPPER TO BE USED
    if annotation_type == "bounding-box":
        transform_extra_arguments = (("annotations", "annotations"),)
        TOP_LEFT_POS = (0, 0)
        BOTTOM_RIGHT_POS = (CROPPED_IMAGE_HEIGHT, CROPPED_IMAGE_WIDTH)

        # Detects the face and crops it without eye detection
        face_cropper = face_crop_solver(
            cropped_image_size,
            color_channel=color_channel,
            cropped_positions={"topleft": TOP_LEFT_POS, "bottomright": BOTTOM_RIGHT_POS},
            fixed_positions=fixed_positions,
        )

    elif annotation_type == "eyes-center":
        transform_extra_arguments = (("annotations", "annotations"),)
        # eye positions for frontal images
        RIGHT_EYE_POS = (32, 34)
        LEFT_EYE_POS = (32, 77)

        # Detects the face and crops it using the eye annotations
        face_cropper = face_crop_solver(
            cropped_image_size,
            color_channel=color_channel,
            cropped_positions={"leye": LEFT_EYE_POS, "reye": RIGHT_EYE_POS},
            fixed_positions=fixed_positions,
        )

    else:
        transform_extra_arguments = None
        # DEFAULT TO FACE SIMPLE RESIZE
        face_cropper = face_crop_solver(cropped_image_size)

    transformer = make_pipeline(
        wrap(
            ["sample"],
            face_cropper,
            transform_extra_arguments=transform_extra_arguments,
        ),
        wrap(["sample"], embedding),
    )

    return transformer
......@@ -40,7 +40,6 @@ class ArcFace_InsightFaceTF(TensorflowCompatV1):
# https://github.com/luckycallor/InsightFace-tensorflow/blob/master/evaluate.py#L42
data = check_array(data, allow_nd=True)
data = data / 127.5 - 1.0
return super().transform(data)
def load_model(self):
......
......@@ -74,3 +74,19 @@ def test_inception_resnetv1_casiawebface():
transformed_sample = transformer.transform([fake_sample])[0]
transformed_data = transformed_sample.data
assert transformed_sample.data.size == 128
def test_arcface_insight_tf():
    """The ArcFace InsightFace baseline must produce a 512-dimensional embedding."""
    import tensorflow as tf

    # Start from a clean TF1 default graph; earlier tests may have
    # populated it already.
    tf.compat.v1.reset_default_graph()

    path = pkg_resources.resource_filename(
        "bob.bio.face", "config/baseline/arcface_insight_tf.py"
    )
    pipeline = load([path]).transformer

    sample = get_fake_sample()
    embedding = pipeline.transform([sample])[0].data
    assert embedding.size == 512
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment