Commit 179c75a3 authored by Tiago de Freitas Pereira

Merge branch 'arcface' into 'master'

Arcface from InsightFace

See merge request !79
parents 14fa2b41 c1339770
@@ -14,3 +14,4 @@ dist
record.txt
build/
bob/bio/face/embeddings/data
*.DS_Store
from bob.bio.face.embeddings import ArcFace_InsightFaceTF
from bob.bio.face.embeddings import ArcFaceInsightFace
from bob.bio.face.config.baseline.helpers import embedding_transformer_112x112
from bob.bio.base.pipelines.vanilla_biometrics import (
Distance,
@@ -16,7 +16,7 @@ else:
def load(annotation_type, fixed_positions=None):
transformer = embedding_transformer_112x112(
ArcFace_InsightFaceTF(), annotation_type, fixed_positions
ArcFaceInsightFace(), annotation_type, fixed_positions, color_channel="rgb"
)
algorithm = Distance()
......
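A minimal usage sketch for the baseline config above (hypothetical driver code, not part of this commit; it assumes the config's `load` helper builds the pipeline from the transformer and the `Distance` algorithm, as in the other bob.bio.face baselines):

    from bob.bio.face.config.baseline.arcface_insightface import load

    # "eyes-center" selects the hard-coded 112x112 eye positions used below
    pipeline = load(annotation_type="eyes-center", fixed_positions=None)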
@@ -111,7 +111,7 @@ def legacy_default_cropping(cropped_image_size, annotation_type):
return cropped_positions
def embedding_transformer(cropped_image_size, embedding, annotation_type, cropped_positions, fixed_positions=None):
def embedding_transformer(cropped_image_size, embedding, annotation_type, cropped_positions, fixed_positions=None, color_channel="rgb"):
"""
    Creates a pipeline composed of a FaceCropper and an embedding extractor.
    This transformer is suited for Facenet-based architectures
@@ -120,8 +120,6 @@ def embedding_transformer(cropped_image_size, embedding, annotation_type, croppe
    This will resize images to the requested `cropped_image_size`
"""
color_channel = "rgb"
face_cropper = face_crop_solver(
cropped_image_size,
color_channel=color_channel,
@@ -142,7 +140,7 @@ def embedding_transformer(cropped_image_size, embedding, annotation_type, croppe
return transformer
def embedding_transformer_160x160(embedding, annotation_type, fixed_positions):
def embedding_transformer_160x160(embedding, annotation_type, fixed_positions, color_channel="rgb"):
"""
    Creates a pipeline composed of a FaceCropper and an embedding extractor.
    This transformer is suited for Facenet-based architectures
@@ -153,10 +151,10 @@ def embedding_transformer_160x160(embedding, annotation_type, fixed_positions):
"""
cropped_positions = embedding_transformer_default_cropping((160, 160), annotation_type)
return embedding_transformer((160, 160), embedding, annotation_type, cropped_positions, fixed_positions)
return embedding_transformer((160, 160), embedding, annotation_type, cropped_positions, fixed_positions, color_channel=color_channel)
def embedding_transformer_112x112(embedding, annotation_type, fixed_positions):
def embedding_transformer_112x112(embedding, annotation_type, fixed_positions, color_channel="rgb"):
"""
    Creates a pipeline composed of a FaceCropper and an embedding extractor.
    This transformer is suited for Facenet-based architectures
@@ -166,7 +164,6 @@ def embedding_transformer_112x112(embedding, annotation_type, fixed_positions):
"""
cropped_image_size = (112, 112)
if annotation_type == "eyes-center":
# Hard coding eye positions for backward consistency
cropped_positions = {'leye': (32, 77), 'reye': (32, 34)}
@@ -174,7 +171,7 @@ def embedding_transformer_112x112(embedding, annotation_type, fixed_positions):
# Will use default
cropped_positions = embedding_transformer_default_cropping(cropped_image_size, annotation_type)
return embedding_transformer(cropped_image_size, embedding, annotation_type, cropped_positions, fixed_positions)
return embedding_transformer(cropped_image_size, embedding, annotation_type, cropped_positions, fixed_positions, color_channel=color_channel)
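# Hypothetical example of the new keyword in use:
#   embedding_transformer_112x112(ArcFaceInsightFace(), "eyes-center", None, color_channel="rgb")
# The caller can now match the channel layout expected by the embedding network.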
def crop_80x64(annotation_type, fixed_positions=None, color_channel="gray"):
......
@@ -34,6 +34,8 @@ from .tf2_inception_resnet import (
FaceNetSanderberg_20170512_110547
)
from .mxnet_models import ArcFaceInsightFace
# gets sphinx autodoc done right - don't remove it
def __appropriate__(*args):
"""Says object was actually declared here, and not in the import module.
@@ -56,6 +58,7 @@ __appropriate__(
InceptionResnetv1_MsCeleb_CenterLoss_2018,
InceptionResnetv2_Casia_CenterLoss_2018,
InceptionResnetv1_Casia_CenterLoss_2018,
FaceNetSanderberg_20170512_110547
FaceNetSanderberg_20170512_110547,
ArcFaceInsightFace
)
__all__ = [_ for _ in dir() if not _.startswith("_")]
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
import os
from sklearn.base import TransformerMixin, BaseEstimator
from .tensorflow_compat_v1 import TensorflowCompatV1
from bob.io.image import to_matplotlib
import numpy as np
from sklearn.utils import check_array
class ArcFace_InsightFaceTF(TensorflowCompatV1):
"""
Models copied from
https://github.com/luckycallor/InsightFace-tensorflow/blob/master/backbones/utils.py
The input shape of this model is :math:`3 \times 112 \times 112`
The output embedding is :math:`n \times 512`, where :math:`n` is the number of samples
"""
def __init__(self):
bob_rc_variable = "bob.bio.face.arcface_tf_path"
urls = [
"https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/arcface_insight_tf.tar.gz"
]
model_subdirectory = "arcface_tf_path"
checkpoint_filename = self.get_modelpath(bob_rc_variable, model_subdirectory)
self.download_model(checkpoint_filename, urls)
input_shape = (1, 112, 112, 3)
architecture_fn = init_network
super().__init__(checkpoint_filename, input_shape, architecture_fn)
def transform(self, data):
# https://github.com/luckycallor/InsightFace-tensorflow/blob/master/evaluate.py#L42
data = check_array(data, allow_nd=True)
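        # Map uint8 pixels from [0, 255] into [-1, 1], the input range used by
        # the InsightFace-TF evaluation code linked above.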
data = data / 127.5 - 1.0
return super().transform(data)
def load_model(self):
self.input_tensor = tf.compat.v1.placeholder(
dtype=tf.float32, shape=self.input_shape, name="input_image",
)
prelogits = self.architecture_fn(self.input_tensor)
self.embedding = prelogits
# Initializing the variables of the current graph
self.session = tf.compat.v1.Session()
self.session.run(tf.compat.v1.global_variables_initializer())
# Loading the last checkpoint and overwriting the current variables
saver = tf.compat.v1.train.Saver()
if os.path.splitext(self.checkpoint_filename)[1] == ".meta":
saver.restore(
self.session,
tf.train.latest_checkpoint(os.path.dirname(self.checkpoint_filename)),
)
elif os.path.isdir(self.checkpoint_filename):
saver.restore(
self.session, tf.train.latest_checkpoint(self.checkpoint_filename)
)
else:
saver.restore(self.session, self.checkpoint_filename)
self.loaded = True
###########################
# CODE COPIED FROM
# https://github.com/luckycallor/InsightFace-tensorflow/blob/master/backbones/utils.py
###########################
import tensorflow as tf
import tensorflow.contrib.slim as slim
from collections import namedtuple
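# The head below follows the InsightFace recipe: backbone features -> batch
# norm -> dropout -> flatten -> 512-d fully connected -> batch norm, with no
# final activation.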
def init_network(input_tensor):
with tf.variable_scope("embd_extractor", reuse=False):
arg_sc = resnet_arg_scope()
with slim.arg_scope(arg_sc):
net, _ = resnet_v2_m_50(input_tensor, is_training=False, return_raw=True)
net = slim.batch_norm(net, activation_fn=None, is_training=False)
net = slim.dropout(net, keep_prob=1, is_training=False)
net = slim.flatten(net)
net = slim.fully_connected(net, 512, normalizer_fn=None, activation_fn=None)
net = slim.batch_norm(
net, scale=False, activation_fn=None, is_training=False
)
# end_points['embds'] = net
return net
def resnet_v2_m_50(
inputs,
num_classes=None,
is_training=True,
return_raw=True,
global_pool=True,
output_stride=None,
spatial_squeeze=True,
reuse=None,
scope="resnet_v2_50",
):
"""ResNet-50 model of [1]. See resnet_v2() for arg and return description."""
blocks = [
resnet_v2_block("block1", base_depth=16, num_units=3, stride=2),
resnet_v2_block("block2", base_depth=32, num_units=4, stride=2),
resnet_v2_block("block3", base_depth=64, num_units=14, stride=2),
resnet_v2_block("block4", base_depth=128, num_units=3, stride=2),
]
return resnet_v2_m(
inputs,
blocks,
num_classes,
is_training=is_training,
return_raw=return_raw,
global_pool=global_pool,
output_stride=output_stride,
include_root_block=True,
spatial_squeeze=spatial_squeeze,
reuse=reuse,
scope=scope,
)
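# Each resnet_v2_block below expands into `num_units` residual units: the first
# unit carries the block's stride, the remaining units use stride 1, and every
# unit outputs base_depth * 4 channels.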
def resnet_v2_block(scope, base_depth, num_units, stride):
return Block(
scope,
block,
[{"depth": base_depth * 4, "stride": stride}]
+ (num_units - 1) * [{"depth": base_depth * 4, "stride": 1}],
)
class Block(namedtuple("Block", ["scope", "unit_fn", "args"])):
"""A named tuple describing a ResNet block.
Its parts are:
scope: The scope of the `Block`.
unit_fn: The ResNet unit function which takes as input a `Tensor` and returns another `Tensor` with the output of the ResNet unit.
args: A list of length equal to the number of units in the `Block`. The list contains one (depth, depth_bottleneck, stride) tuple for each unit in the block to serve as argument to unit_fn.
"""
pass
def resnet_v2_m(
inputs,
blocks,
num_classes=None,
is_training=True,
return_raw=True,
global_pool=True,
output_stride=None,
include_root_block=True,
spatial_squeeze=True,
reuse=None,
scope=None,
):
with tf.variable_scope(scope, "resnet_v2", [inputs], reuse=reuse) as sc:
end_points_collection = sc.original_name_scope + "_end_points"
with slim.arg_scope(
[slim.conv2d, bottleneck, stack_blocks_dense],
outputs_collections=end_points_collection,
):
with slim.arg_scope([slim.batch_norm], is_training=is_training):
net = inputs
if include_root_block:
if output_stride is not None:
if output_stride % 4 != 0:
raise ValueError(
"The output_stride needs to be a multiple of 4."
)
output_stride /= 4
with slim.arg_scope(
[slim.conv2d], activation_fn=None, normalizer_fn=None
):
net = conv2d_same(net, 64, 3, stride=1, scope="conv1")
# net = slim.max_pool2d(net, [3, 3], stride=2, scope='pool1')
net = stack_blocks_dense(net, blocks, output_stride)
end_points = slim.utils.convert_collection_to_dict(
end_points_collection
)
if return_raw:
return net, end_points
net = slim.batch_norm(net, activation_fn=tf.nn.relu, scope="postnorm")
end_points[sc.name + "/postnorm"] = net
if global_pool:
net = tf.reduce_mean(net, [1, 2], name="pool5", keep_dims=True)
end_points["global_pool"] = net
if num_classes:
net = slim.conv2d(
net,
num_classes,
[1, 1],
activation_fn=None,
normalizer_fn=None,
scope="logits",
)
end_points[sc.name + "/logits"] = net
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name="SpatialSqueeze")
end_points[sc.name + "/spatial_squeeze"] = net
end_points["predictions"] = slim.softmax(net, scope="predictions")
return net, end_points
def conv2d_same(inputs, num_outputs, kernel_size, stride, rate=1, scope=None):
if stride == 1:
return slim.conv2d(
inputs,
num_outputs,
kernel_size,
stride=1,
rate=rate,
padding="SAME",
scope=scope,
)
else:
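        # Explicit zero padding so the strided convolution matches "SAME"
        # output sizes: e.g. kernel_size=3, rate=1 gives pad_total=2, i.e. one
        # pixel of padding on each side before the "VALID" convolution.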
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
inputs = tf.pad(
inputs, [[0, 0], [pad_beg, pad_end], [pad_beg, pad_end], [0, 0]]
) # zero padding
return slim.conv2d(
inputs,
num_outputs,
kernel_size,
stride=stride,
rate=rate,
padding="VALID",
scope=scope,
)
@slim.add_arg_scope
def stack_blocks_dense(
net,
blocks,
output_stride=None,
store_non_strided_activations=False,
outputs_collections=None,
):
current_stride = 1
rate = 1
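    # Strides are applied until `current_stride` reaches `output_stride`;
    # beyond that point each stride is converted into dilation (`rate`),
    # capping spatial downsampling while the receptive field keeps growing.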
for block in blocks:
with tf.variable_scope(block.scope, "block", [net]) as sc:
block_stride = 1
for i, unit in enumerate(block.args):
if store_non_strided_activations and i == len(block.args) - 1:
block_stride = unit.get("stride", 1)
unit = dict(unit, stride=1)
with tf.variable_scope("unit_%d" % (i + 1), values=[net]):
if output_stride is not None and current_stride == output_stride:
net = block.unit_fn(net, rate=rate, **dict(unit, stride=1))
rate *= unit.get("stride", 1)
else:
net = block.unit_fn(net, rate=1, **unit)
current_stride *= unit.get("stride", 1)
if output_stride is not None and current_stride > output_stride:
raise ValueError(
"The target output_stride cannot be reached."
)
net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
if output_stride is not None and current_stride == output_stride:
rate *= block_stride
else:
net = subsample(net, block_stride)
current_stride *= block_stride
if output_stride is not None and current_stride > output_stride:
raise ValueError("The target output_stride cannot be reached.")
if output_stride is not None and current_stride != output_stride:
raise ValueError("The target output_stride cannot be reached.")
return net
def block(inputs, depth, stride, rate=1, outputs_collections=None, scope=None):
with tf.variable_scope(scope, "block_v2", [inputs]) as sc:
depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
preact = slim.batch_norm(inputs, activation_fn=tf.nn.leaky_relu, scope="preact")
if depth == depth_in:
shortcut = subsample(inputs, stride, "shortcut")
else:
shortcut = slim.conv2d(
preact,
depth,
[1, 1],
stride=stride,
normalizer_fn=None,
activation_fn=None,
scope="shortcut",
)
residual = conv2d_same(preact, depth, 3, stride, rate=rate, scope="conv1")
residual = slim.conv2d(
residual,
depth,
[3, 3],
stride=1,
normalizer_fn=None,
activation_fn=None,
scope="conv2",
)
# residual = slim.conv2d(residual, depth, [1, 1], stride=1, normalizer_fn=None, activation_fn=None, scope='conv3')
output = shortcut + residual
return slim.utils.collect_named_outputs(outputs_collections, sc.name, output)
@slim.add_arg_scope
def bottleneck(
inputs,
depth,
depth_bottleneck,
stride,
rate=1,
outputs_collections=None,
scope=None,
):
with tf.variable_scope(scope, "bottleneck_v2", [inputs]) as sc:
depth_in = slim.utils.last_dimension(inputs.get_shape(), min_rank=4)
preact = slim.batch_norm(inputs, activation_fn=tf.nn.leaky_relu, scope="preact")
if depth == depth_in:
shortcut = subsample(inputs, stride, "shortcut")
else:
shortcut = slim.conv2d(
preact,
depth,
[1, 1],
stride=stride,
normalizer_fn=None,
activation_fn=None,
scope="shortcut",
)
residual = slim.conv2d(
preact, depth_bottleneck, [1, 1], stride=1, scope="conv1"
)
residual = conv2d_same(
residual, depth_bottleneck, 3, stride, rate=rate, scope="conv2"
)
residual = slim.conv2d(
residual,
depth,
[1, 1],
stride=1,
normalizer_fn=None,
activation_fn=None,
scope="conv3",
)
output = shortcut + residual
return slim.utils.collect_named_outputs(outputs_collections, sc.name, output)
def subsample(inputs, factor, scope=None):
if factor == 1:
return inputs
else:
return slim.max_pool2d(
inputs, [1, 1], stride=factor, scope=scope
) # padding='VALID'
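# Shared layer defaults for the backbone: L2 weight decay (also applied to the
# batch-norm gamma parameters), leaky-ReLU activations, batch normalization
# after every convolution, and SAME-padded max pooling.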
def resnet_arg_scope(
weight_decay=0.0001,
batch_norm_decay=0.9,
batch_norm_epsilon=2e-5,
batch_norm_scale=True,
activation_fn=tf.nn.leaky_relu,
use_batch_norm=True,
batch_norm_updates_collections=tf.GraphKeys.UPDATE_OPS,
):
batch_norm_params = {
"decay": batch_norm_decay,
"epsilon": batch_norm_epsilon,
"scale": batch_norm_scale,
"updates_collections": batch_norm_updates_collections,
"fused": None, # Use fused batch norm if possible.
"param_regularizers": {"gamma": slim.l2_regularizer(weight_decay)},
}
with slim.arg_scope(
[slim.conv2d],
weights_regularizer=slim.l2_regularizer(weight_decay),
weights_initializer=tf.contrib.layers.xavier_initializer(uniform=False),
activation_fn=activation_fn,
normalizer_fn=slim.batch_norm if use_batch_norm else None,
normalizer_params=batch_norm_params,
):
with slim.arg_scope([slim.batch_norm], **batch_norm_params):
with slim.arg_scope([slim.max_pool2d], padding="SAME") as arg_sc:
return arg_sc
"""
Load and predict using mxnet-based checkpoints.
"""
from sklearn.base import TransformerMixin, BaseEstimator
from sklearn.utils import check_array
import numpy as np
from bob.bio.face.embeddings import download_model
import pkg_resources
import os
from bob.extension import rc
class ArcFaceInsightFace(TransformerMixin, BaseEstimator):
"""
    ArcFace from InsightFace.
Model and source code taken from the repository
https://github.com/deepinsight/insightface/blob/master/python-package/insightface/model_zoo/face_recognition.py
"""
def __init__(self, use_gpu=False, **kwargs):
super().__init__(**kwargs)
self.model = None
self.use_gpu = use_gpu
internal_path = pkg_resources.resource_filename(
__name__, os.path.join("data", "arcface_insightface"),
)
checkpoint_path = (
internal_path
if rc["bob.bio.face.models.ArcFaceInsightFace"] is None
else rc["bob.bio.face.models.ArcFaceInsightFace"]
)
urls = [
"https://www.idiap.ch/software/bob/data/bob/bob.bio.face/master/mxnet/arcface_r100_v1_mxnet.tar.gz"
]
download_model(checkpoint_path, urls, "arcface_r100_v1_mxnet.tar.gz")
self.checkpoint_path = checkpoint_path
def load_model(self):
import mxnet as mx
sym, arg_params, aux_params = mx.model.load_checkpoint(
os.path.join(self.checkpoint_path, "model"), 0
)
all_layers = sym.get_internals()
sym = all_layers["fc1_output"]
# LOADING CHECKPOINT
ctx = mx.gpu() if self.use_gpu else mx.cpu()
model = mx.mod.Module(symbol=sym, context=ctx, label_names=None)
data_shape = (1, 3, 112, 112)
model.bind(data_shapes=[("data", data_shape)])
model.set_params(arg_params, aux_params)
# warmup
data = mx.nd.zeros(shape=data_shape)
db = mx.io.DataBatch(data=(data,))
model.forward(db, is_train=False)
embedding = model.get_outputs()[0].asnumpy()
self.model = model
def transform(self, X):
import mxnet as mx
if self.model is None:
self.load_model()
X = check_array(X, allow_nd=True)
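        # mxnet expects channel-first batches of shape (N, 3, 112, 112),
        # matching the data_shape the module was bound with in load_model().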
X = mx.nd.array(X)
db = mx.io.DataBatch(data=(X,))
self.model.forward(db, is_train=False)
return self.model.get_outputs()[0].asnumpy()
def __getstate__(self):
# Handling unpicklable objects
d = self.__dict__.copy()
d["model"] = None
return d
def _more_tags(self):
return {"stateless": True, "requires_fit": False}
@@ -49,7 +49,7 @@ class InceptionResnet(TransformerMixin, BaseEstimator):
if self.preprocessor is not None:
X = self.preprocessor(tf.cast(X, "float32"))
prelogits = self.model(X, training=False)
prelogits = self.model.predict_on_batch(X)
embeddings = tf.math.l2_normalize(prelogits, axis=-1)
return embeddings
@@ -68,13 +68,10 @@ class InceptionResnet(TransformerMixin, BaseEstimator):
return self.inference(X).numpy()
def __setstate__(self, d):
self.__dict__ = d
def __getstate__(self):
# Handling unpicklable objects
d = self.__dict__.copy()
d["model"] = None
d["model"] = None
return d
def _more_tags(self):
@@ -146,7 +143,6 @@ class InceptionResnetv2_Casia_CenterLoss_2018(InceptionResnet):
)
class InceptionResnetv1_Casia_CenterLoss_2018(InceptionResnet):
"""
InceptionResnet v1 model trained in 2018 using the CasiaWebFace dataset in the context of the work:
@@ -172,12 +168,13 @@ class InceptionResnetv1_Casia_CenterLoss_2018(InceptionResnet):
download_model(
checkpoint_path, urls, "inceptionresnetv1_casia_centerloss_2018.tar.gz"
)
)
super(InceptionResnetv1_Casia_CenterLoss_2018, self).__init__(
checkpoint_path, preprocessor=tf.image.per_image_standardization,
)
class InceptionResnetv1_MsCeleb_CenterLoss_2018(InceptionResnet):
"""
InceptionResnet v1 model trained in 2018 using the MsCeleb dataset in the context of the work:
@@ -193,7 +190,8 @@ class InceptionResnetv1_MsCeleb_CenterLoss_2018(InceptionResnet):
checkpoint_path = (
internal_path
if rc["bob.bio.face.models.InceptionResnetv1_MsCeleb_CenterLoss_2018"] is None
if rc["bob.bio.face.models.InceptionResnetv1_MsCeleb_CenterLoss_2018"]
is None
else rc["bob.bio.face.models.InceptionResnetv1_MsCeleb_CenterLoss_2018"]
)
@@ -203,7 +201,7 @@ class InceptionResnetv1_MsCeleb_CenterLoss_2018(InceptionResnet):
download_model(
checkpoint_path, urls, "inceptionresnetv1_msceleb_centerloss_2018.tar.gz"
)
)
super(InceptionResnetv1_MsCeleb_CenterLoss_2018, self).__init__(
checkpoint_path, preprocessor=tf.image.per_image_standardization,
@@ -249,8 +247,8 @@ class FaceNetSanderberg_20170512_110547(InceptionResnet):
download_model(
checkpoint_path, urls, "facenet_sanderberg_20170512_110547.tar.gz"
)
)
super(FaceNetSanderberg_20170512_110547, self).__init__(
checkpoint_path, tf.image.per_image_standardization,
)
\ No newline at end of file
)
@@ -16,9 +16,11 @@ def change_color_channel(image, color_channel):
+ " image from a gray level image!"
)
return image
if color_channel == "rgb":
return image
if color_channel == "bgr":
return image[[2,1,0],...]
if color_channel == "gray":
return bob.ip.color.rgb_to_gray(image)
if color_channel == "red":
......
@@ -12,8 +12,8 @@ import os
import bob.io.base
import functools
import copy
import tensorflow as tf
from bob.bio.base.test.utils import is_library_available
images = dict()
images["bioref"] = (
@@ -54,7 +54,8 @@ def get_fake_samples_for_training():
annotations = {"reye": (131, 176), "leye": (222, 170)}
return [
Sample(x, key=str(i), subject=str(i), annotations=annotations) for i,x in enumerate(data)
Sample(x, key=str(i), subject=str(i), annotations=annotations)
for i, x in enumerate(data)
]
@@ -72,9 +73,7 @@ def run_baseline(baseline, samples_for_training=[]):
with tempfile.TemporaryDirectory() as d:
cpy = copy.deepcopy(pipeline)
checkpoint_pipeline = checkpoint_vanilla_biometrics(
cpy, base_dir=d
)
checkpoint_pipeline = checkpoint_vanilla_biometrics(cpy, base_dir=d)
checkpoint_scores = checkpoint_pipeline([], biometric_references, probes)
assert len(checkpoint_scores) == 1
@@ -106,37 +105,40 @@ def run_baseline(baseline, samples_for_training=[]):
assert "samplewrapper-2" in dirs
assert "scores" in dirs
@is_library_available("tensorflow")
def test_facenet_baseline():
run_baseline("facenet-sanderberg")
@is_library_available("tensorflow")
def test_inception_resnetv2_msceleb():
run_baseline("inception-resnetv2-msceleb")
@is_library_available("tensorflow")
def test_inception_resnetv2_casiawebface():
run_baseline("inception-resnetv2-casiawebface")
@is_library_available("tensorflow")
def test_inception_resnetv1_msceleb():
run_baseline("inception-resnetv1-msceleb")
@is_library_available("tensorflow")
def test_inception_resnetv1_casiawebface():
run_baseline("inception-resnetv1-casiawebface")
"""
def test_arcface_insight_tf():
import tensorflow as tf
tf.compat.v1.reset_default_graph()
@is_library_available("mxnet")
def test_arcface_insightface():
run_baseline("arcface-insightface")
run_baseline("arcface-insight-tf")
"""
def test_gabor_graph():
run_baseline("gabor_graph")
#def test_lda():
# def test_lda():
# run_baseline("lda", get_fake_samples_for_training())
@@ -3,8 +3,10 @@ import bob.io.base
import numpy as np
from bob.pipelines import Sample, wrap
import pkg_resources
from bob.bio.base.test.utils import is_library_available
@is_library_available("tensorflow")
def test_idiap_inceptionv2_msceleb():
from bob.bio.face.embeddings import InceptionResnetv2_MsCeleb_CenterLoss_2018
@@ -28,6 +30,7 @@ def test_idiap_inceptionv2_msceleb():
assert output.size == 128, output.shape
@is_library_available("tensorflow")
def test_idiap_inceptionv2_casia():
from bob.bio.face.embeddings import InceptionResnetv2_Casia_CenterLoss_2018
@@ -45,6 +48,7 @@ def test_idiap_inceptionv2_casia():
assert output.size == 128, output.shape
@is_library_available("tensorflow")
def test_idiap_inceptionv1_msceleb():
from bob.bio.face.embeddings import InceptionResnetv1_MsCeleb_CenterLoss_2018
@@ -62,6 +66,7 @@ def test_idiap_inceptionv1_msceleb():
assert output.size == 128, output.shape
@is_library_available("tensorflow")
def test_idiap_inceptionv1_casia():
from bob.bio.face.embeddings import InceptionResnetv1_Casia_CenterLoss_2018
@@ -78,6 +83,8 @@ def test_idiap_inceptionv1_casia():
assert output.size == 128, output.shape
@is_library_available("tensorflow")
def test_facenet_sanderberg():
from bob.bio.face.embeddings import FaceNetSanderberg_20170512_110547
@@ -94,18 +101,14 @@ def test_facenet_sanderberg():
assert output.size == 128, output.shape
"""
def test_arface_insight_tf():
import tensorflow as tf
tf.compat.v1.reset_default_graph()
from bob.bio.face.embeddings import ArcFace_InsightFaceTF
@is_library_available("mxnet")
def test_arcface_insight_face():
from bob.bio.face.embeddings import ArcFaceInsightFace
np.random.seed(10)
transformer = ArcFace_InsightFaceTF()
data = np.random.rand(3, 112, 112).astype("uint8")
output = transformer.transform([data])[0]
transformer = ArcFaceInsightFace()
data = np.random.rand(3, 112, 112) * 255
data = data.astype("uint8")
output = transformer.transform([data])
assert output.size == 512, output.shape
# Sample Batch
@@ -113,4 +116,3 @@ def test_arface_insight_tf():
transformer_sample = wrap(["sample"], transformer)
output = [s.data for s in transformer_sample.transform([sample])][0]
assert output.size == 512, output.shape
"""
\ No newline at end of file
@@ -52,6 +52,7 @@ requirements:
optional:
- bob.learn.tensorflow
- tensorflow
- mxnet
test:
imports:
......
@@ -139,7 +139,7 @@ setup(
'inception-resnetv2-casiawebface = bob.bio.face.config.baseline.inception_resnetv2_casiawebface:transformer',
'inception-resnetv1-msceleb = bob.bio.face.config.baseline.inception_resnetv1_msceleb:transformer',
'inception-resnetv2-msceleb = bob.bio.face.config.baseline.inception_resnetv2_msceleb:transformer',
'arcface-insight-tf = bob.bio.face.config.baseline.arcface_insight_tf:transformer',
'arcface-insightface = bob.bio.face.config.baseline.arcface_insightface:transformer',
'gabor-graph = bob.bio.face.config.baseline.gabor_graph:transformer',
'lgbphs = bob.bio.face.config.baseline.lgbphs:transformer',
'dummy = bob.bio.face.config.baseline.dummy:transformer',
@@ -153,7 +153,7 @@ setup(
'inception-resnetv1-msceleb = bob.bio.face.config.baseline.inception_resnetv1_msceleb:pipeline',
'inception-resnetv2-msceleb = bob.bio.face.config.baseline.inception_resnetv2_msceleb:pipeline',
'gabor_graph = bob.bio.face.config.baseline.gabor_graph:pipeline',
'arcface-insight-tf = bob.bio.face.config.baseline.arcface_insight_tf:pipeline',
'arcface-insightface = bob.bio.face.config.baseline.arcface_insightface:pipeline',
'lgbphs = bob.bio.face.config.baseline.lgbphs:pipeline',
'lda = bob.bio.face.config.baseline.lda:pipeline',
'dummy = bob.bio.face.config.baseline.dummy:pipeline',
@@ -166,7 +166,7 @@ setup(
'inception-resnetv1-msceleb = bob.bio.face.config.baseline.inception_resnetv1_msceleb',
'inception-resnetv2-msceleb = bob.bio.face.config.baseline.inception_resnetv2_msceleb',
'gabor_graph = bob.bio.face.config.baseline.gabor_graph',
'arcface-insight-tf = bob.bio.face.config.baseline.arcface_insight_tf',
'arcface-insightface = bob.bio.face.config.baseline.arcface_insightface',
'lgbphs = bob.bio.face.config.baseline.lgbphs',
'lda = bob.bio.face.config.baseline.lda',
......