Commit d2ca498b authored by Amir MOHAMMADI's avatar Amir MOHAMMADI

multioutput models can be used instead of embedding validation models

parent aa07a44f
from .alexnet import AlexNet_simplified
from .arcface import ArcFaceLayer
from .arcface import ArcFaceLayer3Penalties
from .arcface import ArcFaceModel
from .densenet import DeepPixBiS
from .densenet import DenseNet
from .densenet import densenet161 # noqa: F401
from .embedding_validation import EmbeddingValidation
from .mine import MineModel
from .resnet50_modified import resnet50_modified # noqa: F401
from .resnet50_modified import resnet101_modified # noqa: F401
......@@ -34,7 +32,5 @@ __appropriate__(
MineModel,
ArcFaceLayer,
ArcFaceLayer3Penalties,
ArcFaceModel,
EmbeddingValidation,
)
__all__ = [_ for _ in dir() if not _.startswith("_")]
......@@ -2,48 +2,6 @@ import math
import tensorflow as tf
from bob.learn.tensorflow.metrics.embedding_accuracy import accuracy_from_embeddings
from .embedding_validation import EmbeddingValidation
class ArcFaceModel(EmbeddingValidation):
    """ArcFace model whose ``call`` returns ``(logits, embeddings)``.

    Training feeds ``(X, y)`` through the model — the ArcFace head needs
    the labels to build the margin logits. Validation ignores the logits
    and measures accuracy directly from the embeddings.
    """

    def train_step(self, data):
        """Run one optimization step on a ``(X, y)`` batch.

        Returns a dict of metric results keyed by metric name, including
        the running mean training loss.
        """
        X, y = data
        with tf.GradientTape() as tape:
            logits, _ = self((X, y), training=True)
            # Keep the data loss and the regularization loss separate.
            # The previous code passed `regularization_losses=self.losses`
            # to `compiled_loss` AND added `tf.reduce_sum(self.losses)`
            # again below, counting the regularization term twice in the
            # optimized objective.
            loss = self.compiled_loss(
                y, logits, sample_weight=None, regularization_losses=None
            )
            reg_loss = tf.reduce_sum(self.losses)
            total_loss = loss + reg_loss

        trainable_vars = self.trainable_variables
        self.optimizer.minimize(total_loss, trainable_vars, tape=tape)

        self.compiled_metrics.update_state(y, logits, sample_weight=None)
        tf.summary.scalar("arc_face_loss", data=loss, step=self._train_counter)
        tf.summary.scalar("total_loss", data=total_loss, step=self._train_counter)
        self.train_loss(loss)
        return {m.name: m.result() for m in self.metrics + [self.train_loss]}

    def test_step(self, data):
        """Validation step: accuracy computed from the embeddings.

        The labels are passed to ``call`` only because the ArcFace head
        expects a ``(images, labels)`` input; they do not influence the
        returned embeddings.
        """
        images, labels = data
        _, embeddings = self((images, labels), training=False)
        self.validation_acc(accuracy_from_embeddings(labels, embeddings))
        return {m.name: m.result() for m in [self.validation_acc]}
class ArcFaceLayer(tf.keras.layers.Layer):
"""
......@@ -69,12 +27,13 @@ class ArcFaceLayer(tf.keras.layers.Layer):
If `True`, uses arcface loss. If `False`, it's a regular dense layer
"""
def __init__(self, n_classes=10, s=30, m=0.5, arc=True):
def __init__(self, n_classes, s=30, m=0.5, arc=True):
    """Configure the ArcFace logits layer.

    Parameters
    ----------
    n_classes
        Number of target classes (width of the logits).
    s
        Scale factor applied to the logits.
    m
        Additive angular margin.
    arc
        If ``True``, apply the ArcFace margin; otherwise the layer
        behaves as a regular dense layer.
    """
    super().__init__(name="arc_face_logits")
    # Hyper-parameters are kept as plain attributes for use in `call`.
    self.n_classes = n_classes
    self.s = s
    self.m = m
    self.arc = arc
    # Linear activation that casts outputs back to float32 — keeps the
    # logits in full precision under mixed-precision policies.
    self.act32bit = tf.keras.layers.Activation("linear", dtype="float32")
def build(self, input_shape):
super(ArcFaceLayer, self).build(input_shape[0])
......@@ -100,20 +59,28 @@ class ArcFaceLayer(tf.keras.layers.Layer):
sin_yi = tf.clip_by_value(tf.math.sqrt(1 - cos_yi ** 2), 0, 1)
# cos(x+m) = cos(x)*cos(m) - sin(x)*sin(m)
cos_yi_m = cos_yi * self.cos_m - sin_yi * self.sin_m
dtype = cos_yi.dtype
cos_m = tf.cast(self.cos_m, dtype=dtype)
sin_m = tf.cast(self.sin_m, dtype=dtype)
th = tf.cast(self.th, dtype=dtype)
mm = tf.cast(self.mm, dtype=dtype)
cos_yi_m = cos_yi * cos_m - sin_yi * sin_m
cos_yi_m = tf.where(cos_yi > self.th, cos_yi_m, cos_yi - self.mm)
cos_yi_m = tf.where(cos_yi > th, cos_yi_m, cos_yi - mm)
# Preparing the hot-output
one_hot = tf.one_hot(
tf.cast(y, tf.int32), depth=self.n_classes, name="one_hot_mask"
)
one_hot = tf.cast(one_hot, dtype=dtype)
logits = (one_hot * cos_yi_m) + ((1.0 - one_hot) * cos_yi)
logits = self.s * logits
else:
logits = tf.matmul(X, self.W)
logits = self.act32bit(logits)
return logits
......
import tensorflow as tf
from bob.learn.tensorflow.metrics.embedding_accuracy import accuracy_from_embeddings
class EmbeddingValidation(tf.keras.Model):
    """Model whose validation step scores accuracy from embeddings.

    ``call`` is expected to return ``(logits, prelogits)``; ``test_step``
    feeds the pre-logits (embeddings) to
    ``bob.learn.tensorflow.metrics.embedding_accuracy.accuracy_from_embeddings``.
    """

    def compile(self, single_precision=False, **kwargs):
        """Compile the model and create the tracking metrics.

        Parameters
        ----------
        single_precision
            Unused; kept for backward compatibility with existing callers.
        **kwargs
            Forwarded to ``tf.keras.Model.compile``.
        """
        super().compile(**kwargs)
        # NOTE(fix): the training-loss mean was previously also created
        # with name="accuracy", colliding with `validation_acc` in the
        # metric dicts returned by `train_step`/`test_step`.
        self.train_loss = tf.keras.metrics.Mean(name="train_loss")
        self.validation_acc = tf.keras.metrics.Mean(name="accuracy")

    def train_step(self, data):
        """Run one optimization step on a ``(X, y)`` batch."""
        X, y = data
        with tf.GradientTape() as tape:
            logits, _ = self(X, training=True)
            loss = self.loss(y, logits)

        self.optimizer.minimize(loss, self.trainable_variables, tape=tape)
        self.compiled_metrics.update_state(y, logits, sample_weight=None)
        self.train_loss(loss)
        tf.summary.scalar("training_loss", data=loss, step=self._train_counter)
        return {m.name: m.result() for m in self.metrics + [self.train_loss]}

    def test_step(self, data):
        """Validation step: accuracy from the pre-logit embeddings.

        The logits are discarded; only the embeddings (pre-logits) are
        scored.
        """
        images, labels = data
        logits, prelogits = self(images, training=False)
        self.validation_acc(accuracy_from_embeddings(labels, prelogits))
        return {m.name: m.result() for m in [self.validation_acc]}
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment