Commit 036a308f authored by Amir MOHAMMADI's avatar Amir MOHAMMADI

improve logging

parent 54a80323
......@@ -13,12 +13,12 @@ def mean_cross_entropy_loss(logits, labels, add_regularization_losses=True):
"""
Simple CrossEntropy loss.
Basically it wraps the function tf.nn.sparse_softmax_cross_entropy_with_logits.
**Parameters**
logits:
labels:
add_regularization_losses: Whether to add the regularization losses to the returned loss
"""
with tf.variable_scope('cross_entropy_loss'):
......@@ -50,7 +50,7 @@ def mean_cross_entropy_center_loss(logits,
"""
Implementation of the CrossEntropy + Center Loss from the paper
"A Discriminative Feature Learning Approach for Deep Face Recognition" (http://ydwen.github.io/papers/WenECCV16.pdf)
**Parameters**
logits:
prelogits:
......@@ -67,7 +67,7 @@ def mean_cross_entropy_center_loss(logits,
logits=logits, labels=labels),
name="cross_entropy_loss")
tf.add_to_collection(tf.GraphKeys.LOSSES, cross_loss)
tf.summary.scalar('cross_entropy_loss', cross_loss)
tf.summary.scalar('loss_cross_entropy', cross_loss)
# Appending center loss
with tf.variable_scope('center_loss'):
......@@ -79,14 +79,14 @@ def mean_cross_entropy_center_loss(logits,
initializer=tf.constant_initializer(0),
trainable=False)
#label = tf.reshape(labels, [-1])
# label = tf.reshape(labels, [-1])
centers_batch = tf.gather(centers, labels)
diff = (1 - alpha) * (centers_batch - prelogits)
centers = tf.scatter_sub(centers, labels, diff)
center_loss = tf.reduce_mean(tf.square(prelogits - centers_batch))
tf.add_to_collection(tf.GraphKeys.REGULARIZATION_LOSSES,
center_loss * factor)
tf.summary.scalar('center_loss', center_loss)
tf.summary.scalar('loss_center', center_loss)
# Adding the regularizers in the loss
with tf.variable_scope('total_loss'):
......@@ -95,7 +95,7 @@ def mean_cross_entropy_center_loss(logits,
total_loss = tf.add_n(
[cross_loss] + regularization_losses, name="total_loss")
tf.add_to_collection(tf.GraphKeys.LOSSES, total_loss)
tf.summary.scalar('total_loss', total_loss)
tf.summary.scalar('loss_total', total_loss)
loss = dict()
loss['loss'] = total_loss
......
......@@ -3,16 +3,15 @@
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
import logging
logger = logging.getLogger("bob.learn.tensorflow")
import tensorflow as tf
from bob.learn.tensorflow.utils import (
compute_euclidean_distance,
)
from bob.learn.tensorflow.utils import compute_euclidean_distance
logger = logging.getLogger(__name__)
def contrastive_loss(left_embedding,
right_embedding,
labels,
contrastive_margin=2.0):
def contrastive_loss(left_embedding, right_embedding, labels, contrastive_margin=2.0):
"""
Compute the contrastive loss as in
......@@ -49,18 +48,16 @@ def contrastive_loss(left_embedding,
with tf.name_scope("within_class"):
one = tf.constant(1.0)
within_class = tf.multiply(one - labels,
tf.square(d)) # (1-Y)*(d^2)
within_class_loss = tf.reduce_mean(
within_class, name="within_class")
within_class = tf.multiply(one - labels, tf.square(d)) # (1-Y)*(d^2)
within_class_loss = tf.reduce_mean(within_class, name="within_class")
tf.add_to_collection(tf.GraphKeys.LOSSES, within_class_loss)
with tf.name_scope("between_class"):
max_part = tf.square(tf.maximum(contrastive_margin - d, 0))
between_class = tf.multiply(
labels, max_part) # (Y) * max((margin - d)^2, 0)
between_class_loss = tf.reduce_mean(
between_class, name="between_class")
labels, max_part
) # (Y) * max((margin - d)^2, 0)
between_class_loss = tf.reduce_mean(between_class, name="between_class")
tf.add_to_collection(tf.GraphKeys.LOSSES, between_class_loss)
with tf.name_scope("total_loss"):
......@@ -68,8 +65,8 @@ def contrastive_loss(left_embedding,
loss = tf.reduce_mean(loss, name="contrastive_loss")
tf.add_to_collection(tf.GraphKeys.LOSSES, loss)
tf.summary.scalar('contrastive_loss', loss)
tf.summary.scalar('between_class', between_class_loss)
tf.summary.scalar('within_class', within_class_loss)
tf.summary.scalar("contrastive_loss", loss)
tf.summary.scalar("between_class", between_class_loss)
tf.summary.scalar("within_class", within_class_loss)
return loss
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment