Commit 9f2c4abf authored by Amir MOHAMMADI

blacken code

parent 85c6b71d
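For context: "blacken" means running the black autoformatter over the tree, which rewraps long signatures, adds trailing commas to multi-line calls, and normalises string quotes to double quotes. A minimal sketch of that transformation, assuming the black package is installed (black.format_str and black.Mode are its public Python entry points):

import black

src = 'def f(a,b ,c):\n    return {"x":a}\n'
# black normalises whitespace, argument spacing, and quote style
formatted = black.format_str(src, mode=black.Mode())
print(formatted)
# def f(a, b, c):
#     return {"x": a}

Every hunk below is this kind of mechanical rewrite; runtime behaviour is unchanged.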
# see https://docs.python.org/3/library/pkgutil.html
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
\ No newline at end of file
__path__ = extend_path(__path__, __name__)
# see https://docs.python.org/3/library/pkgutil.html
from pkgutil import extend_path
__path__ = extend_path(__path__, __name__)
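Both __init__.py hunks above carry the pkgutil-style namespace-package idiom from the linked docs; the only functional change in this commit is the added trailing newline. For reference, a sketch of why the idiom is there (the sibling distribution layout is illustrative):

# Several separately installed distributions each ship this same bob/__init__.py,
# e.g. one providing bob/extension/ and another providing bob/learn/tensorflow/.
from pkgutil import extend_path

# Merge every "bob" directory found on sys.path into one logical package, so
# "import bob.extension" and "import bob.learn.tensorflow" both resolve.
__path__ = extend_path(__path__, __name__)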
@@ -3,4 +3,5 @@ def get_config():
Returns a string containing the configuration information.
"""
import bob.extension
return bob.extension.get_config(__name__)
@@ -30,7 +30,14 @@ class Generator:
The shapes of the returned samples.
"""
def __init__(self, samples, reader, multiple_samples=False, shuffle_on_epoch_end=False, **kwargs):
def __init__(
self,
samples,
reader,
multiple_samples=False,
shuffle_on_epoch_end=False,
**kwargs
):
super().__init__(**kwargs)
self.reader = reader
self.samples = list(samples)
......
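The hunk above only re-wraps Generator.__init__ into black's one-argument-per-line style. For orientation, a hedged usage sketch; the import path, the reader contract (mapping one sample descriptor to its data), and the sample dicts are all assumptions, not quoted from this file:

import numpy as np
from bob.learn.tensorflow.dataset.generator import Generator  # import path is an assumption

def reader(sample):
    # hypothetical reader: load one sample from disk
    return np.load(sample["path"]), sample["key"], sample["label"]

samples = [
    {"path": "a.npy", "key": "a", "label": 0},
    {"path": "b.npy", "key": "b", "label": 1},
]
generator = Generator(samples, reader, multiple_samples=False, shuffle_on_epoch_end=False)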
@@ -7,22 +7,24 @@ from functools import partial
from . import append_image_augmentation, from_filename_to_tensor
def shuffle_data_and_labels_image_augmentation(filenames,
labels,
data_shape,
data_type,
batch_size,
epochs=None,
buffer_size=10**3,
gray_scale=False,
output_shape=None,
random_flip=False,
random_brightness=False,
random_contrast=False,
random_saturation=False,
random_rotate=False,
per_image_normalization=True,
extension=None):
def shuffle_data_and_labels_image_augmentation(
filenames,
labels,
data_shape,
data_type,
batch_size,
epochs=None,
buffer_size=10 ** 3,
gray_scale=False,
output_shape=None,
random_flip=False,
random_brightness=False,
random_contrast=False,
random_saturation=False,
random_rotate=False,
per_image_normalization=True,
extension=None,
):
"""
Dump random batches from a list of image paths and labels:
@@ -95,7 +97,8 @@ def shuffle_data_and_labels_image_augmentation(filenames,
random_saturation=random_saturation,
random_rotate=random_rotate,
per_image_normalization=per_image_normalization,
extension=extension)
extension=extension,
)
dataset = dataset.shuffle(buffer_size).batch(batch_size).repeat(epochs)
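The statement above is the standard tf.data training pipeline: an unbounded, shuffled, batched stream (epochs=None makes repeat() loop indefinitely). A self-contained sketch of the same idiom on toy tensors (filenames and sizes are illustrative):

import tensorflow as tf

filenames = tf.constant(["img_0.jpg", "img_1.jpg"])
labels = tf.constant([0, 1], dtype=tf.int64)

dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
# shuffle within a 1000-element buffer, emit batches of 32, restart forever
dataset = dataset.shuffle(10 ** 3).batch(32).repeat(None)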
@@ -103,19 +106,21 @@ def shuffle_data_and_labels_image_augmentation(filenames,
return data, labels
def create_dataset_from_path_augmentation(filenames,
labels,
data_shape,
data_type,
gray_scale=False,
output_shape=None,
random_flip=False,
random_brightness=False,
random_contrast=False,
random_saturation=False,
random_rotate=False,
per_image_normalization=True,
extension=None):
def create_dataset_from_path_augmentation(
filenames,
labels,
data_shape,
data_type,
gray_scale=False,
output_shape=None,
random_flip=False,
random_brightness=False,
random_contrast=False,
random_saturation=False,
random_rotate=False,
per_image_normalization=True,
extension=None,
):
"""
Create a dataset from a list of image paths and labels
@@ -149,26 +154,29 @@ def create_dataset_from_path_augmentation(filenames,
random_saturation=random_saturation,
random_rotate=random_rotate,
per_image_normalization=per_image_normalization,
extension=extension)
extension=extension,
)
dataset = tf.data.Dataset.from_tensor_slices((filenames, labels))
dataset = dataset.map(parser)
return dataset
def image_augmentation_parser(filename,
label,
data_shape,
data_type,
gray_scale=False,
output_shape=None,
random_flip=False,
random_brightness=False,
random_contrast=False,
random_saturation=False,
random_rotate=False,
per_image_normalization=True,
extension=None):
def image_augmentation_parser(
filename,
label,
data_shape,
data_type,
gray_scale=False,
output_shape=None,
random_flip=False,
random_brightness=False,
random_contrast=False,
random_saturation=False,
random_rotate=False,
per_image_normalization=True,
extension=None,
):
"""
Parses a single (filename, label) pair into image and label tensors.
"""
@@ -179,7 +187,7 @@ def image_augmentation_parser(filename,
# Reshape image data into the original shape
image = tf.reshape(image, data_shape)
#Applying image augmentation
# Applying image augmentation
image = append_image_augmentation(
image,
gray_scale=gray_scale,
@@ -189,12 +197,13 @@ def image_augmentation_parser(filename,
random_contrast=random_contrast,
random_saturation=random_saturation,
random_rotate=random_rotate,
per_image_normalization=per_image_normalization)
per_image_normalization=per_image_normalization,
)
label = tf.cast(label, tf.int64)
features = dict()
features['data'] = image
features['key'] = filename
features["data"] = image
features["key"] = filename
return features, label
......
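image_augmentation_parser ultimately yields a (features, label) pair in which features["data"] holds the decoded image and features["key"] the originating filename. A minimal sketch of the decode-and-reshape core it wraps, written against plain TensorFlow ops instead of the package's from_filename_to_tensor helper (the data_shape value is illustrative):

import tensorflow as tf

def parse(filename, label, data_shape=(112, 112, 3)):
    raw = tf.io.read_file(filename)
    image = tf.io.decode_jpeg(raw, channels=data_shape[-1])  # uint8, height x width x channels
    image = tf.reshape(image, data_shape)  # enforce the declared static shape
    return {"data": image, "key": filename}, tf.cast(label, tf.int64)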
@@ -227,5 +227,3 @@ def dataset_from_tfrecord(tfrecord, num_parallel_reads=None):
# key = tf.cast(features["key"], tf.string)
# return image, label, key
@@ -14,26 +14,26 @@ def relativistic_discriminator_loss(
):
"""Relativistic (average) loss
Args:
discriminator_real_outputs: Discriminator output on real data.
discriminator_gen_outputs: Discriminator output on generated data. Expected
to be in the range of (-inf, inf).
label_smoothing: The amount of smoothing for positive labels. This technique
is taken from `Improved Techniques for Training GANs`
(https://arxiv.org/abs/1606.03498). `0.0` means no smoothing.
real_weights: Optional `Tensor` whose rank is either 0, or the same rank as
`real_data`, and must be broadcastable to `real_data` (i.e., all
dimensions must be either `1`, or the same as the corresponding
dimension).
generated_weights: Same as `real_weights`, but for `generated_data`.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.compat.v1.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A loss Tensor. The shape depends on `reduction`.
"""
with tf.compat.v1.name_scope(
scope,
"discriminator_relativistic_loss",
@@ -75,8 +75,12 @@ def relativistic_discriminator_loss(
tf.compat.v1.losses.add_loss(loss, loss_collection)
if add_summaries:
tf.compat.v1.summary.scalar("discriminator_gen_relativistic_loss", loss_on_generated)
tf.compat.v1.summary.scalar("discriminator_real_relativistic_loss", loss_on_real)
tf.compat.v1.summary.scalar(
"discriminator_gen_relativistic_loss", loss_on_generated
)
tf.compat.v1.summary.scalar(
"discriminator_real_relativistic_loss", loss_on_real
)
tf.compat.v1.summary.scalar("discriminator_relativistic_loss", loss)
return loss
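For reference, the relativistic average discriminator loss scores each real sample against the mean fake score and vice versa. A numpy sketch of that formulation (label smoothing, weights, and the configurable reduction are omitted, so this shows the idea rather than a drop-in equivalent):

import numpy as np

def sigmoid_xent(logits, target):
    # stable sigmoid cross-entropy, mirroring tf.nn.sigmoid_cross_entropy_with_logits
    return np.maximum(logits, 0) - logits * target + np.log1p(np.exp(-np.abs(logits)))

def relativistic_d_loss(real_logits, fake_logits):
    real_vs_fake = real_logits - fake_logits.mean()  # should look "real" (target 1)
    fake_vs_real = fake_logits - real_logits.mean()  # should look "fake" (target 0)
    return sigmoid_xent(real_vs_fake, 1.0).mean() + sigmoid_xent(fake_vs_real, 0.0).mean()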
@@ -96,26 +100,26 @@ def relativistic_generator_loss(
):
"""Relativistic (average) loss
Args:
discriminator_real_outputs: Discriminator output on real data.
discriminator_gen_outputs: Discriminator output on generated data. Expected
to be in the range of (-inf, inf).
label_smoothing: The amount of smoothing for positive labels. This technique
is taken from `Improved Techniques for Training GANs`
(https://arxiv.org/abs/1606.03498). `0.0` means no smoothing.
real_weights: Optional `Tensor` whose rank is either 0, or the same rank as
`real_data`, and must be broadcastable to `real_data` (i.e., all
dimensions must be either `1`, or the same as the corresponding
dimension).
generated_weights: Same as `real_weights`, but for `generated_data`.
scope: The scope for the operations performed in computing the loss.
loss_collection: collection to which this loss will be added.
reduction: A `tf.compat.v1.losses.Reduction` to apply to loss.
add_summaries: Whether or not to add summaries for the loss.
Returns:
A loss Tensor. The shape depends on `reduction`.
"""
with tf.compat.v1.name_scope(
scope,
"generator_relativistic_loss",
@@ -164,8 +168,12 @@ def relativistic_generator_loss(
tf.compat.v1.losses.add_loss(loss, loss_collection)
if add_summaries:
tf.compat.v1.summary.scalar("generator_gen_relativistic_loss", loss_on_generated)
tf.compat.v1.summary.scalar("generator_real_relativistic_loss", loss_on_real)
tf.compat.v1.summary.scalar(
"generator_gen_relativistic_loss", loss_on_generated
)
tf.compat.v1.summary.scalar(
"generator_real_relativistic_loss", loss_on_real
)
tf.compat.v1.summary.scalar("generator_relativistic_loss", loss)
return loss
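The generator term is the mirror image: the same two cross-entropies with the targets swapped, so the generator gains when fake scores rise above the average real score. Reusing the hypothetical sigmoid_xent helper from the discriminator sketch above:

def relativistic_g_loss(real_logits, fake_logits):
    real_vs_fake = real_logits - fake_logits.mean()  # generator wants this called "fake" (0)
    fake_vs_real = fake_logits - real_logits.mean()  # and this called "real" (1)
    return sigmoid_xent(real_vs_fake, 0.0).mean() + sigmoid_xent(fake_vs_real, 1.0).mean()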
@@ -4,13 +4,13 @@ from .filter import gaussian_kernel, GaussianFilter
def __appropriate__(*args):
"""Says object was actually declared here, an not on the import module.
Parameters:
Parameters:
*args: An iterable of objects to modify
*args: An iterable of objects to modify
Resolves `Sphinx referencing issues
<https://github.com/sphinx-doc/sphinx/issues/3048>`
"""
Resolves `Sphinx referencing issues
<https://github.com/sphinx-doc/sphinx/issues/3048>`
"""
for obj in args:
obj.__module__ = __name__
......
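__appropriate__ only rewrites __module__ so Sphinx attributes each object to the public package instead of the private submodule it was defined in. A short usage sketch (the __all__ line is a common companion convention, shown here as an assumption rather than quoted from this file):

from .filter import gaussian_kernel, GaussianFilter

__appropriate__(gaussian_kernel, GaussianFilter)  # both now report this package as their home
__all__ = [name for name in dir() if not name.startswith("_")]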
@@ -4,6 +4,7 @@
import logging
import tensorflow as tf
logger = logging.getLogger(__name__)
@@ -39,12 +40,9 @@ logger = logging.getLogger(__name__)
# return cross_loss
def mean_cross_entropy_center_loss(logits,
prelogits,
labels,
n_classes,
alpha=0.9,
factor=0.01):
def mean_cross_entropy_center_loss(
logits, prelogits, labels, n_classes, alpha=0.9, factor=0.01
):
"""
Implementation of the CrossEntropy + Center Loss from the paper
"A Discriminative Feature Learning Approach for Deep Face Recognition"(http://ydwen.github.io/papers/WenECCV16.pdf)
@@ -59,44 +57,49 @@ def mean_cross_entropy_center_loss(logits,
"""
# Cross entropy
with tf.compat.v1.variable_scope('cross_entropy_loss'):
with tf.compat.v1.variable_scope("cross_entropy_loss"):
cross_loss = tf.reduce_mean(
input_tensor=tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=logits, labels=labels),
name="cross_entropy_loss")
logits=logits, labels=labels
),
name="cross_entropy_loss",
)
tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.LOSSES, cross_loss)
tf.compat.v1.summary.scalar('loss_cross_entropy', cross_loss)
tf.compat.v1.summary.scalar("loss_cross_entropy", cross_loss)
# Appending center loss
with tf.compat.v1.variable_scope('center_loss'):
with tf.compat.v1.variable_scope("center_loss"):
n_features = prelogits.get_shape()[1]
centers = tf.compat.v1.get_variable(
'centers', [n_classes, n_features],
"centers",
[n_classes, n_features],
dtype=tf.float32,
initializer=tf.compat.v1.constant_initializer(0),
trainable=False)
trainable=False,
)
# label = tf.reshape(labels, [-1])
centers_batch = tf.gather(centers, labels)
diff = (1 - alpha) * (centers_batch - prelogits)
centers = tf.compat.v1.scatter_sub(centers, labels, diff)
center_loss = tf.reduce_mean(input_tensor=tf.square(prelogits - centers_batch))
tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES,
center_loss * factor)
tf.compat.v1.summary.scalar('loss_center', center_loss)
tf.compat.v1.add_to_collection(
tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES, center_loss * factor
)
tf.compat.v1.summary.scalar("loss_center", center_loss)
# Adding the regularizers in the loss
with tf.compat.v1.variable_scope('total_loss'):
with tf.compat.v1.variable_scope("total_loss"):
regularization_losses = tf.compat.v1.get_collection(
tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES)
total_loss = tf.add_n(
[cross_loss] + regularization_losses, name="total_loss")
tf.compat.v1.GraphKeys.REGULARIZATION_LOSSES
)
total_loss = tf.add_n([cross_loss] + regularization_losses, name="total_loss")
tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.LOSSES, total_loss)
tf.compat.v1.summary.scalar('loss_total', total_loss)
tf.compat.v1.summary.scalar("loss_total", total_loss)
loss = dict()
loss['loss'] = total_loss
loss['centers'] = centers
loss["loss"] = total_loss
loss["centers"] = centers
return loss
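The center-loss bookkeeping is easier to follow outside the tf.compat.v1 variable machinery. A numpy sketch of the same update rule (alpha is the center momentum and factor the regularisation weight, matching the defaults above):

import numpy as np

def center_loss_step(prelogits, labels, centers, alpha=0.9, factor=0.01):
    centers_batch = centers[labels]  # gather each sample's current class center
    diff = (1 - alpha) * (centers_batch - prelogits)
    np.subtract.at(centers, labels, diff)  # scatter_sub: accumulates over repeated labels
    center_loss = np.mean((prelogits - centers_batch) ** 2)  # pull features toward centers
    return factor * center_loss, centers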
@@ -47,16 +47,24 @@ def contrastive_loss(left_embedding, right_embedding, labels, contrastive_margin
with tf.compat.v1.name_scope("within_class"):
one = tf.constant(1.0)
within_class = tf.multiply(one - labels, tf.square(d)) # (1-Y)*(d^2)
within_class_loss = tf.reduce_mean(input_tensor=within_class, name="within_class")
tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.LOSSES, within_class_loss)
within_class_loss = tf.reduce_mean(
input_tensor=within_class, name="within_class"
)
tf.compat.v1.add_to_collection(
tf.compat.v1.GraphKeys.LOSSES, within_class_loss
)
with tf.compat.v1.name_scope("between_class"):
max_part = tf.square(tf.maximum(contrastive_margin - d, 0))
between_class = tf.multiply(
labels, max_part
) # (Y) * max((margin - d)^2, 0)
between_class_loss = tf.reduce_mean(input_tensor=between_class, name="between_class")
tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.LOSSES, between_class_loss)
between_class_loss = tf.reduce_mean(
input_tensor=between_class, name="between_class"
)
tf.compat.v1.add_to_collection(
tf.compat.v1.GraphKeys.LOSSES, between_class_loss
)
with tf.compat.v1.name_scope("total_loss"):
loss = 0.5 * (within_class + between_class)
......
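The two branches above implement the classic contrastive loss of Hadsell, Chopra, and LeCun: (1 - Y) * d^2 for genuine pairs and Y * max(margin - d, 0)^2 for impostor pairs, with Y = 1 meaning "different". A compact numpy sketch (the margin value is illustrative):

import numpy as np

def contrastive(left, right, labels, margin=2.0):
    d = np.linalg.norm(left - right, axis=1)  # Euclidean distance per pair
    within = (1.0 - labels) * d ** 2  # pull same-class pairs together
    between = labels * np.maximum(margin - d, 0.0) ** 2  # push impostors past the margin
    return 0.5 * np.mean(within + between)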
@@ -5,6 +5,7 @@
import logging
import tensorflow as tf
import functools
logger = logging.getLogger(__name__)
@@ -32,7 +33,7 @@ def content_loss(noises, content_features):
"""
content_losses = []
for n,c in zip(noises, content_features):
for n, c in zip(noises, content_features):
content_losses.append((2 * tf.nn.l2_loss(n - c) / c.size))
return functools.reduce(tf.add, content_losses)
@@ -61,13 +62,12 @@ def linear_gram_style_loss(noises, gram_style_features):
"""
style_losses = []
for n,s in zip(noises, gram_style_features):
for n, s in zip(noises, gram_style_features):
style_losses.append((2 * tf.nn.l2_loss(n - s)) / s.size)
return functools.reduce(tf.add, style_losses)
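Both losses are squared-error terms scaled by 2/size; the style variant first summarises each activation map by its Gram matrix, which keeps channel-to-channel correlations while discarding spatial layout. A hedged sketch of the Gram computation that would feed gram_style_features (assuming height x width x channels activations; normalisation conventions vary):

import numpy as np

def gram_matrix(activations):
    h, w, c = activations.shape
    flat = activations.reshape(h * w, c)  # one row per spatial position
    return flat.T @ flat / flat.size  # c x c channel correlations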
def denoising_loss(noise):
"""
Computes the denoising loss as in:
@@ -81,16 +81,25 @@ def denoising_loss(noise):
Input noise
"""
def _tensor_size(tensor):
from operator import mul
return functools.reduce(mul, (d.value for d in tensor.get_shape()), 1)
shape = noise.get_shape().as_list()
noise_y_size = _tensor_size(noise[:,1:,:,:])
noise_x_size = _tensor_size(noise[:,:,1:,:])
denoise_loss = 2 * ( (tf.nn.l2_loss(noise[:,1:,:,:] - noise[:,:shape[1]-1,:,:]) / noise_y_size) +
(tf.nn.l2_loss(noise[:,:,1:,:] - noise[:,:,:shape[2]-1,:]) / noise_x_size))
noise_y_size = _tensor_size(noise[:, 1:, :, :])
noise_x_size = _tensor_size(noise[:, :, 1:, :])
denoise_loss = 2 * (
(
tf.nn.l2_loss(noise[:, 1:, :, :] - noise[:, : shape[1] - 1, :, :])
/ noise_y_size
)
+ (
tf.nn.l2_loss(noise[:, :, 1:, :] - noise[:, :, : shape[2] - 1, :])
/ noise_x_size
)
)
return denoise_loss
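The expression above is the anisotropic total-variation penalty: squared differences between each pixel and its neighbour below and to the right, each normalised by the number of such pairs. Modern TensorFlow ships the same idea as a one-liner, though with absolute rather than squared differences, so treat this as the concept and not a drop-in replacement:

import tensorflow as tf

images = tf.random.uniform([4, 64, 64, 3])
tv = tf.image.total_variation(images)  # per-image sum of absolute neighbour differences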
@@ -3,16 +3,14 @@
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
import logging
logger = logging.getLogger(__name__)
import tensorflow as tf
from bob.learn.tensorflow.utils import compute_euclidean_distance
def triplet_loss(anchor_embedding,
positive_embedding,
negative_embedding,
margin=5.0):
def triplet_loss(anchor_embedding, positive_embedding, negative_embedding, margin=5.0):
"""
Compute the triplet loss as in
@@ -40,55 +38,70 @@ def triplet_loss(anchor_embedding,
with tf.compat.v1.name_scope("triplet_loss"):
# Normalize
anchor_embedding = tf.nn.l2_normalize(
anchor_embedding, 1, 1e-10, name="anchor")
anchor_embedding = tf.nn.l2_normalize(anchor_embedding, 1, 1e-10, name="anchor")
positive_embedding = tf.nn.l2_normalize(
positive_embedding, 1, 1e-10, name="positive")
positive_embedding, 1, 1e-10, name="positive"
)
negative_embedding = tf.nn.l2_normalize(
negative_embedding, 1, 1e-10, name="negative")
negative_embedding, 1, 1e-10, name="negative"
)
d_positive = tf.reduce_sum(
input_tensor=tf.square(tf.subtract(anchor_embedding, positive_embedding)), axis=1)
input_tensor=tf.square(tf.subtract(anchor_embedding, positive_embedding)),
axis=1,
)
d_negative = tf.reduce_sum(
input_tensor=tf.square(tf.subtract(anchor_embedding, negative_embedding)), axis=1)
input_tensor=tf.square(tf.subtract(anchor_embedding, negative_embedding)),
axis=1,
)
basic_loss = tf.add(tf.subtract(d_positive, d_negative), margin)
with tf.compat.v1.name_scope("TripletLoss"):
# Between
between_class_loss = tf.reduce_mean(input_tensor=d_negative)
tf.compat.v1.summary.scalar('loss_between_class', between_class_loss)
tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.LOSSES, between_class_loss)
tf.compat.v1.summary.scalar("loss_between_class", between_class_loss)
tf.compat.v1.add_to_collection(
tf.compat.v1.GraphKeys.LOSSES, between_class_loss
)
# Within
within_class_loss = tf.reduce_mean(input_tensor=d_positive)
tf.compat.v1.summary.scalar('loss_within_class', within_class_loss)
tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.LOSSES, within_class_loss)
tf.compat.v1.summary.scalar("loss_within_class", within_class_loss)
tf.compat.v1.add_to_collection(
tf.compat.v1.GraphKeys.LOSSES, within_class_loss
)
# Total loss
loss = tf.reduce_mean(
input_tensor=tf.maximum(basic_loss, 0.0), axis=0, name="total_loss")
input_tensor=tf.maximum(basic_loss, 0.0), axis=0, name="total_loss"
)
tf.compat.v1.add_to_collection(tf.compat.v1.GraphKeys.LOSSES, loss)
tf.compat.v1.summary.scalar('loss_triplet', loss)
tf.compat.v1.summary.scalar("loss_triplet", loss)
return loss