From 905ffc94bf0b10d5d1a92f361869a142c69aaa22 Mon Sep 17 00:00:00 2001
From: Amir MOHAMMADI <amir.mohammadi@idiap.ch>
Date: Wed, 17 Apr 2019 16:16:49 +0200
Subject: [PATCH] small changes: nit, bug fix, small features

---
 bob/learn/tensorflow/layers/Maxout.py        | 57 ++++++++++---------
 bob/learn/tensorflow/layers/__init__.py      |  7 ++-
 bob/learn/tensorflow/loss/ContrastiveLoss.py | 13 ++---
 bob/learn/tensorflow/network/utils.py        |  7 ++-
 .../tensorflow/script/compute_statistics.py  | 18 +++---
 bob/learn/tensorflow/script/eval.py          |  3 +-
 6 files changed, 53 insertions(+), 52 deletions(-)

diff --git a/bob/learn/tensorflow/layers/Maxout.py b/bob/learn/tensorflow/layers/Maxout.py
index d1abd0e5..69c05520 100644
--- a/bob/learn/tensorflow/layers/Maxout.py
+++ b/bob/learn/tensorflow/layers/Maxout.py
@@ -3,22 +3,19 @@
 # @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
 # @date: Fri 04 Aug 2017 14:14:22 CEST
 
-## MAXOUT IMPLEMENTED FOR TENSORFLOW
-
-from tensorflow.python.framework import ops
-from tensorflow.python.ops import math_ops
-from tensorflow.python.ops import gen_array_ops
+# MAXOUT IMPLEMENTED FOR TENSORFLOW
 from tensorflow.python.layers import base
+import tensorflow as tf
 
 
 def maxout(inputs, num_units, axis=-1, name=None):
-    return MaxOut(num_units=num_units, axis=axis, name=name)(inputs)
+    return Maxout(num_units=num_units, axis=axis, name=name)(inputs)
 
 
-class MaxOut(base.Layer):
+class Maxout(base.Layer):
     """
-    Adds a maxout op from
+    Adds a maxout op from
 
     "Maxout Networks"
 
@@ -41,33 +38,37 @@ class MaxOut(base.Layer):
     """
 
     def __init__(self, num_units, axis=-1, name=None, **kwargs):
-        super(MaxOut, self).__init__(name=name, trainable=False, **kwargs)
+        super(Maxout, self).__init__(name=name, trainable=False, **kwargs)
         self.axis = axis
         self.num_units = num_units
 
     def call(self, inputs, training=False):
-        inputs = ops.convert_to_tensor(inputs)
+        inputs = tf.convert_to_tensor(inputs)
         shape = inputs.get_shape().as_list()
-        if self.axis is None:
-            # Assume that channel is the last dimension
-            self.axis = -1
-        num_channels = shape[self.axis]
-        if num_channels % self.num_units:
-            raise ValueError('number of features({}) is not '
-                             'a multiple of num_units({})'.format(
-                                 num_channels, self.num_units))
-        shape[self.axis] = -1
-        shape += [num_channels // self.num_units]
-
-        # Dealing with batches with arbitrary sizes
         for i in range(len(shape)):
             if shape[i] is None:
-                shape[i] = gen_array_ops.shape(inputs)[i]
-
-        outputs = math_ops.reduce_max(
-            gen_array_ops.reshape(inputs, shape), -1, keep_dims=False)
-        shape = outputs.get_shape().as_list()
-        shape[self.axis] = self.num_units
-        outputs.set_shape(shape)
+                shape[i] = tf.shape(inputs)[i]
+        num_channels = shape[self.axis]
+        if not isinstance(num_channels, tf.Tensor) and num_channels % self.num_units:
+            raise ValueError(
+                "number of features({}) is not "
+                "a multiple of num_units({})".format(num_channels, self.num_units)
+            )
+
+        if self.axis < 0:
+            axis = self.axis + len(shape)
+        else:
+            axis = self.axis
+        assert axis >= 0, "Find invalid axis: {}".format(self.axis)
+
+        expand_shape = shape[:]
+        expand_shape[axis] = self.num_units
+        k = num_channels // self.num_units
+        expand_shape.insert(axis, k)
+
+        outputs = tf.math.reduce_max(
+            tf.reshape(inputs, expand_shape), axis, keepdims=False
+        )
         return outputs
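A quick usage sketch of the renamed layer (a minimal example assuming
TensorFlow 1.x graph mode, as used by this package; the shapes are made up
for illustration)::

    import tensorflow as tf
    from bob.learn.tensorflow.layers import Maxout, maxout

    # a batch of 10 feature vectors with 128 channels
    inputs = tf.placeholder(tf.float32, shape=(10, 128))

    # the 128 channels are reduced in groups of 4 by taking a max over
    # each group, so the output shape is (10, 32)
    outputs = Maxout(num_units=32)(inputs)
    assert outputs.get_shape().as_list() == [10, 32]

    # the functional wrapper is equivalent
    outputs = maxout(inputs, num_units=32)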
diff --git a/bob/learn/tensorflow/layers/__init__.py b/bob/learn/tensorflow/layers/__init__.py
index 8a810de3..a392020c 100644
--- a/bob/learn/tensorflow/layers/__init__.py
+++ b/bob/learn/tensorflow/layers/__init__.py
@@ -1,4 +1,4 @@
-from .Maxout import maxout
+from .Maxout import Maxout, maxout
 
 # gets sphinx autodoc done right - don't remove it
 
@@ -17,5 +17,8 @@ def __appropriate__(*args):
         obj.__module__ = __name__
 
 
-__appropriate__(maxout)
+__appropriate__(
+    Maxout,
+    maxout,
+)
 __all__ = [_ for _ in dir() if not _.startswith('_')]

diff --git a/bob/learn/tensorflow/loss/ContrastiveLoss.py b/bob/learn/tensorflow/loss/ContrastiveLoss.py
index a949aba5..81e3ac7b 100644
--- a/bob/learn/tensorflow/loss/ContrastiveLoss.py
+++ b/bob/learn/tensorflow/loss/ContrastiveLoss.py
@@ -19,9 +19,9 @@ def contrastive_loss(left_embedding,
     http://yann.lecun.com/exdb/publis/pdf/hadsell-chopra-lecun-06.pdf
 
     :math:`L = 0.5 * (1-Y) * D^2 + 0.5 * (Y) * {max(0, margin - D)}^2`
-    
+
     where, `0` are assign for pairs from the same class and `1` from pairs from different classes.
-    
+
 
     **Parameters**
 
@@ -65,15 +65,10 @@ def contrastive_loss(left_embedding,
     with tf.name_scope("total_loss"):
         loss = 0.5 * (within_class + between_class)
-        loss = tf.reduce_mean(loss, name="total_loss_raw")
-        tf.summary.scalar('loss_raw', loss)
+        loss = tf.reduce_mean(loss, name="contrastive_loss")
         tf.add_to_collection(tf.GraphKeys.LOSSES, loss)
 
-        ## Appending the regularization loss
-        #regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
-        #loss = tf.add_n([loss] + regularization_losses, name="total_loss")
-
-        tf.summary.scalar('loss', loss)
+        tf.summary.scalar('contrastive_loss', loss)
         tf.summary.scalar('between_class', between_class_loss)
         tf.summary.scalar('within_class', within_class_loss)

diff --git a/bob/learn/tensorflow/network/utils.py b/bob/learn/tensorflow/network/utils.py
index a98b3c79..780a1682 100644
--- a/bob/learn/tensorflow/network/utils.py
+++ b/bob/learn/tensorflow/network/utils.py
@@ -10,8 +10,9 @@ def append_logits(graph,
                   n_classes,
                   reuse=False,
                   l2_regularizer=5e-05,
-                  weights_std=0.1, trainable_variables=None):
-    trainable = is_trainable('Logits', trainable_variables)
+                  weights_std=0.1, trainable_variables=None,
+                  name='Logits'):
+    trainable = is_trainable(name, trainable_variables)
     return slim.fully_connected(
         graph,
         n_classes,
@@ -19,7 +20,7 @@ def append_logits(graph,
         weights_initializer=tf.truncated_normal_initializer(
             stddev=weights_std),
         weights_regularizer=slim.l2_regularizer(l2_regularizer),
-        scope='Logits',
+        scope=name,
         reuse=reuse,
         trainable=trainable,
     )
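A sketch of what the new `name` argument of `append_logits` enables:
two differently scoped logits heads on the same embedding (the embedding
tensor here is hypothetical; only the `name` keyword comes from this
patch)::

    import tensorflow as tf
    from bob.learn.tensorflow.network.utils import append_logits

    embedding = tf.placeholder(tf.float32, shape=(None, 512))

    # unchanged default: variables are created under the 'Logits' scope
    logits = append_logits(embedding, n_classes=10)

    # a second head under its own scope, now possible via `name`
    aux_logits = append_logits(embedding, n_classes=2, name='AuxLogits')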
diff --git a/bob/learn/tensorflow/script/compute_statistics.py b/bob/learn/tensorflow/script/compute_statistics.py
index a8945a4c..b876c5e4 100644
--- a/bob/learn/tensorflow/script/compute_statistics.py
+++ b/bob/learn/tensorflow/script/compute_statistics.py
@@ -15,7 +15,15 @@ logger = logging.getLogger(__name__)
 
 
 @click.command(
-    entry_point_group='bob.learn.tensorflow.config', cls=ConfigCommand)
+    entry_point_group='bob.learn.tensorflow.config', cls=ConfigCommand,
+    epilog="""\b
+An example configuration could be::
+    # define the database:
+    from bob.bio.base.test.dummy.database import database
+    groups = ['dev']
+    biofiles = database.all_files(groups)
+"""
+)
 @click.option(
     '--database',
     '-d',
@@ -50,14 +58,6 @@ def compute_statistics(database, biofiles, load_data, multiple_samples,
 
     This script works with bob.bio.base databases. It will load all the
     samples and print their mean.
-
-    An example configuration could be::
-
-        # define the database:
-        from bob.bio.base.test.dummy.database import database
-
-        groups = ['dev']
-        biofiles = database.all_files(groups)
     """
     log_parameters(logger, ignore=('biofiles', ))
     logger.debug("len(biofiles): %d", len(biofiles))

diff --git a/bob/learn/tensorflow/script/eval.py b/bob/learn/tensorflow/script/eval.py
index 95cbb12a..99bd4e54 100644
--- a/bob/learn/tensorflow/script/eval.py
+++ b/bob/learn/tensorflow/script/eval.py
@@ -264,7 +264,8 @@ def eval(estimator, eval_input_fn, hooks, run_once, eval_interval_secs, name,
             continue
 
         # evaluate based on the just copied checkpoint_path
-        checkpoint_path = checkpoint_path.replace(estimator.model_dir, eval_dir)
+        checkpoint_path = checkpoint_path.replace(estimator.model_dir, eval_dir + os.sep)
+        checkpoint_path = os.path.abspath(checkpoint_path)
         logger.debug("Evaluating the model from %s", checkpoint_path)
 
         # Evaluate
-- 
GitLab