diff --git a/bob/learn/tensorflow/dataset/__init__.py b/bob/learn/tensorflow/dataset/__init__.py index 85b22e8178333bd592f3158932d733299d7d084a..7c0af9b4e73d12e1372baa2a8bc03b83ba6f608a 100644 --- a/bob/learn/tensorflow/dataset/__init__.py +++ b/bob/learn/tensorflow/dataset/__init__.py @@ -97,8 +97,10 @@ def append_image_augmentation(image, image = tf.image.random_saturation(image, lower=0, upper=0.5) if random_rotate: - image = tf.contrib.image.rotate(image, angles=numpy.random.randint(-5,5), - interpolation="BILINEAR") + image = tf.contrib.image.rotate( + image, + angles=numpy.random.randint(-5, 5), + interpolation="BILINEAR") if gray_scale: image = tf.image.rgb_to_grayscale(image, name="rgb_to_gray") diff --git a/bob/learn/tensorflow/dataset/bio.py b/bob/learn/tensorflow/dataset/bio.py index 4a53d28e6ade7b0452ff3ad6ae9987661f7807d7..c33d6fabe8f4beefec4df35a74d39be9919ba338 100644 --- a/bob/learn/tensorflow/dataset/bio.py +++ b/bob/learn/tensorflow/dataset/bio.py @@ -80,9 +80,10 @@ class BioGenerator(object): self._output_shapes = (data.shape, tf.TensorShape([]), tf.TensorShape([])) - logger.info("Initializing a dataset with %d files and %s types " - "and %s shapes", len(self.biofiles), self.output_types, - self.output_shapes) + logger.info( + "Initializing a dataset with %d files and %s types " + "and %s shapes", len(self.biofiles), self.output_types, + self.output_shapes) @property def labels(self): diff --git a/bob/learn/tensorflow/estimators/Logits.py b/bob/learn/tensorflow/estimators/Logits.py index e323278e85d58bbd010cfe9991ffaad3200d5b8f..dcb24440793110307107210d77c46861b3442fa2 100755 --- a/bob/learn/tensorflow/estimators/Logits.py +++ b/bob/learn/tensorflow/estimators/Logits.py @@ -136,8 +136,10 @@ class Logits(estimator.Estimator): # Compute the moving average of all individual losses and the total loss. if apply_moving_averages: - variable_averages = tf.train.ExponentialMovingAverage(0.9999, global_step) - variable_averages_op = variable_averages.apply(tf.trainable_variables()) + variable_averages = tf.train.ExponentialMovingAverage( + 0.9999, global_step) + variable_averages_op = variable_averages.apply( + tf.trainable_variables()) else: variable_averages_op = tf.no_op(name='noop') @@ -147,15 +149,20 @@ class Logits(estimator.Estimator): self.loss = self.loss_op(logits=logits, labels=labels) # Compute the moving average of all individual losses and the total loss. 
- loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg') - loss_averages_op = loss_averages.apply(tf.get_collection(tf.GraphKeys.LOSSES)) + loss_averages = tf.train.ExponentialMovingAverage( + 0.9, name='avg') + loss_averages_op = loss_averages.apply( + tf.get_collection(tf.GraphKeys.LOSSES)) for l in tf.get_collection(tf.GraphKeys.LOSSES): - tf.summary.scalar(l.op.name+"_averaged", loss_averages.average(l)) + tf.summary.scalar(l.op.name + "_averaged", + loss_averages.average(l)) global_step = tf.train.get_or_create_global_step() - train_op = tf.group(self.optimizer.minimize(self.loss, global_step=global_step), - variable_averages_op, loss_averages_op) + train_op = tf.group( + self.optimizer.minimize( + self.loss, global_step=global_step), + variable_averages_op, loss_averages_op) return tf.estimator.EstimatorSpec( mode=mode, loss=self.loss, train_op=train_op) @@ -285,21 +292,19 @@ class LogitsCenterLoss(estimator.Estimator): """ - def __init__( - self, - architecture=None, - optimizer=None, - config=None, - n_classes=0, - embedding_validation=False, - model_dir="", - alpha=0.9, - factor=0.01, - validation_batch_size=None, - params=None, - extra_checkpoint=None, - apply_moving_averages=True - ): + def __init__(self, + architecture=None, + optimizer=None, + config=None, + n_classes=0, + embedding_validation=False, + model_dir="", + alpha=0.9, + factor=0.01, + validation_batch_size=None, + params=None, + extra_checkpoint=None, + apply_moving_averages=True): self.architecture = architecture self.optimizer = optimizer @@ -344,8 +349,10 @@ class LogitsCenterLoss(estimator.Estimator): # Compute the moving average of all individual losses and the total loss. if apply_moving_averages: - variable_averages = tf.train.ExponentialMovingAverage(0.9999, global_step) - variable_averages_op = variable_averages.apply(tf.trainable_variables()) + variable_averages = tf.train.ExponentialMovingAverage( + 0.9999, global_step) + variable_averages_op = variable_averages.apply( + tf.trainable_variables()) else: variable_averages_op = tf.no_op(name='noop') @@ -363,8 +370,10 @@ class LogitsCenterLoss(estimator.Estimator): centers = loss_dict['centers'] # Compute the moving average of all individual losses and the total loss. 
- loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg') - loss_averages_op = loss_averages.apply(tf.get_collection(tf.GraphKeys.LOSSES)) + loss_averages = tf.train.ExponentialMovingAverage( + 0.9, name='avg') + loss_averages_op = loss_averages.apply( + tf.get_collection(tf.GraphKeys.LOSSES)) for l in tf.get_collection(tf.GraphKeys.LOSSES): tf.summary.scalar(l.op.name, loss_averages.average(l)) @@ -376,7 +385,8 @@ class LogitsCenterLoss(estimator.Estimator): train_op = tf.group( self.optimizer.minimize( - self.loss, global_step=global_step), centers, variable_averages_op, loss_averages_op) + self.loss, global_step=global_step), centers, + variable_averages_op, loss_averages_op) return tf.estimator.EstimatorSpec( mode=mode, loss=self.loss, train_op=train_op) diff --git a/bob/learn/tensorflow/estimators/Siamese.py b/bob/learn/tensorflow/estimators/Siamese.py index 5d9316693afca682d613571c7d8b904e59252495..c33a47ff0bec92e90d269f29a2ef3d1559f98830 100755 --- a/bob/learn/tensorflow/estimators/Siamese.py +++ b/bob/learn/tensorflow/estimators/Siamese.py @@ -179,4 +179,3 @@ class Siamese(estimator.Estimator): model_dir=model_dir, params=params, config=config) - diff --git a/bob/learn/tensorflow/estimators/Triplet.py b/bob/learn/tensorflow/estimators/Triplet.py index 26710297b887bc0cdbc9aad9c460d1d972605ea9..c22098e5252af7d4836fe02f958bb5e426f5ce84 100644 --- a/bob/learn/tensorflow/estimators/Triplet.py +++ b/bob/learn/tensorflow/estimators/Triplet.py @@ -172,4 +172,3 @@ class Triplet(estimator.Estimator): super(Triplet, self).__init__( model_fn=_model_fn, model_dir=model_dir, config=config) - diff --git a/bob/learn/tensorflow/layers/Maxout.py b/bob/learn/tensorflow/layers/Maxout.py index 4f44491063d19d5c56ffecf470e41ae3b2bb780a..d1abd0e5f27a1135e03d6e93ac1a6c3a34ff3319 100644 --- a/bob/learn/tensorflow/layers/Maxout.py +++ b/bob/learn/tensorflow/layers/Maxout.py @@ -71,4 +71,3 @@ class MaxOut(base.Layer): outputs.set_shape(shape) return outputs - diff --git a/bob/learn/tensorflow/loss/BaseLoss.py b/bob/learn/tensorflow/loss/BaseLoss.py index 6053b8b543d334c54c91df719bfb51d7a1f47914..1d5cfcc6b70e54030f6e9c274d07a4cb8d789677 100644 --- a/bob/learn/tensorflow/loss/BaseLoss.py +++ b/bob/learn/tensorflow/loss/BaseLoss.py @@ -33,8 +33,9 @@ def mean_cross_entropy_loss(logits, labels, add_regularization_losses=True): if add_regularization_losses: regularization_losses = tf.get_collection( tf.GraphKeys.REGULARIZATION_LOSSES) - - total_loss = tf.add_n([cross_loss] + regularization_losses, name="total_loss") + + total_loss = tf.add_n( + [cross_loss] + regularization_losses, name="total_loss") return total_loss else: return cross_loss diff --git a/bob/learn/tensorflow/loss/ContrastiveLoss.py b/bob/learn/tensorflow/loss/ContrastiveLoss.py index 2db184093c43e79c59ea93de7675e0d57d052011..a949aba59d798b52a159360c78c5a7688d914b7e 100644 --- a/bob/learn/tensorflow/loss/ContrastiveLoss.py +++ b/bob/learn/tensorflow/loss/ContrastiveLoss.py @@ -68,15 +68,13 @@ def contrastive_loss(left_embedding, loss = tf.reduce_mean(loss, name="total_loss_raw") tf.summary.scalar('loss_raw', loss) tf.add_to_collection(tf.GraphKeys.LOSSES, loss) - + ## Appending the regularization loss #regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) #loss = tf.add_n([loss] + regularization_losses, name="total_loss") - - + tf.summary.scalar('loss', loss) tf.summary.scalar('between_class', between_class_loss) tf.summary.scalar('within_class', within_class_loss) return loss - diff --git 
a/bob/learn/tensorflow/loss/TripletLoss.py b/bob/learn/tensorflow/loss/TripletLoss.py index f7c1d6cce323967c194052716ed4dc2031acd1f6..97fb1a99859c04cca203e29322c8137e5c9dde97 100644 --- a/bob/learn/tensorflow/loss/TripletLoss.py +++ b/bob/learn/tensorflow/loss/TripletLoss.py @@ -57,19 +57,19 @@ def triplet_loss(anchor_embedding, with tf.name_scope("TripletLoss"): # Between between_class_loss = tf.reduce_mean(d_negative) - tf.summary.scalar('between_class', between_class_loss) + tf.summary.scalar('between_class', between_class_loss) tf.add_to_collection(tf.GraphKeys.LOSSES, between_class_loss) - + # Within within_class_loss = tf.reduce_mean(d_positive) - tf.summary.scalar('within_class', within_class_loss) + tf.summary.scalar('within_class', within_class_loss) tf.add_to_collection(tf.GraphKeys.LOSSES, within_class_loss) # Total loss loss = tf.reduce_mean( tf.maximum(basic_loss, 0.0), 0, name="total_loss") tf.add_to_collection(tf.GraphKeys.LOSSES, loss) - tf.summary.scalar('loss_raw', loss) + tf.summary.scalar('loss_raw', loss) # Appending the regularization loss #regularization_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES) diff --git a/bob/learn/tensorflow/network/Chopra.py b/bob/learn/tensorflow/network/Chopra.py index cbdbbd070ca91313ea1031246ac83b59a81ad277..a3242e40b3a925cdd5a4a4a228beb1c4726003d6 100644 --- a/bob/learn/tensorflow/network/Chopra.py +++ b/bob/learn/tensorflow/network/Chopra.py @@ -107,4 +107,3 @@ def chopra( end_points['fc1'] = graph return graph, end_points - diff --git a/bob/learn/tensorflow/network/Dummy.py b/bob/learn/tensorflow/network/Dummy.py index 1af12ed274ec5cb647dc14ce6f3e139d142939be..917f30863a6fffafe6b590b44bdc8d4a785da587 100644 --- a/bob/learn/tensorflow/network/Dummy.py +++ b/bob/learn/tensorflow/network/Dummy.py @@ -62,4 +62,3 @@ def dummy(inputs, end_points[name] = graph return graph, end_points - diff --git a/bob/learn/tensorflow/network/InceptionResnetV1.py b/bob/learn/tensorflow/network/InceptionResnetV1.py index f65c323a12b262ca229a3d45149007a01c9c9a71..5e7d37609587b1be90cd2bd890b0eaa150b88951 100644 --- a/bob/learn/tensorflow/network/InceptionResnetV1.py +++ b/bob/learn/tensorflow/network/InceptionResnetV1.py @@ -26,6 +26,7 @@ import tensorflow as tf import tensorflow.contrib.slim as slim from .utils import is_trainable + # Inception-Renset-A def block35(net, scale=1.0, @@ -254,15 +255,16 @@ def reduction_b(net, reuse=None, trainable_variables=True): 3) return net + def inception_resnet_v1_batch_norm(inputs, - dropout_keep_prob=0.8, - bottleneck_layer_size=128, - reuse=None, - scope='InceptionResnetV1', - mode=tf.estimator.ModeKeys.TRAIN, - trainable_variables=None, - weight_decay=1e-5, - **kwargs): + dropout_keep_prob=0.8, + bottleneck_layer_size=128, + reuse=None, + scope='InceptionResnetV1', + mode=tf.estimator.ModeKeys.TRAIN, + trainable_variables=None, + weight_decay=1e-5, + **kwargs): """ Creates the Inception Resnet V1 model applying batch not to each Convolutional and FullyConnected layer. @@ -292,7 +294,6 @@ def inception_resnet_v1_batch_norm(inputs, end_points: the set of end_points from the inception model. """ - batch_norm_params = { # Decay for the moving averages. 
@@ -304,20 +305,22 @@ def inception_resnet_v1_batch_norm(inputs, # Moving averages ends up in the trainable variables collection 'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES], } - + with slim.arg_scope( [slim.conv2d, slim.fully_connected], weights_initializer=tf.truncated_normal_initializer(stddev=0.1), weights_regularizer=slim.l2_regularizer(weight_decay), normalizer_fn=slim.batch_norm, normalizer_params=batch_norm_params): - return inception_resnet_v1(inputs, - dropout_keep_prob=0.8, - bottleneck_layer_size=128, - reuse=None, - scope='InceptionResnetV1', - mode=mode, - trainable_variables=None,) + return inception_resnet_v1( + inputs, + dropout_keep_prob=0.8, + bottleneck_layer_size=128, + reuse=None, + scope='InceptionResnetV1', + mode=mode, + trainable_variables=None, + ) def inception_resnet_v1(inputs, @@ -327,7 +330,7 @@ def inception_resnet_v1(inputs, scope='InceptionResnetV1', mode=tf.estimator.ModeKeys.TRAIN, trainable_variables=None, - **kwargs): + **kwargs): """ Creates the Inception Resnet V1 model. @@ -363,7 +366,6 @@ def inception_resnet_v1(inputs, [slim.batch_norm, slim.dropout], is_training=(mode == tf.estimator.ModeKeys.TRAIN)): - with slim.arg_scope( [slim.conv2d, slim.max_pool2d, slim.avg_pool2d], stride=1, @@ -382,7 +384,7 @@ def inception_resnet_v1(inputs, trainable=trainable, reuse=reuse) end_points[name] = net - + # 147 x 147 x 32 name = "Conv2d_2a_3x3" trainable = is_trainable(name, trainable_variables) @@ -395,23 +397,18 @@ def inception_resnet_v1(inputs, trainable=trainable, reuse=reuse) end_points[name] = net - + # 147 x 147 x 64 name = "Conv2d_2b_3x3" trainable = is_trainable(name, trainable_variables) net = slim.conv2d( - net, - 64, - 3, - scope=name, - trainable=trainable, - reuse=reuse) + net, 64, 3, scope=name, trainable=trainable, reuse=reuse) end_points[name] = net # 73 x 73 x 64 net = slim.max_pool2d( net, 3, stride=2, padding='VALID', scope='MaxPool_3a_3x3') end_points['MaxPool_3a_3x3'] = net - + # 73 x 73 x 80 name = "Conv2d_3b_1x1" trainable = is_trainable(name, trainable_variables) @@ -437,7 +434,7 @@ def inception_resnet_v1(inputs, trainable=trainable, reuse=reuse) end_points[name] = net - + # 35 x 35 x 256 name = "Conv2d_4b_3x3" trainable = is_trainable(name, trainable_variables) @@ -495,9 +492,7 @@ def inception_resnet_v1(inputs, trainable = is_trainable(name, trainable_variables) with tf.variable_scope(name): net = reduction_b( - net, - trainable_variables=trainable, - reuse=reuse) + net, trainable_variables=trainable, reuse=reuse) end_points[name] = net # 5 x Inception-Resnet-C diff --git a/bob/learn/tensorflow/network/InceptionResnetV2.py b/bob/learn/tensorflow/network/InceptionResnetV2.py index f43ce56be902870289602a32c3740aff25654b3f..fcdb4c13768055f0e4c0b6a33b528259a8734ead 100644 --- a/bob/learn/tensorflow/network/InceptionResnetV2.py +++ b/bob/learn/tensorflow/network/InceptionResnetV2.py @@ -204,14 +204,14 @@ def block8(net, def inception_resnet_v2_batch_norm(inputs, - dropout_keep_prob=0.8, - bottleneck_layer_size=128, - reuse=None, - scope='InceptionResnetV2', - mode=tf.estimator.ModeKeys.TRAIN, - trainable_variables=None, - weight_decay = 5e-5, - **kwargs): + dropout_keep_prob=0.8, + bottleneck_layer_size=128, + reuse=None, + scope='InceptionResnetV2', + mode=tf.estimator.ModeKeys.TRAIN, + trainable_variables=None, + weight_decay=5e-5, + **kwargs): """ Creates the Inception Resnet V2 model applying batch not to each Convolutional and FullyConnected layer. 
@@ -241,7 +241,6 @@ def inception_resnet_v2_batch_norm(inputs, end_points: the set of end_points from the inception model. """ - batch_norm_params = { # Decay for the moving averages. 'decay': 0.995, @@ -310,337 +309,337 @@ def inception_resnet_v2(inputs, [slim.batch_norm, slim.dropout], is_training=(mode == tf.estimator.ModeKeys.TRAIN)): - with slim.arg_scope( - [slim.conv2d, slim.max_pool2d, slim.avg_pool2d], - stride=1, - padding='SAME'): - # 149 x 149 x 32 - name = "Conv2d_1a_3x3" - trainable = is_trainable(name, trainable_variables) - net = slim.conv2d( - inputs, - 32, - 3, - stride=2, - padding='VALID', - scope=name, - trainable=trainable, - reuse=reuse) - end_points[name] = net - - # 147 x 147 x 32 - name = "Conv2d_2a_3x3" - trainable = is_trainable(name, trainable_variables) - net = slim.conv2d( - net, - 32, - 3, - padding='VALID', - scope=name, - trainable=trainable, - reuse=reuse) - end_points[name] = net - - # 147 x 147 x 64 - name = "Conv2d_2b_3x3" - trainable = is_trainable(name, trainable_variables) - net = slim.conv2d( - net, 64, 3, scope=name, trainable=trainable, reuse=reuse) - end_points[name] = net - - # 73 x 73 x 64 - net = slim.max_pool2d( - net, 3, stride=2, padding='VALID', scope='MaxPool_3a_3x3') - end_points['MaxPool_3a_3x3'] = net - - # 73 x 73 x 80 - name = "Conv2d_3b_1x1" - trainable = is_trainable(name, trainable_variables) - net = slim.conv2d( - net, - 80, - 1, - padding='VALID', - scope=name, - trainable=trainable, - reuse=reuse) - end_points[name] = net - - # 71 x 71 x 192 - name = "Conv2d_4a_3x3" - trainable = is_trainable(name, trainable_variables) - net = slim.conv2d( - net, - 192, - 3, - padding='VALID', - scope=name, - trainable=trainable, - reuse=reuse) - end_points[name] = net - - # 35 x 35 x 192 - net = slim.max_pool2d( - net, 3, stride=2, padding='VALID', scope='MaxPool_5a_3x3') - end_points['MaxPool_5a_3x3'] = net - - # 35 x 35 x 320 - name = "Mixed_5b" - trainable = is_trainable(name, trainable_variables) - with tf.variable_scope(name): - with tf.variable_scope('Branch_0'): - tower_conv = slim.conv2d( - net, - 96, - 1, - scope='Conv2d_1x1', - trainable=trainable, - reuse=reuse) - with tf.variable_scope('Branch_1'): - tower_conv1_0 = slim.conv2d( - net, - 48, - 1, - scope='Conv2d_0a_1x1', - trainable=trainable, - reuse=reuse) - tower_conv1_1 = slim.conv2d( - tower_conv1_0, - 64, - 5, - scope='Conv2d_0b_5x5', - trainable=trainable, - reuse=reuse) - with tf.variable_scope('Branch_2'): - tower_conv2_0 = slim.conv2d( - net, - 64, - 1, - scope='Conv2d_0a_1x1', - trainable=trainable, - reuse=reuse) - tower_conv2_1 = slim.conv2d( - tower_conv2_0, - 96, - 3, - scope='Conv2d_0b_3x3', - trainable=trainable, - reuse=reuse) - tower_conv2_2 = slim.conv2d( - tower_conv2_1, - 96, - 3, - scope='Conv2d_0c_3x3', - trainable=trainable, - reuse=reuse) - with tf.variable_scope('Branch_3'): - tower_pool = slim.avg_pool2d( - net, - 3, - stride=1, - padding='SAME', - scope='AvgPool_0a_3x3') - tower_pool_1 = slim.conv2d( - tower_pool, - 64, - 1, - scope='Conv2d_0b_1x1', - trainable=trainable, - reuse=reuse) - net = tf.concat([ - tower_conv, tower_conv1_1, tower_conv2_2, tower_pool_1 - ], 3) - end_points[name] = net - - # BLOCK 35 - name = "Block35" - trainable = is_trainable(name, trainable_variables) - net = slim.repeat( - net, - 10, - block35, - scale=0.17, - trainable_variables=trainable, - reuse=reuse) - - # 17 x 17 x 1024 - name = "Mixed_6a" - trainable = is_trainable(name, trainable_variables) - with tf.variable_scope(name): - with tf.variable_scope('Branch_0'): - 
tower_conv = slim.conv2d( - net, - 384, - 3, - stride=2, - padding='VALID', - scope='Conv2d_1a_3x3', - trainable=trainable, - reuse=reuse) - with tf.variable_scope('Branch_1'): - tower_conv1_0 = slim.conv2d( - net, - 256, - 1, - scope='Conv2d_0a_1x1', - trainable=trainable, - reuse=reuse) - tower_conv1_1 = slim.conv2d( - tower_conv1_0, - 256, - 3, - scope='Conv2d_0b_3x3', - trainable=trainable, - reuse=reuse) - tower_conv1_2 = slim.conv2d( - tower_conv1_1, - 384, - 3, - stride=2, - padding='VALID', - scope='Conv2d_1a_3x3', - trainable=trainable, - reuse=reuse) - with tf.variable_scope('Branch_2'): - tower_pool = slim.max_pool2d( - net, - 3, - stride=2, - padding='VALID', - scope='MaxPool_1a_3x3') - net = tf.concat([tower_conv, tower_conv1_2, tower_pool], 3) - - end_points[name] = net - - # BLOCK 17 - name = "Block17" - trainable = is_trainable(name, trainable_variables) - net = slim.repeat( - net, - 20, - block17, - scale=0.10, - trainable_variables=trainable, - reuse=reuse) - - name = "Mixed_7a" - trainable = is_trainable(name, trainable_variables) - with tf.variable_scope(name): - with tf.variable_scope('Branch_0'): - tower_conv = slim.conv2d( - net, - 256, - 1, - scope='Conv2d_0a_1x1', - trainable=trainable, - reuse=reuse) - tower_conv_1 = slim.conv2d( - tower_conv, - 384, - 3, - stride=2, - padding='VALID', - scope='Conv2d_1a_3x3', - trainable=trainable, - reuse=reuse) - with tf.variable_scope('Branch_1'): - tower_conv1 = slim.conv2d( - net, - 256, - 1, - scope='Conv2d_0a_1x1', - trainable=trainable, - reuse=reuse) - tower_conv1_1 = slim.conv2d( - tower_conv1, - 288, - 3, - stride=2, - padding='VALID', - scope='Conv2d_1a_3x3', - trainable=trainable, - reuse=reuse) - with tf.variable_scope('Branch_2'): - tower_conv2 = slim.conv2d( - net, - 256, - 1, - scope='Conv2d_0a_1x1', - trainable=trainable, - reuse=reuse) - tower_conv2_1 = slim.conv2d( - tower_conv2, - 288, - 3, - scope='Conv2d_0b_3x3', - trainable=trainable, - reuse=reuse) - tower_conv2_2 = slim.conv2d( - tower_conv2_1, - 320, - 3, - stride=2, - padding='VALID', - scope='Conv2d_1a_3x3', - trainable=trainable, - reuse=reuse) - with tf.variable_scope('Branch_3'): - tower_pool = slim.max_pool2d( - net, - 3, - stride=2, - padding='VALID', - scope='MaxPool_1a_3x3') - net = tf.concat([ - tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool - ], 3) - end_points[name] = net - - # Block 8 - name = "Block8" - trainable = is_trainable(name, trainable_variables) - net = slim.repeat( - net, - 9, - block8, - scale=0.20, - trainable_variables=trainable, - reuse=reuse) - net = block8( - net, - activation_fn=None, - trainable_variables=trainable, - reuse=reuse) - - name = "Conv2d_7b_1x1" - trainable = is_trainable(name, trainable_variables) - net = slim.conv2d( - net, 1536, 1, scope=name, trainable=trainable, reuse=reuse) - end_points[name] = net - - with tf.variable_scope('Logits'): - end_points['PrePool'] = net - # pylint: disable=no-member - net = slim.avg_pool2d( + with slim.arg_scope( + [slim.conv2d, slim.max_pool2d, slim.avg_pool2d], + stride=1, + padding='SAME'): + # 149 x 149 x 32 + name = "Conv2d_1a_3x3" + trainable = is_trainable(name, trainable_variables) + net = slim.conv2d( + inputs, + 32, + 3, + stride=2, + padding='VALID', + scope=name, + trainable=trainable, + reuse=reuse) + end_points[name] = net + + # 147 x 147 x 32 + name = "Conv2d_2a_3x3" + trainable = is_trainable(name, trainable_variables) + net = slim.conv2d( + net, + 32, + 3, + padding='VALID', + scope=name, + trainable=trainable, + reuse=reuse) + end_points[name] = net 
+ + # 147 x 147 x 64 + name = "Conv2d_2b_3x3" + trainable = is_trainable(name, trainable_variables) + net = slim.conv2d( + net, 64, 3, scope=name, trainable=trainable, reuse=reuse) + end_points[name] = net + + # 73 x 73 x 64 + net = slim.max_pool2d( + net, 3, stride=2, padding='VALID', scope='MaxPool_3a_3x3') + end_points['MaxPool_3a_3x3'] = net + + # 73 x 73 x 80 + name = "Conv2d_3b_1x1" + trainable = is_trainable(name, trainable_variables) + net = slim.conv2d( + net, + 80, + 1, + padding='VALID', + scope=name, + trainable=trainable, + reuse=reuse) + end_points[name] = net + + # 71 x 71 x 192 + name = "Conv2d_4a_3x3" + trainable = is_trainable(name, trainable_variables) + net = slim.conv2d( + net, + 192, + 3, + padding='VALID', + scope=name, + trainable=trainable, + reuse=reuse) + end_points[name] = net + + # 35 x 35 x 192 + net = slim.max_pool2d( + net, 3, stride=2, padding='VALID', scope='MaxPool_5a_3x3') + end_points['MaxPool_5a_3x3'] = net + + # 35 x 35 x 320 + name = "Mixed_5b" + trainable = is_trainable(name, trainable_variables) + with tf.variable_scope(name): + with tf.variable_scope('Branch_0'): + tower_conv = slim.conv2d( + net, + 96, + 1, + scope='Conv2d_1x1', + trainable=trainable, + reuse=reuse) + with tf.variable_scope('Branch_1'): + tower_conv1_0 = slim.conv2d( net, - net.get_shape()[1:3], + 48, + 1, + scope='Conv2d_0a_1x1', + trainable=trainable, + reuse=reuse) + tower_conv1_1 = slim.conv2d( + tower_conv1_0, + 64, + 5, + scope='Conv2d_0b_5x5', + trainable=trainable, + reuse=reuse) + with tf.variable_scope('Branch_2'): + tower_conv2_0 = slim.conv2d( + net, + 64, + 1, + scope='Conv2d_0a_1x1', + trainable=trainable, + reuse=reuse) + tower_conv2_1 = slim.conv2d( + tower_conv2_0, + 96, + 3, + scope='Conv2d_0b_3x3', + trainable=trainable, + reuse=reuse) + tower_conv2_2 = slim.conv2d( + tower_conv2_1, + 96, + 3, + scope='Conv2d_0c_3x3', + trainable=trainable, + reuse=reuse) + with tf.variable_scope('Branch_3'): + tower_pool = slim.avg_pool2d( + net, + 3, + stride=1, + padding='SAME', + scope='AvgPool_0a_3x3') + tower_pool_1 = slim.conv2d( + tower_pool, + 64, + 1, + scope='Conv2d_0b_1x1', + trainable=trainable, + reuse=reuse) + net = tf.concat([ + tower_conv, tower_conv1_1, tower_conv2_2, tower_pool_1 + ], 3) + end_points[name] = net + + # BLOCK 35 + name = "Block35" + trainable = is_trainable(name, trainable_variables) + net = slim.repeat( + net, + 10, + block35, + scale=0.17, + trainable_variables=trainable, + reuse=reuse) + + # 17 x 17 x 1024 + name = "Mixed_6a" + trainable = is_trainable(name, trainable_variables) + with tf.variable_scope(name): + with tf.variable_scope('Branch_0'): + tower_conv = slim.conv2d( + net, + 384, + 3, + stride=2, padding='VALID', - scope='AvgPool_1a_8x8') - net = slim.flatten(net) - - net = slim.dropout(net, dropout_keep_prob, scope='Dropout') - - end_points['PreLogitsFlatten'] = net - - name = "Bottleneck" - trainable = is_trainable(name, trainable_variables) - net = slim.fully_connected( + scope='Conv2d_1a_3x3', + trainable=trainable, + reuse=reuse) + with tf.variable_scope('Branch_1'): + tower_conv1_0 = slim.conv2d( + net, + 256, + 1, + scope='Conv2d_0a_1x1', + trainable=trainable, + reuse=reuse) + tower_conv1_1 = slim.conv2d( + tower_conv1_0, + 256, + 3, + scope='Conv2d_0b_3x3', + trainable=trainable, + reuse=reuse) + tower_conv1_2 = slim.conv2d( + tower_conv1_1, + 384, + 3, + stride=2, + padding='VALID', + scope='Conv2d_1a_3x3', + trainable=trainable, + reuse=reuse) + with tf.variable_scope('Branch_2'): + tower_pool = slim.max_pool2d( + net, + 
3, + stride=2, + padding='VALID', + scope='MaxPool_1a_3x3') + net = tf.concat([tower_conv, tower_conv1_2, tower_pool], 3) + + end_points[name] = net + + # BLOCK 17 + name = "Block17" + trainable = is_trainable(name, trainable_variables) + net = slim.repeat( + net, + 20, + block17, + scale=0.10, + trainable_variables=trainable, + reuse=reuse) + + name = "Mixed_7a" + trainable = is_trainable(name, trainable_variables) + with tf.variable_scope(name): + with tf.variable_scope('Branch_0'): + tower_conv = slim.conv2d( + net, + 256, + 1, + scope='Conv2d_0a_1x1', + trainable=trainable, + reuse=reuse) + tower_conv_1 = slim.conv2d( + tower_conv, + 384, + 3, + stride=2, + padding='VALID', + scope='Conv2d_1a_3x3', + trainable=trainable, + reuse=reuse) + with tf.variable_scope('Branch_1'): + tower_conv1 = slim.conv2d( + net, + 256, + 1, + scope='Conv2d_0a_1x1', + trainable=trainable, + reuse=reuse) + tower_conv1_1 = slim.conv2d( + tower_conv1, + 288, + 3, + stride=2, + padding='VALID', + scope='Conv2d_1a_3x3', + trainable=trainable, + reuse=reuse) + with tf.variable_scope('Branch_2'): + tower_conv2 = slim.conv2d( + net, + 256, + 1, + scope='Conv2d_0a_1x1', + trainable=trainable, + reuse=reuse) + tower_conv2_1 = slim.conv2d( + tower_conv2, + 288, + 3, + scope='Conv2d_0b_3x3', + trainable=trainable, + reuse=reuse) + tower_conv2_2 = slim.conv2d( + tower_conv2_1, + 320, + 3, + stride=2, + padding='VALID', + scope='Conv2d_1a_3x3', + trainable=trainable, + reuse=reuse) + with tf.variable_scope('Branch_3'): + tower_pool = slim.max_pool2d( + net, + 3, + stride=2, + padding='VALID', + scope='MaxPool_1a_3x3') + net = tf.concat([ + tower_conv_1, tower_conv1_1, tower_conv2_2, tower_pool + ], 3) + end_points[name] = net + + # Block 8 + name = "Block8" + trainable = is_trainable(name, trainable_variables) + net = slim.repeat( + net, + 9, + block8, + scale=0.20, + trainable_variables=trainable, + reuse=reuse) + net = block8( + net, + activation_fn=None, + trainable_variables=trainable, + reuse=reuse) + + name = "Conv2d_7b_1x1" + trainable = is_trainable(name, trainable_variables) + net = slim.conv2d( + net, 1536, 1, scope=name, trainable=trainable, reuse=reuse) + end_points[name] = net + + with tf.variable_scope('Logits'): + end_points['PrePool'] = net + # pylint: disable=no-member + net = slim.avg_pool2d( net, - bottleneck_layer_size, - activation_fn=None, - scope=name, - reuse=reuse, - trainable=trainable) - end_points[name] = net + net.get_shape()[1:3], + padding='VALID', + scope='AvgPool_1a_8x8') + net = slim.flatten(net) + + net = slim.dropout(net, dropout_keep_prob, scope='Dropout') + + end_points['PreLogitsFlatten'] = net + + name = "Bottleneck" + trainable = is_trainable(name, trainable_variables) + net = slim.fully_connected( + net, + bottleneck_layer_size, + activation_fn=None, + scope=name, + reuse=reuse, + trainable=trainable) + end_points[name] = net return net, end_points diff --git a/bob/learn/tensorflow/network/JointIncResV2Simple.py b/bob/learn/tensorflow/network/JointIncResV2Simple.py index 3004ecbc4edf718c55d80e1a979847a1165f0c3a..3044de704ccc6740741bc6101f5c88adea41f668 100644 --- a/bob/learn/tensorflow/network/JointIncResV2Simple.py +++ b/bob/learn/tensorflow/network/JointIncResV2Simple.py @@ -38,8 +38,7 @@ def model_fn(features, labels, mode, params, config): else: reuse = True with tf.variable_scope('SimpleCNN', reuse=reuse): - net, _ = simplecnn_arch( - patches[:, i], mode, **simplecnn_kwargs) + net, _ = simplecnn_arch(patches[:, i], mode, **simplecnn_kwargs) if i == 0: simplecnn_embeddings = 
net else: diff --git a/bob/learn/tensorflow/network/PatchCNN.py b/bob/learn/tensorflow/network/PatchCNN.py index d5dd111df08cc3ed6d9a8f9565acc83c7a0b3729..6a05307a53e359fb0d868783fa069703d6dd859a 100644 --- a/bob/learn/tensorflow/network/PatchCNN.py +++ b/bob/learn/tensorflow/network/PatchCNN.py @@ -44,8 +44,16 @@ from __future__ import print_function import tensorflow as tf -def create_conv_layer(inputs, mode, data_format, endpoints, number, filters, - kernel_size, pool_size, pool_strides, skip_pool=False): +def create_conv_layer(inputs, + mode, + data_format, + endpoints, + number, + filters, + kernel_size, + pool_size, + pool_strides, + skip_pool=False): bn_axis = 1 if data_format.lower() == 'channels_first' else 3 training = mode == tf.estimator.ModeKeys.TRAIN @@ -74,8 +82,12 @@ def create_conv_layer(inputs, mode, data_format, endpoints, number, filters, pool = bn_act else: pool = tf.layers.max_pooling2d( - inputs=bn_act, pool_size=pool_size, strides=pool_strides, - padding='same', data_format=data_format, name=name) + inputs=bn_act, + pool_size=pool_size, + strides=pool_strides, + padding='same', + data_format=data_format, + name=name) endpoints[name] = pool return pool @@ -101,8 +113,11 @@ def create_dense_layer(inputs, mode, endpoints, number, units): return bn_act -def base_architecture(input_layer, mode, data_format, - skip_first_two_pool=False, **kwargs): +def base_architecture(input_layer, + mode, + data_format, + skip_first_two_pool=False, + **kwargs): training = mode == tf.estimator.ModeKeys.TRAIN # Keep track of all the endpoints endpoints = {} @@ -110,37 +125,69 @@ def base_architecture(input_layer, mode, data_format, # ====================== # Convolutional Layer #1 pool1 = create_conv_layer( - inputs=input_layer, mode=mode, data_format=data_format, - endpoints=endpoints, number=1, filters=50, kernel_size=(5, 5), - pool_size=(2, 2), pool_strides=2, skip_pool=skip_first_two_pool) + inputs=input_layer, + mode=mode, + data_format=data_format, + endpoints=endpoints, + number=1, + filters=50, + kernel_size=(5, 5), + pool_size=(2, 2), + pool_strides=2, + skip_pool=skip_first_two_pool) # ====================== # Convolutional Layer #2 pool2 = create_conv_layer( - inputs=pool1, mode=mode, data_format=data_format, - endpoints=endpoints, number=2, filters=100, kernel_size=(3, 3), - pool_size=(2, 2), pool_strides=2, skip_pool=skip_first_two_pool) + inputs=pool1, + mode=mode, + data_format=data_format, + endpoints=endpoints, + number=2, + filters=100, + kernel_size=(3, 3), + pool_size=(2, 2), + pool_strides=2, + skip_pool=skip_first_two_pool) # ====================== # Convolutional Layer #3 pool3 = create_conv_layer( - inputs=pool2, mode=mode, data_format=data_format, - endpoints=endpoints, number=3, filters=150, kernel_size=(3, 3), - pool_size=(3, 3), pool_strides=2) + inputs=pool2, + mode=mode, + data_format=data_format, + endpoints=endpoints, + number=3, + filters=150, + kernel_size=(3, 3), + pool_size=(3, 3), + pool_strides=2) # ====================== # Convolutional Layer #4 pool4 = create_conv_layer( - inputs=pool3, mode=mode, data_format=data_format, - endpoints=endpoints, number=4, filters=200, kernel_size=(3, 3), - pool_size=(2, 2), pool_strides=2) + inputs=pool3, + mode=mode, + data_format=data_format, + endpoints=endpoints, + number=4, + filters=200, + kernel_size=(3, 3), + pool_size=(2, 2), + pool_strides=2) # ====================== # Convolutional Layer #5 pool5 = create_conv_layer( - inputs=pool4, mode=mode, data_format=data_format, - endpoints=endpoints, number=5, 
filters=250, kernel_size=(3, 3), - pool_size=(2, 2), pool_strides=2) + inputs=pool4, + mode=mode, + data_format=data_format, + endpoints=endpoints, + number=5, + filters=250, + kernel_size=(3, 3), + pool_size=(2, 2), + pool_strides=2) # ======================== # Flatten tensor into a batch of vectors @@ -151,7 +198,10 @@ def base_architecture(input_layer, mode, data_format, # ======================== # Fully Connected Layer #1 fc1 = create_dense_layer( - inputs=pool5_flat, mode=mode, endpoints=endpoints, number=1, + inputs=pool5_flat, + mode=mode, + endpoints=endpoints, + number=1, units=1000) # ======================== @@ -164,8 +214,7 @@ def base_architecture(input_layer, mode, data_format, # ======================== # Fully Connected Layer #2 fc2 = create_dense_layer( - inputs=dropout, mode=mode, endpoints=endpoints, number=2, - units=400) + inputs=dropout, mode=mode, endpoints=endpoints, number=2, units=400) return fc2, endpoints @@ -179,11 +228,12 @@ def architecture(input_layer, regularizer=None, **kwargs): - with tf.variable_scope('PatchCNN', reuse=reuse, - regularizer=regularizer): + with tf.variable_scope('PatchCNN', reuse=reuse, regularizer=regularizer): fc2, endpoints = base_architecture( - input_layer=input_layer, mode=mode, data_format=data_format, + input_layer=input_layer, + mode=mode, + data_format=data_format, skip_first_two_pool=skip_first_two_pool) # Logits layer logits = tf.layers.dense(inputs=fc2, units=n_classes) @@ -252,14 +302,12 @@ def model_fn(features, labels, mode, params=None, config=None): staircase=staircase) optimizer = tf.train.MomentumOptimizer( - learning_rate=learning_rate, - momentum=momentum) + learning_rate=learning_rate, momentum=momentum) # for batch normalization to be updated as well: update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS) with tf.control_dependencies(update_ops): - train_op = optimizer.minimize( - loss=loss, global_step=global_step) + train_op = optimizer.minimize(loss=loss, global_step=global_step) # Log accuracy and loss with tf.name_scope('train_metrics'): diff --git a/bob/learn/tensorflow/network/SimpleCNN.py b/bob/learn/tensorflow/network/SimpleCNN.py index 667a9465eb51dd72cd1acc70177a385d473407b1..c44ccfbdec762ca9beaba493b6800d8ed71e9a72 100644 --- a/bob/learn/tensorflow/network/SimpleCNN.py +++ b/bob/learn/tensorflow/network/SimpleCNN.py @@ -4,9 +4,17 @@ from .utils import is_trainable from ..estimators import get_trainable_variables -def create_conv_layer(inputs, mode, data_format, endpoints, number, filters, - kernel_size, pool_size, pool_strides, - add_batch_norm=False, trainable_variables=None): +def create_conv_layer(inputs, + mode, + data_format, + endpoints, + number, + filters, + kernel_size, + pool_size, + pool_strides, + add_batch_norm=False, + trainable_variables=None): bn_axis = 1 if data_format.lower() == 'channels_first' else 3 training = mode == tf.estimator.ModeKeys.TRAIN @@ -42,15 +50,22 @@ def create_conv_layer(inputs, mode, data_format, endpoints, number, filters, name = 'pool{}'.format(number) pool = tf.layers.max_pooling2d( - inputs=bn_act, pool_size=pool_size, strides=pool_strides, - padding='same', data_format=data_format) + inputs=bn_act, + pool_size=pool_size, + strides=pool_strides, + padding='same', + data_format=data_format) endpoints[name] = pool return pool -def base_architecture(input_layer, mode, kernerl_size, data_format, - add_batch_norm=False, trainable_variables=None, +def base_architecture(input_layer, + mode, + kernerl_size, + data_format, + add_batch_norm=False, + 
trainable_variables=None, **kwargs): training = mode == tf.estimator.ModeKeys.TRAIN # Keep track of all the endpoints @@ -61,18 +76,32 @@ def base_architecture(input_layer, mode, kernerl_size, data_format, # activation. # Padding is added to preserve width and height. pool1 = create_conv_layer( - inputs=input_layer, mode=mode, data_format=data_format, - endpoints=endpoints, number=1, filters=32, kernel_size=kernerl_size, - pool_size=(2, 2), pool_strides=2, add_batch_norm=add_batch_norm, + inputs=input_layer, + mode=mode, + data_format=data_format, + endpoints=endpoints, + number=1, + filters=32, + kernel_size=kernerl_size, + pool_size=(2, 2), + pool_strides=2, + add_batch_norm=add_batch_norm, trainable_variables=trainable_variables) # Convolutional Layer #2 # Computes 64 features using a kernerl_size filter. # Padding is added to preserve width and height. pool2 = create_conv_layer( - inputs=pool1, mode=mode, data_format=data_format, - endpoints=endpoints, number=2, filters=64, kernel_size=kernerl_size, - pool_size=(2, 2), pool_strides=2, add_batch_norm=add_batch_norm, + inputs=pool1, + mode=mode, + data_format=data_format, + endpoints=endpoints, + number=2, + filters=64, + kernel_size=kernerl_size, + pool_size=(2, 2), + pool_strides=2, + add_batch_norm=add_batch_norm, trainable_variables=trainable_variables) # Flatten tensor into a batch of vectors @@ -89,7 +118,9 @@ def base_architecture(input_layer, mode, kernerl_size, data_format, name = 'dense' trainable = is_trainable(name, trainable_variables) dense = tf.layers.dense( - inputs=pool2_flat, units=1024, activation=activation, + inputs=pool2_flat, + units=1024, + activation=activation, trainable=trainable) endpoints[name] = dense @@ -127,7 +158,10 @@ def architecture(input_layer, with tf.variable_scope('SimpleCNN', reuse=reuse): dropout, endpoints = base_architecture( - input_layer, mode, kernerl_size, data_format, + input_layer, + mode, + kernerl_size, + data_format, add_batch_norm=add_batch_norm, trainable_variables=trainable_variables) # Logits layer @@ -135,8 +169,8 @@ def architecture(input_layer, # Output Tensor Shape: [batch_size, n_classes] name = 'logits' trainable = is_trainable(name, trainable_variables) - logits = tf.layers.dense(inputs=dropout, units=n_classes, - trainable=trainable) + logits = tf.layers.dense( + inputs=dropout, units=n_classes, trainable=trainable) endpoints[name] = logits return logits, endpoints diff --git a/bob/learn/tensorflow/script/compute_statistics.py b/bob/learn/tensorflow/script/compute_statistics.py index ca27305eec916e603ece4d67c7e910824d072ada..4b6daeff9da133466af4282703faeeb11f7ece21 100644 --- a/bob/learn/tensorflow/script/compute_statistics.py +++ b/bob/learn/tensorflow/script/compute_statistics.py @@ -7,21 +7,30 @@ from __future__ import print_function import logging import click import numpy as np -from bob.extension.scripts.click_helper import ( - verbosity_option, ConfigCommand, ResourceOption) +from bob.extension.scripts.click_helper import (verbosity_option, + ConfigCommand, ResourceOption) from bob.learn.tensorflow.dataset.bio import BioGenerator logger = logging.getLogger(__name__) -@click.command(entry_point_group='bob.learn.tensorflow.config', - cls=ConfigCommand) -@click.option('--database', '-d', required=True, cls=ResourceOption, - entry_point_group='bob.bio.database') -@click.option('--biofiles', required=True, cls=ResourceOption, - help='You can only provide this through config files.') -@click.option('--load-data', cls=ResourceOption, - 
entry_point_group='bob.learn.tensorflow.load_data') +@click.command( + entry_point_group='bob.learn.tensorflow.config', cls=ConfigCommand) +@click.option( + '--database', + '-d', + required=True, + cls=ResourceOption, + entry_point_group='bob.bio.database') +@click.option( + '--biofiles', + required=True, + cls=ResourceOption, + help='You can only provide this through config files.') +@click.option( + '--load-data', + cls=ResourceOption, + entry_point_group='bob.learn.tensorflow.load_data') @click.option('--multiple-samples', is_flag=True, cls=ResourceOption) @verbosity_option(cls=ResourceOption) def compute_statistics(database, biofiles, load_data, multiple_samples, diff --git a/bob/learn/tensorflow/script/db_to_tfrecords.py b/bob/learn/tensorflow/script/db_to_tfrecords.py index 8107511858e722cbf0b928a1b2fee204af837fb2..cc86df07811d47108853cdb824c12cd165ad747d 100644 --- a/bob/learn/tensorflow/script/db_to_tfrecords.py +++ b/bob/learn/tensorflow/script/db_to_tfrecords.py @@ -12,8 +12,8 @@ import logging import click import tensorflow as tf from bob.io.base import create_directories_safe -from bob.extension.scripts.click_helper import ( - verbosity_option, ConfigCommand, ResourceOption) +from bob.extension.scripts.click_helper import (verbosity_option, + ConfigCommand, ResourceOption) logger = logging.getLogger(__name__) @@ -73,8 +73,8 @@ def _bytes2human(n, format='%(value).1f %(symbol)s', symbols='customary'): return format % dict(symbol=symbols[0], value=n) -@click.command(entry_point_group='bob.learn.tensorflow.config', - cls=ConfigCommand) +@click.command( + entry_point_group='bob.learn.tensorflow.config', cls=ConfigCommand) @click.option('--samples', required=True, cls=ResourceOption) @click.option('--reader', required=True, cls=ResourceOption) @click.option('--output', '-o', required=True, cls=ResourceOption) @@ -211,7 +211,10 @@ def db_to_tfrecords(samples, reader, output, shuffle, allow_failures, if multiple_samples: for sample in data: total_size += write_a_sample( - writer, sample, label, key, + writer, + sample, + label, + key, size_estimate=size_estimate) sample_count += 1 else: diff --git a/bob/learn/tensorflow/script/eval.py b/bob/learn/tensorflow/script/eval.py index 8ba3428cf6c5ddfe57572ce24dd43660cb8ee630..55d80be80f7865b344429d6b70b30f90d273e45e 100644 --- a/bob/learn/tensorflow/script/eval.py +++ b/bob/learn/tensorflow/script/eval.py @@ -15,8 +15,8 @@ import time from glob import glob from collections import defaultdict, OrderedDict from ..utils.eval import get_global_step -from bob.extension.scripts.click_helper import ( - verbosity_option, ConfigCommand, ResourceOption) +from bob.extension.scripts.click_helper import (verbosity_option, + ConfigCommand, ResourceOption) from bob.io.base import create_directories_safe logger = logging.getLogger(__name__) @@ -33,8 +33,8 @@ def save_n_best_models(train_dir, save_dir, evaluated_file, lo = x.get('loss') or 0 return (lo, ac * -1) - best_models = OrderedDict(sorted( - evaluated.items(), key=_key)[:keep_n_best_models]) + best_models = OrderedDict( + sorted(evaluated.items(), key=_key)[:keep_n_best_models]) # delete the old saved models that are not in top N best anymore saved_models = defaultdict(list) @@ -61,6 +61,7 @@ def save_n_best_models(train_dir, save_dir, evaluated_file, # 1. filter non-existing models first def _filter(x): return len(glob('{}/model.ckpt-{}.*'.format(save_dir, x[0]))) > 0 + best_models = OrderedDict(filter(_filter, best_models.items())) # 2. 
create the checkpoint file @@ -91,29 +92,47 @@ def read_evaluated_file(path): def append_evaluated_file(path, evaluations): str_evaluations = ', '.join( - '%s = %s' % (k, v) - for k, v in sorted(six.iteritems(evaluations))) + '%s = %s' % (k, v) for k, v in sorted(six.iteritems(evaluations))) with open(path, 'a') as f: - f.write('{} {}\n'.format(evaluations['global_step'], - str_evaluations)) + f.write('{} {}\n'.format(evaluations['global_step'], str_evaluations)) return str_evaluations -@click.command(entry_point_group='bob.learn.tensorflow.config', - cls=ConfigCommand) -@click.option('--estimator', '-e', required=True, cls=ResourceOption, - entry_point_group='bob.learn.tensorflow.estimator') -@click.option('--eval-input-fn', '-i', required=True, cls=ResourceOption, - entry_point_group='bob.learn.tensorflow.input_fn') -@click.option('--hooks', cls=ResourceOption, multiple=True, - entry_point_group='bob.learn.tensorflow.hook') -@click.option('--run-once', cls=ResourceOption, default=False, - show_default=True) -@click.option('--eval-interval-secs', cls=ResourceOption, type=click.INT, - default=60, show_default=True) +@click.command( + entry_point_group='bob.learn.tensorflow.config', cls=ConfigCommand) +@click.option( + '--estimator', + '-e', + required=True, + cls=ResourceOption, + entry_point_group='bob.learn.tensorflow.estimator') +@click.option( + '--eval-input-fn', + '-i', + required=True, + cls=ResourceOption, + entry_point_group='bob.learn.tensorflow.input_fn') +@click.option( + '--hooks', + cls=ResourceOption, + multiple=True, + entry_point_group='bob.learn.tensorflow.hook') +@click.option( + '--run-once', cls=ResourceOption, default=False, show_default=True) +@click.option( + '--eval-interval-secs', + cls=ResourceOption, + type=click.INT, + default=60, + show_default=True) @click.option('--name', cls=ResourceOption) -@click.option('--keep-n-best-models', '-K', type=click.INT, cls=ResourceOption, - default=0, show_default=True) +@click.option( + '--keep-n-best-models', + '-K', + type=click.INT, + cls=ResourceOption, + default=0, + show_default=True) @verbosity_option(cls=ResourceOption) def eval(estimator, eval_input_fn, hooks, run_once, eval_interval_secs, name, keep_n_best_models, **kwargs): @@ -198,8 +217,8 @@ def eval(estimator, eval_input_fn, hooks, run_once, eval_interval_secs, name, name=name, ) - str_evaluations = append_evaluated_file( - evaluated_file, evaluations) + str_evaluations = append_evaluated_file(evaluated_file, + evaluations) click.echo(str_evaluations) sys.stdout.flush() diff --git a/bob/learn/tensorflow/script/predict_bio.py b/bob/learn/tensorflow/script/predict_bio.py index c4b396340a27ee1aad19243140c0b23a75cd33da..7bbb9b678d9b5a41b77af865b66f63fe80d7d223 100644 --- a/bob/learn/tensorflow/script/predict_bio.py +++ b/bob/learn/tensorflow/script/predict_bio.py @@ -8,8 +8,8 @@ import os import sys import logging import click -from bob.extension.scripts.click_helper import ( - verbosity_option, ConfigCommand, ResourceOption) +from bob.extension.scripts.click_helper import (verbosity_option, + ConfigCommand, ResourceOption) from multiprocessing import Pool from collections import defaultdict import numpy as np @@ -58,23 +58,42 @@ def save_predictions(pool, output_dir, key, pred_buffer): pool.apply_async(save, (np.mean(pred_buffer[key], axis=0), outpath)) -@click.command(entry_point_group='bob.learn.tensorflow.config', - cls=ConfigCommand) -@click.option('--estimator', '-e', required=True, cls=ResourceOption, - entry_point_group='bob.learn.tensorflow.estimator') 
-@click.option('--database', '-d', required=True, cls=ResourceOption, - entry_point_group='bob.bio.database') -@click.option('--biofiles', required=True, cls=ResourceOption, - help='You can only provide this through config files.') -@click.option('--bio-predict-input-fn', required=True, cls=ResourceOption, - entry_point_group='bob.learn.tensorflow.biogenerator_input') +@click.command( + entry_point_group='bob.learn.tensorflow.config', cls=ConfigCommand) +@click.option( + '--estimator', + '-e', + required=True, + cls=ResourceOption, + entry_point_group='bob.learn.tensorflow.estimator') +@click.option( + '--database', + '-d', + required=True, + cls=ResourceOption, + entry_point_group='bob.bio.database') +@click.option( + '--biofiles', + required=True, + cls=ResourceOption, + help='You can only provide this through config files.') +@click.option( + '--bio-predict-input-fn', + required=True, + cls=ResourceOption, + entry_point_group='bob.learn.tensorflow.biogenerator_input') @click.option('--output-dir', '-o', required=True, cls=ResourceOption) -@click.option('--load-data', cls=ResourceOption, - entry_point_group='bob.learn.tensorflow.load_data') -@click.option('--hooks', cls=ResourceOption, multiple=True, - entry_point_group='bob.learn.tensorflow.hook') -@click.option('--predict-keys', '-k', multiple=True, default=None, - cls=ResourceOption) +@click.option( + '--load-data', + cls=ResourceOption, + entry_point_group='bob.learn.tensorflow.load_data') +@click.option( + '--hooks', + cls=ResourceOption, + multiple=True, + entry_point_group='bob.learn.tensorflow.hook') +@click.option( + '--predict-keys', '-k', multiple=True, default=None, cls=ResourceOption) @click.option('--checkpoint-path', '-c', cls=ResourceOption) @click.option('--multiple-samples', '-m', is_flag=True, cls=ResourceOption) @click.option('--array', '-t', type=click.INT, default=1, cls=ResourceOption) @@ -199,8 +218,9 @@ def predict_bio(estimator, database, biofiles, bio_predict_input_fn, biofiles = biofiles[start:end] # filter the existing files - paths = [make_output_path(output_dir, f.make_path("", "")) - for f in biofiles] + paths = [ + make_output_path(output_dir, f.make_path("", "")) for f in biofiles + ] indexes = non_existing_files(paths, force) biofiles = [biofiles[i] for i in indexes] diff --git a/bob/learn/tensorflow/script/train.py b/bob/learn/tensorflow/script/train.py index 9e6e9196b9f4e3b4bd2aca6761e49dc515cacfa6..9379b703e04437bc6ad83c900cfd8993d2290930 100644 --- a/bob/learn/tensorflow/script/train.py +++ b/bob/learn/tensorflow/script/train.py @@ -6,20 +6,31 @@ from __future__ import division from __future__ import print_function import logging import click -from bob.extension.scripts.click_helper import ( - verbosity_option, ConfigCommand, ResourceOption) +from bob.extension.scripts.click_helper import (verbosity_option, + ConfigCommand, ResourceOption) logger = logging.getLogger(__name__) -@click.command(entry_point_group='bob.learn.tensorflow.config', - cls=ConfigCommand) -@click.option('--estimator', '-e', required=True, cls=ResourceOption, - entry_point_group='bob.learn.tensorflow.estimator') -@click.option('--train-input-fn', '-i', required=True, cls=ResourceOption, - entry_point_group='bob.learn.tensorflow.input_fn') -@click.option('--hooks', cls=ResourceOption, multiple=True, - entry_point_group='bob.learn.tensorflow.hook') +@click.command( + entry_point_group='bob.learn.tensorflow.config', cls=ConfigCommand) +@click.option( + '--estimator', + '-e', + required=True, + cls=ResourceOption, + 
entry_point_group='bob.learn.tensorflow.estimator') +@click.option( + '--train-input-fn', + '-i', + required=True, + cls=ResourceOption, + entry_point_group='bob.learn.tensorflow.input_fn') +@click.option( + '--hooks', + cls=ResourceOption, + multiple=True, + entry_point_group='bob.learn.tensorflow.hook') @click.option('--steps', '-s', cls=ResourceOption, type=click.types.INT) @click.option('--max-steps', '-m', cls=ResourceOption, type=click.types.INT) @verbosity_option(cls=ResourceOption) diff --git a/bob/learn/tensorflow/script/train_and_evaluate.py b/bob/learn/tensorflow/script/train_and_evaluate.py index b15da09ff70c65bf2a10017295130b67bc25f4ab..5736879cc9fcb605ec0fd0a5583b0d5cab2a8167 100644 --- a/bob/learn/tensorflow/script/train_and_evaluate.py +++ b/bob/learn/tensorflow/script/train_and_evaluate.py @@ -8,23 +8,39 @@ import tensorflow as tf from bob.learn.tensorflow.utils.hooks import EarlyStopException import logging import click -from bob.extension.scripts.click_helper import ( - verbosity_option, ConfigCommand, ResourceOption) +from bob.extension.scripts.click_helper import (verbosity_option, + ConfigCommand, ResourceOption) logger = logging.getLogger(__name__) -@click.command(entry_point_group='bob.learn.tensorflow.config', - cls=ConfigCommand) -@click.option('--estimator', '-e', required=True, cls=ResourceOption, - entry_point_group='bob.learn.tensorflow.estimator') -@click.option('--train-spec', '-it', required=True, cls=ResourceOption, - entry_point_group='bob.learn.tensorflow.trainspec') -@click.option('--eval-spec', '-ie', required=True, cls=ResourceOption, - entry_point_group='bob.learn.tensorflow.evalspec') -@click.option('--exit-ok-exceptions', cls=ResourceOption, multiple=True, - default=(EarlyStopException,), show_default=True, - entry_point_group='bob.learn.tensorflow.exception') +@click.command( + entry_point_group='bob.learn.tensorflow.config', cls=ConfigCommand) +@click.option( + '--estimator', + '-e', + required=True, + cls=ResourceOption, + entry_point_group='bob.learn.tensorflow.estimator') +@click.option( + '--train-spec', + '-it', + required=True, + cls=ResourceOption, + entry_point_group='bob.learn.tensorflow.trainspec') +@click.option( + '--eval-spec', + '-ie', + required=True, + cls=ResourceOption, + entry_point_group='bob.learn.tensorflow.evalspec') +@click.option( + '--exit-ok-exceptions', + cls=ResourceOption, + multiple=True, + default=(EarlyStopException, ), + show_default=True, + entry_point_group='bob.learn.tensorflow.exception') @verbosity_option(cls=ResourceOption) def train_and_evaluate(estimator, train_spec, eval_spec, exit_ok_exceptions, **kwargs): diff --git a/bob/learn/tensorflow/test/data/mnist_input_fn.py b/bob/learn/tensorflow/test/data/mnist_input_fn.py index 75eb39c548b7d854dde6c55c10bd115392626003..e5bf1f4a058a96b529613a879e396c66a410afa6 100644 --- a/bob/learn/tensorflow/test/data/mnist_input_fn.py +++ b/bob/learn/tensorflow/test/data/mnist_input_fn.py @@ -15,8 +15,10 @@ def input_fn(mode): shuffle = True data, labels = database.data(groups=groups) return tf.estimator.inputs.numpy_input_fn( - x={"data": data.astype('float32'), - 'key': labels.astype('float32')}, + x={ + "data": data.astype('float32'), + 'key': labels.astype('float32') + }, y=labels.astype('int32'), batch_size=128, num_epochs=num_epochs, diff --git a/bob/learn/tensorflow/test/test_architectures.py b/bob/learn/tensorflow/test/test_architectures.py index 4e6c532709a258397bc7db559f6c53cabf8a076a..85cd8310c2b89455d458ea4e77e6af9e3befae1a 100644 --- 
a/bob/learn/tensorflow/test/test_architectures.py +++ b/bob/learn/tensorflow/test/test_architectures.py @@ -6,12 +6,13 @@ import tensorflow as tf from bob.learn.tensorflow.network import inception_resnet_v2, inception_resnet_v2_batch_norm,\ inception_resnet_v1, inception_resnet_v1_batch_norm + def test_inceptionv2(): # Testing WITHOUT batch norm inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1)) - graph, _ = inception_resnet_v2(inputs) - assert len(tf.trainable_variables())==490 + graph, _ = inception_resnet_v2(inputs) + assert len(tf.trainable_variables()) == 490 tf.reset_default_graph() assert len(tf.global_variables()) == 0 @@ -19,7 +20,7 @@ def test_inceptionv2(): # Testing WITH batch norm inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1)) graph, _ = inception_resnet_v2_batch_norm(inputs) - assert len(tf.trainable_variables())==900 + assert len(tf.trainable_variables()) == 900 tf.reset_default_graph() assert len(tf.global_variables()) == 0 @@ -29,8 +30,8 @@ def test_inceptionv1(): # Testing WITHOUT batch norm inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1)) - graph, _ = inception_resnet_v1(inputs) - assert len(tf.trainable_variables())==266 + graph, _ = inception_resnet_v1(inputs) + assert len(tf.trainable_variables()) == 266 tf.reset_default_graph() assert len(tf.global_variables()) == 0 @@ -38,8 +39,7 @@ def test_inceptionv1(): # Testing WITH batch norm inputs = tf.placeholder(tf.float32, shape=(1, 160, 160, 1)) graph, _ = inception_resnet_v1_batch_norm(inputs) - assert len(tf.trainable_variables())==490 + assert len(tf.trainable_variables()) == 490 tf.reset_default_graph() assert len(tf.global_variables()) == 0 - diff --git a/bob/learn/tensorflow/test/test_db_to_tfrecords.py b/bob/learn/tensorflow/test/test_db_to_tfrecords.py index 1ed6900cfaa9512a519453f56cfb725e6418c270..5027fcfb5d107f7fa9fb9faf5628618947f34ef0 100644 --- a/bob/learn/tensorflow/test/test_db_to_tfrecords.py +++ b/bob/learn/tensorflow/test/test_db_to_tfrecords.py @@ -18,8 +18,10 @@ def test_db_to_tfrecords(): try: runner = CliRunner() - result = runner.invoke(db_to_tfrecords, args=( - dummy_config, '--output', output_path), standalone_mode=False) + result = runner.invoke( + db_to_tfrecords, + args=(dummy_config, '--output', output_path), + standalone_mode=False) assert result.exit_code == 0, '%s\n%s\n%s' % ( result.exc_info, result.output, result.exception) diff --git a/bob/learn/tensorflow/test/test_estimator_onegraph.py b/bob/learn/tensorflow/test/test_estimator_onegraph.py index 9a8d798b1f1c661a11c3fbe622fa53e441bf576d..5cc989d931b1c5fa29a5cf82cddb0a17556dda47 100644 --- a/bob/learn/tensorflow/test/test_estimator_onegraph.py +++ b/bob/learn/tensorflow/test/test_estimator_onegraph.py @@ -33,6 +33,7 @@ epochs = 6 steps = 5000 reproducible.set_seed() + def test_logitstrainer(): # Trainer logits try: diff --git a/bob/learn/tensorflow/test/test_estimator_scripts.py b/bob/learn/tensorflow/test/test_estimator_scripts.py index 625c69620d8de2d11e1cafcfcf960900a4892eba..71845a57343015501d41fb968ab40eb17287433b 100644 --- a/bob/learn/tensorflow/test/test_estimator_scripts.py +++ b/bob/learn/tensorflow/test/test_estimator_scripts.py @@ -102,8 +102,8 @@ def _create_tfrecord(test_dir): f2.write(f.read().replace('TEST_DIR', test_dir)) output = os.path.join(test_dir, 'dev.tfrecords') runner = CliRunner() - result = runner.invoke(db_to_tfrecords, args=[ - dummy_tfrecord_config, '--output', output]) + result = runner.invoke( + db_to_tfrecords, args=[dummy_tfrecord_config, '--output', output]) assert 
result.exit_code == 0, '%s\n%s\n%s' % ( result.exc_info, result.output, result.exception) return output diff --git a/bob/learn/tensorflow/test/test_estimator_siamese.py b/bob/learn/tensorflow/test/test_estimator_siamese.py index 14d2debfefa557372843f7be3a5fa16af36f5f27..3bd294e901c6975f421f728a85f69234628a0060 100644 --- a/bob/learn/tensorflow/test/test_estimator_siamese.py +++ b/bob/learn/tensorflow/test/test_estimator_siamese.py @@ -79,9 +79,9 @@ def test_siamesetrainer(): try: # Setting seed - session_config, run_config,_,_,_ = reproducible.set_seed() + session_config, run_config, _, _, _ = reproducible.set_seed() run_config = run_config.replace(save_checkpoints_steps=500) - + trainer = Siamese( model_dir=model_dir, architecture=dummy, @@ -112,7 +112,7 @@ def test_siamesetrainer_transfer(): # Trainer logits first than siamese try: # Setting seed - session_config, run_config,_,_,_ = reproducible.set_seed() + session_config, run_config, _, _, _ = reproducible.set_seed() run_config = run_config.replace(save_checkpoints_steps=500) extra_checkpoint = { @@ -140,7 +140,7 @@ def test_siamesetrainer_transfer(): model_dir=model_dir_adapted, architecture=dummy_adapted, optimizer=tf.train.GradientDescentOptimizer(learning_rate), - config=run_config, + config=run_config, loss_op=contrastive_loss, validation_batch_size=validation_batch_size, extra_checkpoint=extra_checkpoint) @@ -175,9 +175,8 @@ def test_siamesetrainer_transfer_extraparams(): "trainable_variables": ["Dummy"] } - # Setting seed - session_config, run_config,_,_,_ = reproducible.set_seed() + session_config, run_config, _, _, _ = reproducible.set_seed() run_config = run_config.replace(save_checkpoints_steps=500) # LOGISTS diff --git a/bob/learn/tensorflow/utils/util.py b/bob/learn/tensorflow/utils/util.py index 4bde87f36ad6154d15d20e70676f356b12053c36..0750d663677f369a92cc3db54ea859c03bdb60aa 100644 --- a/bob/learn/tensorflow/utils/util.py +++ b/bob/learn/tensorflow/utils/util.py @@ -40,8 +40,8 @@ def load_mnist(perc_train=0.9): train_data = data[0:n_train, :].astype("float32") * 0.00390625 train_labels = labels[0:n_train] - validation_data = data[n_train:n_train + n_validation, :].astype( - "float32") * 0.00390625 + validation_data = data[n_train:n_train + + n_validation, :].astype("float32") * 0.00390625 validation_labels = labels[n_train:n_train + n_validation] return train_data, train_labels, validation_data, validation_labels @@ -215,8 +215,7 @@ def compute_embedding_accuracy_tensors(embedding, labels, num=None): # sample) predictions = predict_using_tensors(embedding, labels, num=num) matching = [ - tf.equal(p, l) - for p, l in zip( + tf.equal(p, l) for p, l in zip( tf.unstack(predictions, num=num), tf.unstack(labels, num=num)) ]
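
Note: the Logits.py hunks above reformat, but do not change, the moving-average bookkeeping that both Logits and LogitsCenterLoss perform. For reference, the pattern assembled from those fragments looks as follows. This is a minimal sketch for TF 1.x only; the function name make_train_op is illustrative and not part of the patch.

import tensorflow as tf

def make_train_op(loss, optimizer):
    global_step = tf.train.get_or_create_global_step()

    # Keep shadow (exponentially averaged) copies of all trainable variables.
    variable_averages = tf.train.ExponentialMovingAverage(0.9999, global_step)
    variable_averages_op = variable_averages.apply(tf.trainable_variables())

    # Smooth each individual loss in the LOSSES collection for summaries.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    loss_averages_op = loss_averages.apply(
        tf.get_collection(tf.GraphKeys.LOSSES))
    for l in tf.get_collection(tf.GraphKeys.LOSSES):
        tf.summary.scalar(l.op.name + "_averaged", loss_averages.average(l))

    # Group the gradient step with both moving-average updates so that all
    # three run on every training iteration.
    return tf.group(
        optimizer.minimize(loss, global_step=global_step),
        variable_averages_op, loss_averages_op)

In the estimators above, the grouped op is what gets passed as train_op to tf.estimator.EstimatorSpec(mode=mode, loss=self.loss, train_op=train_op).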