Renamed the keyword argument is_training_mode to mode and used tf.estimator.ModeKeys to switch between training/validation/prediction modes
parent bc2ae383
Pipeline #13920 canceled with stages in 14 minutes and 41 seconds
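The pattern this commit adopts, as a minimal sketch (the function body here is an illustrative placeholder, not code from the repository): an Estimator's model_fn receives exactly one of the three tf.estimator.ModeKeys constants, and the architecture functions collapse it to the boolean flag that slim layers expect.

import tensorflow as tf

# model_fn is called with one of tf.estimator.ModeKeys.TRAIN, .EVAL, or .PREDICT.
def model_fn(features, labels, mode):
    # slim layers take a boolean is_training flag, so the mode key is
    # collapsed to a comparison against TRAIN, as done throughout this commit.
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    return is_training  # placeholder body; a real model_fn returns an EstimatorSpec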
@@ -140,7 +140,7 @@ class Logits(estimator.Estimator):
         is_trainable = is_trainable_checkpoint(self.extra_checkpoint)
         # Building the training graph
-        prelogits = self.architecture(data, is_training_mode = True, trainable_variables=is_trainable)[0]
+        prelogits = self.architecture(data, mode=mode, trainable_variables=is_trainable)[0]
         logits = append_logits(prelogits, n_classes)
         # Compute Loss (for both TRAIN and EVAL modes)
@@ -157,7 +157,7 @@ class Logits(estimator.Estimator):
         # Building the training graph for PREDICTION OR VALIDATION
-        prelogits = self.architecture(data, is_training_mode = False, trainable_variables=False)[0]
+        prelogits = self.architecture(data, mode=mode, trainable_variables=False)[0]
         logits = append_logits(prelogits, n_classes)
         if self.embedding_validation:
@@ -295,7 +295,7 @@ class LogitsCenterLoss(estimator.Estimator):
         is_trainable = is_trainable_checkpoint(self.extra_checkpoint)
         # Building the training graph
-        prelogits = self.architecture(data, is_training_mode = True, trainable_variables=is_trainable)[0]
+        prelogits = self.architecture(data, mode=mode, trainable_variables=is_trainable)[0]
         logits = append_logits(prelogits, n_classes)
         # Compute Loss (for TRAIN mode)
@@ -316,7 +316,7 @@ class LogitsCenterLoss(estimator.Estimator):
             train_op=train_op)
         # Building the training graph for PREDICTION OR VALIDATION
-        prelogits = self.architecture(data, is_training_mode = False, trainable_variables=False)[0]
+        prelogits = self.architecture(data, mode=mode, trainable_variables=False)[0]
         logits = append_logits(prelogits, n_classes)
         if self.embedding_validation:
...
@@ -132,8 +132,8 @@ class Siamese(estimator.Estimator):
             raise ValueError("The input function needs to contain a dictionary with the keys `left` and `right` ")
         # Building one graph
-        prelogits_left, end_points_left = self.architecture(features['left'], is_training_mode = True, trainable_variables=is_trainable)
-        prelogits_right, end_points_right = self.architecture(features['right'], reuse=True, is_training_mode = True, trainable_variables=is_trainable)
+        prelogits_left, end_points_left = self.architecture(features['left'], mode=mode, trainable_variables=is_trainable)
+        prelogits_right, end_points_right = self.architecture(features['right'], reuse=True, mode=mode, trainable_variables=is_trainable)
         if self.extra_checkpoint is not None:
             tf.contrib.framework.init_from_checkpoint(self.extra_checkpoint["checkpoint_path"],
@@ -153,7 +153,7 @@ class Siamese(estimator.Estimator):
         data = features['data']
         # Compute the embeddings
-        prelogits = self.architecture(data, is_training_mode = False, trainable_variables=False)[0]
+        prelogits = self.architecture(data, mode=mode, trainable_variables=False)[0]
         embeddings = tf.nn.l2_normalize(prelogits, 1)
         predictions = {"embeddings": embeddings}
...
@@ -121,9 +121,9 @@ class Triplet(estimator.Estimator):
         is_trainable = is_trainable_checkpoint(self.extra_checkpoint)
         # Building one graph
-        prelogits_anchor = self.architecture(features['anchor'], is_training_mode = True)[0]
-        prelogits_positive = self.architecture(features['positive'], reuse=True, is_training_mode = True)[0]
-        prelogits_negative = self.architecture(features['negative'], reuse=True, is_training_mode = True)[0]
+        prelogits_anchor = self.architecture(features['anchor'], mode=mode)[0]
+        prelogits_positive = self.architecture(features['positive'], reuse=True, mode=mode)[0]
+        prelogits_negative = self.architecture(features['negative'], reuse=True, mode=mode)[0]
         if self.extra_checkpoint is not None:
             tf.contrib.framework.init_from_checkpoint(self.extra_checkpoint["checkpoint_path"],
@@ -141,7 +141,7 @@ class Triplet(estimator.Estimator):
         data = features['data']
         # Compute the embeddings
-        prelogits = self.architecture(data, is_training_mode = False)[0]
+        prelogits = self.architecture(data, mode=mode)[0]
         embeddings = tf.nn.l2_normalize(prelogits, 1)
         predictions = {"embeddings": embeddings}
...
@@ -5,7 +5,7 @@
 import tensorflow as tf
-def dummy(inputs, reuse=False, is_training_mode = True, trainable_variables=True):
+def dummy(inputs, reuse=False, mode = tf.estimator.ModeKeys.TRAIN, trainable_variables=True):
     """
     Create all the necessary variables for this CNN
...
@@ -154,7 +154,7 @@ def inception_resnet_v1(inputs, is_training=True,
                         bottleneck_layer_size=128,
                         reuse=None,
                         scope='InceptionResnetV1',
-                        is_training_mode = True,
+                        mode = tf.estimator.ModeKeys.TRAIN,
                         trainable_variables=True):
     """
     Creates the Inception Resnet V1 model.
@@ -165,7 +165,7 @@ def inception_resnet_v1(inputs, is_training=True,
         a 4-D tensor of size [batch_size, height, width, 3].
       num_classes:
         number of predicted classes.
-      is_training:
+      mode:
         whether is training or not.
       dropout_keep_prob:
         the fraction to keep before final layer.
@@ -178,7 +178,7 @@ def inception_resnet_v1(inputs, is_training=True,
     with tf.variable_scope(scope, 'InceptionResnetV1', [inputs], reuse=reuse):
         with slim.arg_scope([slim.batch_norm, slim.dropout],
-                            is_training=is_training_mode):
+                            is_training=(mode == tf.estimator.ModeKeys.TRAIN)):
             with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                                 stride=1, padding='SAME'):
@@ -242,7 +242,7 @@ def inception_resnet_v1(inputs, is_training=True,
                                       scope='AvgPool_1a_8x8')
                 net = slim.flatten(net)
-                net = slim.dropout(net, dropout_keep_prob, is_training=is_training_mode,
+                net = slim.dropout(net, dropout_keep_prob, is_training=(mode == tf.estimator.ModeKeys.TRAIN),
                                    scope='Dropout')
                 end_points['PreLogitsFlatten'] = net
...
@@ -105,7 +105,7 @@ def inference(images, keep_probability, phase_train=True,
                         weights_regularizer=slim.l2_regularizer(weight_decay),
                         normalizer_fn=slim.batch_norm,
                         normalizer_params=batch_norm_params):
-        return inception_resnet_v2(images, is_training_mode=False,
+        return inception_resnet_v2(images, mode = tf.estimator.ModeKeys.PREDICT,
                                    dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size, reuse=reuse)
@@ -114,7 +114,7 @@ def inception_resnet_v2(inputs,
                         bottleneck_layer_size=128,
                         reuse=None,
                         scope='InceptionResnetV2',
-                        is_training_mode = True,
+                        mode = tf.estimator.ModeKeys.TRAIN,
                         trainable_variables=True):
     """Creates the Inception Resnet V2 model.
@@ -141,7 +141,7 @@ def inception_resnet_v2(inputs,
     end_points = {}
     with tf.variable_scope(scope, 'InceptionResnetV2', [inputs], reuse=reuse):
         with slim.arg_scope([slim.batch_norm, slim.dropout],
-                            is_training=is_training_mode):
+                            is_training=(mode == tf.estimator.ModeKeys.TRAIN)):
             with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                                 stride=1, padding='SAME'):
...
@@ -94,7 +94,7 @@ def main(argv=None):
     # Evaluate
     evaluations = estimator.evaluate(
         input_fn=eval_input_fn,
-        steps=steps,
+        steps=1,
         hooks=hooks,
         checkpoint_path=checkpoint_path,
         name=name,
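(For context: the steps argument of tf.estimator.Estimator.evaluate caps how many batches a single evaluation run consumes, so the change above makes each evaluation process exactly one batch. A minimal sketch of the call, with estimator and eval_input_fn standing in for the objects this script builds:

metrics = estimator.evaluate(input_fn=eval_input_fn, steps=1)  # one batch per run
)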
...
@@ -37,7 +37,7 @@ epochs = 2
 steps = 5000
-def dummy_adapted(inputs, reuse=False, is_training_mode = True, trainable_variables=True):
+def dummy_adapted(inputs, reuse=False, mode = tf.estimator.ModeKeys.TRAIN, trainable_variables=True):
     """
     Create all the necessary variables for this CNN
@@ -48,7 +48,7 @@ def dummy_adapted(inputs, reuse=False, is_training_mode = True, trainable_variab
     """
     slim = tf.contrib.slim
-    graph, end_points = dummy(inputs, reuse=reuse, is_training_mode = is_training_mode, trainable_variables=trainable_variables)
+    graph, end_points = dummy(inputs, reuse=reuse, mode = mode, trainable_variables=trainable_variables)
     initializer = tf.contrib.layers.xavier_initializer()
     with tf.variable_scope('Adapted', reuse=reuse):
...
@@ -67,7 +67,6 @@ def test_triplet_estimator():
     trainer = Triplet(model_dir=model_dir,
                       architecture=dummy,
                       optimizer=tf.train.GradientDescentOptimizer(learning_rate),
-                      n_classes=10,
                       loss_op=triplet_loss,
                       validation_batch_size=validation_batch_size)
     run_triplet_estimator(trainer)
...
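Putting the new signature together, a toy architecture in the style of the dummy network above might look like the sketch below (toy_net and its layer names are hypothetical; only the tf/slim calls are real):

import tensorflow as tf
slim = tf.contrib.slim

def toy_net(inputs, reuse=False, mode=tf.estimator.ModeKeys.TRAIN,
            trainable_variables=True):
    # Dropout only fires when the Estimator runs in TRAIN mode.
    is_training = (mode == tf.estimator.ModeKeys.TRAIN)
    end_points = {}
    with tf.variable_scope('ToyNet', reuse=reuse):
        net = slim.fully_connected(slim.flatten(inputs), 10,
                                   trainable=trainable_variables, scope='fc1')
        net = slim.dropout(net, 0.5, is_training=is_training, scope='Dropout')
        end_points['fc1'] = net
    return net, end_points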