Commit 77838cb4 authored by Tiago de Freitas Pereira

Reorganized the keyword arguments for transfer learning

parent 66fb1898
Pipeline #13733 failed with stages in 17 minutes and 39 seconds
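In short, the commit replaces the single `is_trainable` keyword with two independent arguments: `is_training_mode`, which drives the training-time behaviour of batch normalization and dropout, and `trainable_variables`, which decides whether variables join the optimizer's update set. A minimal sketch of the intended calls, assuming `architecture` stands for any network constructor following the new signature used throughout the diff:

    # Training graph for transfer learning: layers behave in training mode,
    # but the variables restored from the checkpoint stay frozen.
    prelogits = architecture(data, is_training_mode=True,
                             trainable_variables=False)[0]

    # Prediction/validation graph: everything fixed.
    prelogits = architecture(data, is_training_mode=False,
                             trainable_variables=False)[0]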
@@ -130,13 +130,34 @@ class Logits(estimator.Estimator):
data = features['data']
key = features['key']
# Configure the Training Op (for TRAIN mode)
if mode == tf.estimator.ModeKeys.TRAIN:
# Building one graph, by default everything is trainable
if self.extra_checkpoint is None:
is_trainable = True
else:
is_trainable = is_trainable_checkpoint(self.extra_checkpoint)
prelogits = self.architecture(data, is_trainable=is_trainable)[0]
# Building the training graph
prelogits = self.architecture(data, is_training_mode = True, trainable_variables=is_trainable)[0]
logits = append_logits(prelogits, n_classes)
# Compute Loss (for both TRAIN and EVAL modes)
self.loss = self.loss_op(logits, labels)
if self.extra_checkpoint is not None:
tf.contrib.framework.init_from_checkpoint(self.extra_checkpoint["checkpoint_path"],
self.extra_checkpoint["scopes"])
global_step = tf.contrib.framework.get_or_create_global_step()
train_op = self.optimizer.minimize(self.loss, global_step=global_step)
return tf.estimator.EstimatorSpec(mode=mode, loss=self.loss,
train_op=train_op)
# Building the graph for PREDICTION or VALIDATION
prelogits = self.architecture(data, is_training_mode = False, trainable_variables=False)[0]
logits = append_logits(prelogits, n_classes)
if self.embedding_validation:
@@ -157,22 +178,9 @@ class Logits(estimator.Estimator):
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# Compute Loss (for both TRAIN and EVAL modes)
# IF Validation
self.loss = self.loss_op(logits, labels)
# Configure the Training Op (for TRAIN mode)
if mode == tf.estimator.ModeKeys.TRAIN:
if self.extra_checkpoint is not None:
tf.contrib.framework.init_from_checkpoint(self.extra_checkpoint["checkpoint_path"],
self.extra_checkpoint["scopes"])
global_step = tf.contrib.framework.get_or_create_global_step()
train_op = self.optimizer.minimize(self.loss, global_step=global_step)
return tf.estimator.EstimatorSpec(mode=mode, loss=self.loss,
train_op=train_op)
# Validation
if self.embedding_validation:
predictions_op = predict_using_tensors(predictions["embeddings"], labels, num=validation_batch_size)
eval_metric_ops = {"accuracy": tf.metrics.accuracy(labels=labels, predictions=predictions_op)}
@@ -277,19 +285,40 @@ class LogitsCenterLoss(estimator.Estimator):
data = features['data']
key = features['key']
# Configure the Training Op (for TRAIN mode)
if mode == tf.estimator.ModeKeys.TRAIN:
# Building one graph, by default everything is trainable
if self.extra_checkpoint is None:
is_trainable = True
else:
is_trainable = is_trainable_checkpoint(self.extra_checkpoint)
prelogits = self.architecture(data)[0]
# Building the training graph
prelogits = self.architecture(data, is_training_mode = True, trainable_variables=is_trainable)[0]
logits = append_logits(prelogits, n_classes)
# Compute Loss (for both TRAIN and EVAL modes)
# Compute Loss (for TRAIN mode)
loss_dict = mean_cross_entropy_center_loss(logits, prelogits, labels, self.n_classes,
alpha=self.alpha, factor=self.factor)
self.loss = loss_dict['loss']
centers = loss_dict['centers']
if self.extra_checkpoint is not None:
tf.contrib.framework.init_from_checkpoint(self.extra_checkpoint["checkpoint_path"],
self.extra_checkpoint["scopes"])
global_step = tf.contrib.framework.get_or_create_global_step()
train_op = tf.group(self.optimizer.minimize(self.loss, global_step=global_step),
centers)
return tf.estimator.EstimatorSpec(mode=mode, loss=self.loss,
train_op=train_op)
# Building the graph for PREDICTION or VALIDATION
prelogits = self.architecture(data, is_training_mode = False, trainable_variables=False)[0]
logits = append_logits(prelogits, n_classes)
if self.embedding_validation:
# Compute the embeddings
embeddings = tf.nn.l2_normalize(prelogits, 1)
@@ -308,25 +337,10 @@ class LogitsCenterLoss(estimator.Estimator):
if mode == tf.estimator.ModeKeys.PREDICT:
return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
# IF Validation
loss_dict = mean_cross_entropy_center_loss(logits, prelogits, labels, self.n_classes,
alpha=self.alpha, factor=self.factor)
self.loss = loss_dict['loss']
centers = loss_dict['centers']
# Configure the Training Op (for TRAIN mode)
if mode == tf.estimator.ModeKeys.TRAIN:
# Loading variables from some model just in case
if self.extra_checkpoint is not None:
tf.contrib.framework.init_from_checkpoint(self.extra_checkpoint["checkpoint_path"],
self.extra_checkpoint["scopes"])
global_step = tf.contrib.framework.get_or_create_global_step()
# backprop and updating the centers
train_op = tf.group(self.optimizer.minimize(self.loss, global_step=global_step),
centers)
return tf.estimator.EstimatorSpec(mode=mode, loss=self.loss,
train_op=train_op)
if self.embedding_validation:
predictions_op = predict_using_tensors(predictions["embeddings"], labels, num=validation_batch_size)
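For reference, `extra_checkpoint` is a dictionary: the hunks above read its "checkpoint_path" and "scopes" keys and hand them to `tf.contrib.framework.init_from_checkpoint`, while `is_trainable_checkpoint` derives the trainability flag from the same dictionary. A hypothetical example (the path and scope mapping below are placeholders, not values from the commit):

    extra_checkpoint = {
        "checkpoint_path": "/path/to/pretrained/model",
        # variable-scope mapping handed to init_from_checkpoint
        "scopes": {"InceptionResnetV2/": "InceptionResnetV2/"},
    }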
@@ -131,8 +131,11 @@ class Siamese(estimator.Estimator):
raise ValueError("The input function needs to contain a dictionary with the keys `left` and `right` ")
# Building one graph
prelogits_left = self.architecture(features['left'], is_trainable=is_trainable)[0]
prelogits_right = self.architecture(features['right'], reuse=True, is_trainable=is_trainable)[0]
prelogits_left = self.architecture(features['left'], is_training_mode = True, trainable_variables=is_trainable)[0]
prelogits_right = self.architecture(features['right'], reuse=True, is_training_mode = True, trainable_variables=is_trainable)[0]
for var in tf.global_variables():
tf.summary.histogram(var.op.name, var)
if self.extra_checkpoint is not None:
tf.contrib.framework.init_from_checkpoint(self.extra_checkpoint["checkpoint_path"],
@@ -152,7 +155,7 @@ class Siamese(estimator.Estimator):
data = features['data']
# Compute the embeddings
prelogits = self.architecture(data)[0]
prelogits = self.architecture(data, is_training_mode = False, trainable_variables=False)[0]
embeddings = tf.nn.l2_normalize(prelogits, 1)
predictions = {"embeddings": embeddings}
@@ -5,7 +5,7 @@
import tensorflow as tf
def dummy(inputs, reuse=False, is_trainable=True):
def dummy(inputs, reuse=False, is_training_mode = True, trainable_variables=True):
"""
Create all the necessary variables for this CNN
@@ -24,7 +24,7 @@ def dummy(inputs, reuse=False, is_trainable=True):
graph = slim.conv2d(inputs, 10, [3, 3], activation_fn=tf.nn.relu, stride=1, scope='conv1',
weights_initializer=initializer,
trainable=is_trainable)
trainable=trainable_variables)
end_points['conv1'] = graph
graph = slim.max_pool2d(graph, [4, 4], scope='pool1')
@@ -37,7 +37,7 @@ def dummy(inputs, reuse=False, is_trainable=True):
weights_initializer=initializer,
activation_fn=None,
scope='fc1',
trainable=is_trainable)
trainable=trainable_variables)
end_points['fc1'] = graph
@@ -27,21 +27,21 @@ import tensorflow as tf
import tensorflow.contrib.slim as slim
# Inception-Resnet-A
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None, trainable_variables=True):
"""Builds the 35x35 resnet block."""
with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1', trainable=trainable_variables)
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1', trainable=trainable_variables)
tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3', trainable=trainable_variables)
with tf.variable_scope('Branch_2'):
tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
tower_conv2_1 = slim.conv2d(tower_conv2_0, 32, 3, scope='Conv2d_0b_3x3')
tower_conv2_2 = slim.conv2d(tower_conv2_1, 32, 3, scope='Conv2d_0c_3x3')
tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1', trainable=trainable_variables)
tower_conv2_1 = slim.conv2d(tower_conv2_0, 32, 3, scope='Conv2d_0b_3x3', trainable=trainable_variables)
tower_conv2_2 = slim.conv2d(tower_conv2_1, 32, 3, scope='Conv2d_0c_3x3', trainable=trainable_variables)
mixed = tf.concat([tower_conv, tower_conv1_1, tower_conv2_2], 3)
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
activation_fn=None, scope='Conv2d_1x1', trainable=trainable_variables)
net += scale * up
if activation_fn:
net = activation_fn(net)
@@ -52,16 +52,16 @@ def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 17x17 resnet block."""
with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 128, 1, scope='Conv2d_1x1')
tower_conv = slim.conv2d(net, 128, 1, scope='Conv2d_1x1', trainable=trainable_variables)
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1', trainable=trainable_variables)
tower_conv1_1 = slim.conv2d(tower_conv1_0, 128, [1, 7],
scope='Conv2d_0b_1x7')
scope='Conv2d_0b_1x7', trainable=trainable_variables)
tower_conv1_2 = slim.conv2d(tower_conv1_1, 128, [7, 1],
scope='Conv2d_0c_7x1')
scope='Conv2d_0c_7x1', trainable=trainable_variables)
mixed = tf.concat([tower_conv, tower_conv1_2], 3)
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
activation_fn=None, scope='Conv2d_1x1', trainable=trainable_variables)
net += scale * up
if activation_fn:
net = activation_fn(net)
@@ -73,54 +73,54 @@ def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
"""Builds the 8x8 resnet block."""
with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1', trainable=trainable_variables)
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1', trainable=trainable_variables)
tower_conv1_1 = slim.conv2d(tower_conv1_0, 192, [1, 3],
scope='Conv2d_0b_1x3')
scope='Conv2d_0b_1x3', trainable=trainable_variables)
tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [3, 1],
scope='Conv2d_0c_3x1')
scope='Conv2d_0c_3x1', trainable=trainable_variables)
mixed = tf.concat([tower_conv, tower_conv1_2], 3)
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1')
activation_fn=None, scope='Conv2d_1x1', trainable=trainable_variables)
net += scale * up
if activation_fn:
net = activation_fn(net)
return net
def reduction_a(net, k, l, m, n):
with tf.variable_scope('Branch_0'):
def reduction_a(net, k, l, m, n, trainable_variables=True, reuse=True):
with tf.variable_scope('Branch_0', reuse=reuse):
tower_conv = slim.conv2d(net, n, 3, stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, k, 1, scope='Conv2d_0a_1x1')
scope='Conv2d_1a_3x3', trainable=trainable_variables)
with tf.variable_scope('Branch_1', reuse=reuse):
tower_conv1_0 = slim.conv2d(net, k, 1, scope='Conv2d_0a_1x1', trainable=trainable_variables)
tower_conv1_1 = slim.conv2d(tower_conv1_0, l, 3,
scope='Conv2d_0b_3x3')
scope='Conv2d_0b_3x3', trainable=trainable_variables)
tower_conv1_2 = slim.conv2d(tower_conv1_1, m, 3,
stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
scope='Conv2d_1a_3x3', trainable=trainable_variables)
with tf.variable_scope('Branch_2', reuse=reuse):
tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat([tower_conv, tower_conv1_2, tower_pool], 3)
return net
def reduction_b(net):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
def reduction_b(net, reuse=True, trainable_variables=True):
with tf.variable_scope('Branch_0', reuse=reuse):
tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1', trainable=trainable_variables)
tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_1'):
tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
padding='VALID', scope='Conv2d_1a_3x3', trainable=trainable_variables)
with tf.variable_scope('Branch_1', reuse=reuse):
tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1', trainable=trainable_variables)
tower_conv1_1 = slim.conv2d(tower_conv1, 256, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_2'):
tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
padding='VALID', scope='Conv2d_1a_3x3', trainable=trainable_variables)
with tf.variable_scope('Branch_2', reuse=reuse):
tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1', trainable=trainable_variables)
tower_conv2_1 = slim.conv2d(tower_conv2, 256, 3,
scope='Conv2d_0b_3x3')
scope='Conv2d_0b_3x3', trainable=trainable_variables)
tower_conv2_2 = slim.conv2d(tower_conv2_1, 256, 3, stride=2,
padding='VALID', scope='Conv2d_1a_3x3')
with tf.variable_scope('Branch_3'):
padding='VALID', scope='Conv2d_1a_3x3', trainable=trainable_variables)
with tf.variable_scope('Branch_3', reuse=reuse):
tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',
scope='MaxPool_1a_3x3')
net = tf.concat([tower_conv_1, tower_conv1_1,
@@ -153,7 +153,9 @@ def inception_resnet_v1(inputs, is_training=True,
dropout_keep_prob=0.8,
bottleneck_layer_size=128,
reuse=None,
scope='InceptionResnetV1'):
scope='InceptionResnetV1',
is_training_mode = True,
trainable_variables=True):
"""
Creates the Inception Resnet V1 model.
@@ -176,20 +178,20 @@ def inception_resnet_v1(inputs, is_training=True,
with tf.variable_scope(scope, 'InceptionResnetV1', [inputs], reuse=reuse):
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
is_training=is_training_mode):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# 149 x 149 x 32
net = slim.conv2d(inputs, 32, 3, stride=2, padding='VALID',
scope='Conv2d_1a_3x3')
scope='Conv2d_1a_3x3', trainable=trainable_variables)
end_points['Conv2d_1a_3x3'] = net
# 147 x 147 x 32
net = slim.conv2d(net, 32, 3, padding='VALID',
scope='Conv2d_2a_3x3')
scope='Conv2d_2a_3x3', trainable=trainable_variables)
end_points['Conv2d_2a_3x3'] = net
# 147 x 147 x 64
net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3')
net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3', trainable=trainable_variables)
end_points['Conv2d_2b_3x3'] = net
# 73 x 73 x 64
net = slim.max_pool2d(net, 3, stride=2, padding='VALID',
@@ -197,40 +199,40 @@ def inception_resnet_v1(inputs, is_training=True,
end_points['MaxPool_3a_3x3'] = net
# 73 x 73 x 80
net = slim.conv2d(net, 80, 1, padding='VALID',
scope='Conv2d_3b_1x1')
scope='Conv2d_3b_1x1', trainable=trainable_variables)
end_points['Conv2d_3b_1x1'] = net
# 71 x 71 x 192
net = slim.conv2d(net, 192, 3, padding='VALID',
scope='Conv2d_4a_3x3')
scope='Conv2d_4a_3x3', trainable=trainable_variables)
end_points['Conv2d_4a_3x3'] = net
# 35 x 35 x 256
net = slim.conv2d(net, 256, 3, stride=2, padding='VALID',
scope='Conv2d_4b_3x3')
scope='Conv2d_4b_3x3', trainable=trainable_variables)
end_points['Conv2d_4b_3x3'] = net
# 5 x Inception-resnet-A
net = slim.repeat(net, 5, block35, scale=0.17)
net = slim.repeat(net, 5, block35, scale=0.17, trainable_variables=trainable_variables)
end_points['Mixed_5a'] = net
# Reduction-A
with tf.variable_scope('Mixed_6a'):
net = reduction_a(net, 192, 192, 256, 384)
net = reduction_a(net, 192, 192, 256, 384, trainable_variables=trainable_variables)
end_points['Mixed_6a'] = net
# 10 x Inception-Resnet-B
net = slim.repeat(net, 10, block17, scale=0.10)
net = slim.repeat(net, 10, block17, scale=0.10, trainable_variables=trainable_variables)
end_points['Mixed_6b'] = net
# Reduction-B
with tf.variable_scope('Mixed_7a'):
net = reduction_b(net)
net = reduction_b(net, trainable_variables=trainable_variables)
end_points['Mixed_7a'] = net
# 5 x Inception-Resnet-C
net = slim.repeat(net, 5, block8, scale=0.20)
net = slim.repeat(net, 5, block8, scale=0.20, trainable_variables=trainable_variables)
end_points['Mixed_8a'] = net
net = block8(net, activation_fn=None)
net = block8(net, activation_fn=None, trainable_variables=trainable_variables)
end_points['Mixed_8b'] = net
with tf.variable_scope('Logits'):
@@ -240,12 +242,12 @@ def inception_resnet_v1(inputs, is_training=True,
scope='AvgPool_1a_8x8')
net = slim.flatten(net)
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
net = slim.dropout(net, dropout_keep_prob, is_training=is_training_mode,
scope='Dropout')
end_points['PreLogitsFlatten'] = net
net = slim.fully_connected(net, bottleneck_layer_size, activation_fn=None,
scope='Bottleneck', reuse=False)
scope='Bottleneck', reuse=False, trainable=trainable_variables)
return net, end_points
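With the v1 constructor updated, a frozen, prediction-mode tower can be requested directly through the new keyword arguments; a minimal sketch:

    # Prediction mode: batch norm uses moving statistics, dropout is off,
    # and no variables are marked trainable.
    net, end_points = inception_resnet_v1(images,
                                          is_training_mode=False,
                                          trainable_variables=False)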
@@ -27,41 +27,41 @@ import tensorflow as tf
import tensorflow.contrib.slim as slim
# Inception-Resnet-A
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None, is_training=True):
def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None, trainable_variables=True):
"""Builds the 35x35 resnet block."""
with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1', trainable=is_training)
tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1', trainable=trainable_variables)
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1', trainable=is_training)
tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3', trainable=is_training)
tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1', trainable=trainable_variables)
tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3', trainable=trainable_variables)
with tf.variable_scope('Branch_2'):
tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1', trainable=is_training)
tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3', trainable=is_training)
tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3', trainable=is_training)
tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1', trainable=trainable_variables)
tower_conv2_1 = slim.conv2d(tower_conv2_0, 48, 3, scope='Conv2d_0b_3x3', trainable=trainable_variables)
tower_conv2_2 = slim.conv2d(tower_conv2_1, 64, 3, scope='Conv2d_0c_3x3', trainable=trainable_variables)
mixed = tf.concat([tower_conv, tower_conv1_1, tower_conv2_2], 3)
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1', trainable=is_training)
activation_fn=None, scope='Conv2d_1x1', trainable=trainable_variables)
net += scale * up
if activation_fn:
net = activation_fn(net)
return net
# Inception-Resnet-B
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None, is_training=True):
def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None, trainable_variables=True):
"""Builds the 17x17 resnet block."""
with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1', trainable=is_training)
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1', trainable=trainable_variables)
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1', trainable=is_training)
tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1', trainable=trainable_variables)
tower_conv1_1 = slim.conv2d(tower_conv1_0, 160, [1, 7],
scope='Conv2d_0b_1x7', trainable=is_training)
scope='Conv2d_0b_1x7', trainable=trainable_variables)
tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [7, 1],
scope='Conv2d_0c_7x1', trainable=is_training)
scope='Conv2d_0c_7x1', trainable=trainable_variables)
mixed = tf.concat([tower_conv, tower_conv1_2], 3)
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1', trainable=is_training)
activation_fn=None, scope='Conv2d_1x1', trainable=trainable_variables)
net += scale * up
if activation_fn:
net = activation_fn(net)
@@ -69,20 +69,20 @@ def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None, is_training=True):
# Inception-Resnet-C
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None, is_training=True):
def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None, trainable_variables=True):
"""Builds the 8x8 resnet block."""
with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
with tf.variable_scope('Branch_0'):
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1', trainable=is_training)
tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1', trainable=trainable_variables)
with tf.variable_scope('Branch_1'):
tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1', trainable=is_training)
tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1', trainable=trainable_variables)
tower_conv1_1 = slim.conv2d(tower_conv1_0, 224, [1, 3],
scope='Conv2d_0b_1x3', trainable=is_training)
scope='Conv2d_0b_1x3', trainable=trainable_variables)
tower_conv1_2 = slim.conv2d(tower_conv1_1, 256, [3, 1],
scope='Conv2d_0c_3x1', trainable=is_training)
scope='Conv2d_0c_3x1', trainable=trainable_variables)
mixed = tf.concat([tower_conv, tower_conv1_2], 3)
up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
activation_fn=None, scope='Conv2d_1x1', trainable=is_training)
activation_fn=None, scope='Conv2d_1x1', trainable=trainable_variables)
net += scale * up
if activation_fn:
net = activation_fn(net)
@@ -105,15 +105,17 @@ def inference(images, keep_probability, phase_train=True,
weights_regularizer=slim.l2_regularizer(weight_decay),
normalizer_fn=slim.batch_norm,
normalizer_params=batch_norm_params):
return inception_resnet_v2(images, is_training=phase_train,
return inception_resnet_v2(images, is_training_mode=False,
dropout_keep_prob=keep_probability, bottleneck_layer_size=bottleneck_layer_size, reuse=reuse)
def inception_resnet_v2(inputs, is_training=True,
def inception_resnet_v2(inputs,
dropout_keep_prob=0.8,
bottleneck_layer_size=128,
reuse=None,
scope='InceptionResnetV2'):
scope='InceptionResnetV2',
is_training_mode = True,
trainable_variables=True):
"""Creates the Inception Resnet V2 model.
**Parameters**:
@@ -137,23 +139,22 @@ def inception_resnet_v2(inputs, is_training=True,
end_points: the set of end_points from the inception model.
"""
end_points = {}
with tf.variable_scope(scope, 'InceptionResnetV2', [inputs], reuse=reuse):
with slim.arg_scope([slim.batch_norm, slim.dropout],
is_training=is_training):
is_training=is_training_mode):
with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
stride=1, padding='SAME'):
# 149 x 149 x 32
net = slim.conv2d(inputs, 32, 3, stride=2, padding='VALID',
scope='Conv2d_1a_3x3', trainable=is_training)
scope='Conv2d_1a_3x3', trainable=trainable_variables)
end_points['Conv2d_1a_3x3'] = net
# 147 x 147 x 32
net = slim.conv2d(net, 32, 3, padding='VALID',
scope='Conv2d_2a_3x3', trainable=is_training)
scope='Conv2d_2a_3x3', trainable=trainable_variables)
end_points['Conv2d_2a_3x3'] = net
# 147 x 147 x 64
net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3', trainable=is_training)
net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3', trainable=trainable_variables)
end_points['Conv2d_2b_3x3'] = net
# 73 x 73 x 64
net = slim.max_pool2d(net, 3, stride=2, padding='VALID',
@@ -161,11 +162,11 @@ def inception_resnet_v2(inputs, is_training=True,
end_points['MaxPool_3a_3x3'] = net
# 73 x 73 x 80
net = slim.conv2d(net, 80, 1, padding='VALID',
scope='Conv2d_3b_1x1', trainable=is_training)
scope='Conv2d_3b_1x1', trainable=trainable_variables)
end_points['Conv2d_3b_1x1'] = net
# 71 x 71 x 192
net = slim.conv2d(net, 192, 3, padding='VALID',
scope='Conv2d_4a_3x3', trainable=is_training)
scope='Conv2d_4a_3x3', trainable=trainable_variables)
end_points['Conv2d_4a_3x3'] = net
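The v2 constructor mirrors the v1 change, and the two flags can be set independently; for example, fine-tuning warm-started weights keeps every variable trainable while the graph runs in training mode (a sketch, not taken from the commit):

    # Fine-tuning: training-mode batch norm/dropout, all variables updatable;
    # the warm start itself is handled by init_from_checkpoint in the estimators.
    net, end_points = inception_resnet_v2(images,
                                          is_training_mode=True,
                                          trainable_variables=True)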