Commit 77838cb4 authored by Tiago de Freitas Pereira

Reorganized the keyword arguments for transfer learning

parent 66fb1898
Pipeline #13733 failed in 17 minutes and 39 seconds
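In short, the network functions stop taking a single `is_trainable` flag and instead take two independent keyword arguments: `is_training_mode`, which switches training-time behaviour such as batch normalization and dropout, and `trainable_variables`, which controls whether the created variables receive gradients. A minimal sketch of the call-site change, using `architecture` as a stand-in for any of the network functions touched below:

    # Before: one flag for everything.
    prelogits = architecture(data, is_trainable=is_trainable)[0]

    # After: graph behaviour and variable freezing are decoupled.
    # TRAIN mode: batch norm/dropout in training mode; freezing decided
    # by the extra_checkpoint configuration.
    prelogits = architecture(data, is_training_mode=True,
                             trainable_variables=is_trainable)[0]

    # PREDICT/EVAL mode: inference behaviour, nothing trainable.
    prelogits = architecture(data, is_training_mode=False,
                             trainable_variables=False)[0]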
@@ -130,13 +130,34 @@ class Logits(estimator.Estimator):
             data = features['data']
             key = features['key']
 
-            # Building one graph, by default everything is trainable
-            if self.extra_checkpoint is None:
-                is_trainable = True
-            else:
-                is_trainable = is_trainable_checkpoint(self.extra_checkpoint)
+            # Configure the Training Op (for TRAIN mode)
+            if mode == tf.estimator.ModeKeys.TRAIN:
+
+                # Building one graph, by default everything is trainable
+                if self.extra_checkpoint is None:
+                    is_trainable = True
+                else:
+                    is_trainable = is_trainable_checkpoint(self.extra_checkpoint)
+
+                # Building the training graph
+                prelogits = self.architecture(data, is_training_mode=True, trainable_variables=is_trainable)[0]
+                logits = append_logits(prelogits, n_classes)
+
+                # Compute Loss (for TRAIN mode)
+                self.loss = self.loss_op(logits, labels)
+
+                if self.extra_checkpoint is not None:
+                    tf.contrib.framework.init_from_checkpoint(self.extra_checkpoint["checkpoint_path"],
+                                                              self.extra_checkpoint["scopes"])
+
+                global_step = tf.contrib.framework.get_or_create_global_step()
+                train_op = self.optimizer.minimize(self.loss, global_step=global_step)
+                return tf.estimator.EstimatorSpec(mode=mode, loss=self.loss,
+                                                  train_op=train_op)
 
-            prelogits = self.architecture(data, is_trainable=is_trainable)[0]
+            # Building the graph for PREDICTION or VALIDATION
+            prelogits = self.architecture(data, is_training_mode=False, trainable_variables=False)[0]
             logits = append_logits(prelogits, n_classes)
 
             if self.embedding_validation:
@@ -157,22 +178,9 @@ class Logits(estimator.Estimator):
             if mode == tf.estimator.ModeKeys.PREDICT:
                 return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
 
-            # Compute Loss (for both TRAIN and EVAL modes)
+            # If validation
             self.loss = self.loss_op(logits, labels)
 
-            # Configure the Training Op (for TRAIN mode)
-            if mode == tf.estimator.ModeKeys.TRAIN:
-                if self.extra_checkpoint is not None:
-                    tf.contrib.framework.init_from_checkpoint(self.extra_checkpoint["checkpoint_path"],
-                                                              self.extra_checkpoint["scopes"])
-
-                global_step = tf.contrib.framework.get_or_create_global_step()
-                train_op = self.optimizer.minimize(self.loss, global_step=global_step)
-                return tf.estimator.EstimatorSpec(mode=mode, loss=self.loss,
-                                                  train_op=train_op)
-
-            # Validation
             if self.embedding_validation:
                 predictions_op = predict_using_tensors(predictions["embeddings"], labels, num=validation_batch_size)
                 eval_metric_ops = {"accuracy": tf.metrics.accuracy(labels=labels, predictions=predictions_op)}
@@ -277,18 +285,39 @@ class LogitsCenterLoss(estimator.Estimator):
             data = features['data']
             key = features['key']
 
-            # Building one graph, by default everything is trainable
-            if self.extra_checkpoint is None:
-                is_trainable = True
-            else:
-                is_trainable = is_trainable_checkpoint(self.extra_checkpoint)
-
-            prelogits = self.architecture(data)[0]
-            logits = append_logits(prelogits, n_classes)
-
-            # Compute Loss (for both TRAIN and EVAL modes)
-            loss_dict = mean_cross_entropy_center_loss(logits, prelogits, labels, self.n_classes,
-                                                       alpha=self.alpha, factor=self.factor)
+            # Configure the Training Op (for TRAIN mode)
+            if mode == tf.estimator.ModeKeys.TRAIN:
+
+                # Building one graph, by default everything is trainable
+                if self.extra_checkpoint is None:
+                    is_trainable = True
+                else:
+                    is_trainable = is_trainable_checkpoint(self.extra_checkpoint)
+
+                # Building the training graph
+                prelogits = self.architecture(data, is_training_mode=True, trainable_variables=is_trainable)[0]
+                logits = append_logits(prelogits, n_classes)
+
+                # Compute Loss (for TRAIN mode)
+                loss_dict = mean_cross_entropy_center_loss(logits, prelogits, labels, self.n_classes,
+                                                           alpha=self.alpha, factor=self.factor)
+                self.loss = loss_dict['loss']
+                centers = loss_dict['centers']
+
+                if self.extra_checkpoint is not None:
+                    tf.contrib.framework.init_from_checkpoint(self.extra_checkpoint["checkpoint_path"],
+                                                              self.extra_checkpoint["scopes"])
+
+                global_step = tf.contrib.framework.get_or_create_global_step()
+                # Backprop and update the centers in one op
+                train_op = tf.group(self.optimizer.minimize(self.loss, global_step=global_step),
+                                    centers)
+                return tf.estimator.EstimatorSpec(mode=mode, loss=self.loss,
+                                                  train_op=train_op)
+
+            # Building the graph for PREDICTION or VALIDATION
+            prelogits = self.architecture(data, is_training_mode=False, trainable_variables=False)[0]
+            logits = append_logits(prelogits, n_classes)
 
             if self.embedding_validation:
                 # Compute the embeddings
@@ -308,31 +337,16 @@ class LogitsCenterLoss(estimator.Estimator):
             if mode == tf.estimator.ModeKeys.PREDICT:
                 return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)
 
-            self.loss = loss_dict['loss']
-            centers = loss_dict['centers']
-
-            # Configure the Training Op (for TRAIN mode)
-            if mode == tf.estimator.ModeKeys.TRAIN:
-                # Loading variables from some model just in case
-                if self.extra_checkpoint is not None:
-                    tf.contrib.framework.init_from_checkpoint(self.extra_checkpoint["checkpoint_path"],
-                                                              self.extra_checkpoint["scopes"])
-
-                global_step = tf.contrib.framework.get_or_create_global_step()
-
-                # backprop and updating the centers
-                train_op = tf.group(self.optimizer.minimize(self.loss, global_step=global_step),
-                                    centers)
-                return tf.estimator.EstimatorSpec(mode=mode, loss=self.loss,
-                                                  train_op=train_op)
+            # If validation
+            loss_dict = mean_cross_entropy_center_loss(logits, prelogits, labels, self.n_classes,
+                                                       alpha=self.alpha, factor=self.factor)
+            self.loss = loss_dict['loss']
 
             if self.embedding_validation:
                 predictions_op = predict_using_tensors(predictions["embeddings"], labels, num=validation_batch_size)
                 eval_metric_ops = {"accuracy": tf.metrics.accuracy(labels=labels, predictions=predictions_op)}
                 return tf.estimator.EstimatorSpec(mode=mode, loss=self.loss, eval_metric_ops=eval_metric_ops)
             else:
                 # Add evaluation metrics (for EVAL mode)
                 eval_metric_ops = {
@@ -344,4 +358,4 @@ class LogitsCenterLoss(estimator.Estimator):
         super(LogitsCenterLoss, self).__init__(model_fn=_model_fn,
                                                model_dir=model_dir,
                                                config=config)
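The grouped train op above is worth noting: `tf.group` bundles the gradient step with the op that updates the class centers, so both run on every training iteration. A standalone sketch of the same pattern; the loss and the auxiliary variable here are placeholders, only the `tf.group` pattern mirrors the estimator above:

    import tensorflow as tf

    # Placeholder loss and auxiliary update standing in for the
    # center-loss terms.
    weight = tf.Variable(1.0)
    loss = tf.square(weight - 2.0)
    centers_update = tf.assign_add(tf.Variable(0.0), 1.0)

    global_step = tf.contrib.framework.get_or_create_global_step()
    optimizer = tf.train.GradientDescentOptimizer(0.1)

    # One op that performs backprop and the auxiliary update together.
    train_op = tf.group(optimizer.minimize(loss, global_step=global_step),
                        centers_update)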
@@ -131,8 +131,11 @@ class Siamese(estimator.Estimator):
                 raise ValueError("The input function needs to contain a dictionary with the keys `left` and `right` ")
 
             # Building one graph
-            prelogits_left = self.architecture(features['left'], is_trainable=is_trainable)[0]
-            prelogits_right = self.architecture(features['right'], reuse=True, is_trainable=is_trainable)[0]
+            prelogits_left = self.architecture(features['left'], is_training_mode=True, trainable_variables=is_trainable)[0]
+            prelogits_right = self.architecture(features['right'], reuse=True, is_training_mode=True, trainable_variables=is_trainable)[0]
+
+            for var in tf.global_variables():
+                tf.summary.histogram(var.op.name, var)
 
             if self.extra_checkpoint is not None:
                 tf.contrib.framework.init_from_checkpoint(self.extra_checkpoint["checkpoint_path"],
@@ -152,7 +155,7 @@ class Siamese(estimator.Estimator):
             data = features['data']
 
             # Compute the embeddings
-            prelogits = self.architecture(data)[0]
+            prelogits = self.architecture(data, is_training_mode=False, trainable_variables=False)[0]
             embeddings = tf.nn.l2_normalize(prelogits, 1)
             predictions = {"embeddings": embeddings}
......
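As the `ValueError` above enforces, the Siamese estimator expects an input function whose features dictionary carries the `left` and `right` keys. A sketch of a compatible input function; the shapes and the random data are placeholder assumptions, only the dictionary contract comes from the code above:

    import tensorflow as tf

    def input_fn():
        # Placeholder tensors: any pipeline works as long as the
        # features dictionary has the `left` and `right` keys.
        left = tf.random_normal((32, 28, 28, 1))
        right = tf.random_normal((32, 28, 28, 1))
        labels = tf.zeros((32,), dtype=tf.int64)
        return {'left': left, 'right': right}, labels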
@@ -5,7 +5,7 @@
 import tensorflow as tf
 
-def dummy(inputs, reuse=False, is_trainable=True):
+def dummy(inputs, reuse=False, is_training_mode=True, trainable_variables=True):
     """
     Create all the necessary variables for this CNN
@@ -24,7 +24,7 @@ def dummy(inputs, reuse=False, is_trainable=True):
     graph = slim.conv2d(inputs, 10, [3, 3], activation_fn=tf.nn.relu, stride=1, scope='conv1',
                         weights_initializer=initializer,
-                        trainable=is_trainable)
+                        trainable=trainable_variables)
     end_points['conv1'] = graph
 
     graph = slim.max_pool2d(graph, [4, 4], scope='pool1')
@@ -37,7 +37,7 @@ def dummy(inputs, reuse=False, is_trainable=True):
                                  weights_initializer=initializer,
                                  activation_fn=None,
                                  scope='fc1',
-                                 trainable=is_trainable)
+                                 trainable=trainable_variables)
     end_points['fc1'] = graph
......
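With the reorganized signature, the same `dummy` network can be built either for training or as a frozen feature extractor. A usage sketch; the input shape is an assumption, and the `[0]` indexing (prelogits first, end points second) is inferred from how the estimators above call their architectures:

    import tensorflow as tf

    inputs = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))

    # Training graph: variables are created trainable.
    prelogits = dummy(inputs, reuse=False,
                      is_training_mode=True, trainable_variables=True)[0]

    # Shared frozen graph for validation: reuse the variables, train nothing.
    prelogits_val = dummy(inputs, reuse=True,
                          is_training_mode=False, trainable_variables=False)[0]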
@@ -27,21 +27,21 @@ import tensorflow as tf
 import tensorflow.contrib.slim as slim
 
 # Inception-Resnet-A
-def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
+def block35(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None, trainable_variables=True):
     """Builds the 35x35 resnet block."""
     with tf.variable_scope(scope, 'Block35', [net], reuse=reuse):
         with tf.variable_scope('Branch_0'):
-            tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1')
+            tower_conv = slim.conv2d(net, 32, 1, scope='Conv2d_1x1', trainable=trainable_variables)
         with tf.variable_scope('Branch_1'):
-            tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
-            tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3')
+            tower_conv1_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1', trainable=trainable_variables)
+            tower_conv1_1 = slim.conv2d(tower_conv1_0, 32, 3, scope='Conv2d_0b_3x3', trainable=trainable_variables)
         with tf.variable_scope('Branch_2'):
-            tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1')
-            tower_conv2_1 = slim.conv2d(tower_conv2_0, 32, 3, scope='Conv2d_0b_3x3')
-            tower_conv2_2 = slim.conv2d(tower_conv2_1, 32, 3, scope='Conv2d_0c_3x3')
+            tower_conv2_0 = slim.conv2d(net, 32, 1, scope='Conv2d_0a_1x1', trainable=trainable_variables)
+            tower_conv2_1 = slim.conv2d(tower_conv2_0, 32, 3, scope='Conv2d_0b_3x3', trainable=trainable_variables)
+            tower_conv2_2 = slim.conv2d(tower_conv2_1, 32, 3, scope='Conv2d_0c_3x3', trainable=trainable_variables)
         mixed = tf.concat([tower_conv, tower_conv1_1, tower_conv2_2], 3)
         up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
-                         activation_fn=None, scope='Conv2d_1x1')
+                         activation_fn=None, scope='Conv2d_1x1', trainable=trainable_variables)
         net += scale * up
         if activation_fn:
             net = activation_fn(net)
@@ -52,16 +52,16 @@ def block17(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
     """Builds the 17x17 resnet block."""
     with tf.variable_scope(scope, 'Block17', [net], reuse=reuse):
         with tf.variable_scope('Branch_0'):
-            tower_conv = slim.conv2d(net, 128, 1, scope='Conv2d_1x1')
+            tower_conv = slim.conv2d(net, 128, 1, scope='Conv2d_1x1', trainable=trainable_variables)
         with tf.variable_scope('Branch_1'):
-            tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1')
+            tower_conv1_0 = slim.conv2d(net, 128, 1, scope='Conv2d_0a_1x1', trainable=trainable_variables)
             tower_conv1_1 = slim.conv2d(tower_conv1_0, 128, [1, 7],
-                                        scope='Conv2d_0b_1x7')
+                                        scope='Conv2d_0b_1x7', trainable=trainable_variables)
             tower_conv1_2 = slim.conv2d(tower_conv1_1, 128, [7, 1],
-                                        scope='Conv2d_0c_7x1')
+                                        scope='Conv2d_0c_7x1', trainable=trainable_variables)
         mixed = tf.concat([tower_conv, tower_conv1_2], 3)
         up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
-                         activation_fn=None, scope='Conv2d_1x1')
+                         activation_fn=None, scope='Conv2d_1x1', trainable=trainable_variables)
         net += scale * up
         if activation_fn:
             net = activation_fn(net)
@@ -73,54 +73,54 @@ def block8(net, scale=1.0, activation_fn=tf.nn.relu, scope=None, reuse=None):
     """Builds the 8x8 resnet block."""
     with tf.variable_scope(scope, 'Block8', [net], reuse=reuse):
         with tf.variable_scope('Branch_0'):
-            tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1')
+            tower_conv = slim.conv2d(net, 192, 1, scope='Conv2d_1x1', trainable=trainable_variables)
         with tf.variable_scope('Branch_1'):
-            tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1')
+            tower_conv1_0 = slim.conv2d(net, 192, 1, scope='Conv2d_0a_1x1', trainable=trainable_variables)
             tower_conv1_1 = slim.conv2d(tower_conv1_0, 192, [1, 3],
-                                        scope='Conv2d_0b_1x3')
+                                        scope='Conv2d_0b_1x3', trainable=trainable_variables)
             tower_conv1_2 = slim.conv2d(tower_conv1_1, 192, [3, 1],
-                                        scope='Conv2d_0c_3x1')
+                                        scope='Conv2d_0c_3x1', trainable=trainable_variables)
         mixed = tf.concat([tower_conv, tower_conv1_2], 3)
         up = slim.conv2d(mixed, net.get_shape()[3], 1, normalizer_fn=None,
-                         activation_fn=None, scope='Conv2d_1x1')
+                         activation_fn=None, scope='Conv2d_1x1', trainable=trainable_variables)
         net += scale * up
         if activation_fn:
             net = activation_fn(net)
     return net
 
-def reduction_a(net, k, l, m, n):
-    with tf.variable_scope('Branch_0'):
+def reduction_a(net, k, l, m, n, trainable_variables=True, reuse=True):
+    with tf.variable_scope('Branch_0', reuse=reuse):
         tower_conv = slim.conv2d(net, n, 3, stride=2, padding='VALID',
-                                 scope='Conv2d_1a_3x3')
-    with tf.variable_scope('Branch_1'):
-        tower_conv1_0 = slim.conv2d(net, k, 1, scope='Conv2d_0a_1x1')
+                                 scope='Conv2d_1a_3x3', trainable=trainable_variables)
+    with tf.variable_scope('Branch_1', reuse=reuse):
+        tower_conv1_0 = slim.conv2d(net, k, 1, scope='Conv2d_0a_1x1', trainable=trainable_variables)
         tower_conv1_1 = slim.conv2d(tower_conv1_0, l, 3,
-                                    scope='Conv2d_0b_3x3')
+                                    scope='Conv2d_0b_3x3', trainable=trainable_variables)
         tower_conv1_2 = slim.conv2d(tower_conv1_1, m, 3,
                                     stride=2, padding='VALID',
-                                    scope='Conv2d_1a_3x3')
-    with tf.variable_scope('Branch_2'):
+                                    scope='Conv2d_1a_3x3', trainable=trainable_variables)
+    with tf.variable_scope('Branch_2', reuse=reuse):
         tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',
                                      scope='MaxPool_1a_3x3')
     net = tf.concat([tower_conv, tower_conv1_2, tower_pool], 3)
     return net
 
-def reduction_b(net):
-    with tf.variable_scope('Branch_0'):
-        tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
+def reduction_b(net, reuse=True, trainable_variables=True):
+    with tf.variable_scope('Branch_0', reuse=reuse):
+        tower_conv = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1', trainable=trainable_variables)
         tower_conv_1 = slim.conv2d(tower_conv, 384, 3, stride=2,
-                                   padding='VALID', scope='Conv2d_1a_3x3')
-    with tf.variable_scope('Branch_1'):
-        tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
+                                   padding='VALID', scope='Conv2d_1a_3x3', trainable=trainable_variables)
+    with tf.variable_scope('Branch_1', reuse=reuse):
+        tower_conv1 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1', trainable=trainable_variables)
         tower_conv1_1 = slim.conv2d(tower_conv1, 256, 3, stride=2,
-                                    padding='VALID', scope='Conv2d_1a_3x3')
-    with tf.variable_scope('Branch_2'):
-        tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1')
+                                    padding='VALID', scope='Conv2d_1a_3x3', trainable=trainable_variables)
+    with tf.variable_scope('Branch_2', reuse=reuse):
+        tower_conv2 = slim.conv2d(net, 256, 1, scope='Conv2d_0a_1x1', trainable=trainable_variables)
        tower_conv2_1 = slim.conv2d(tower_conv2, 256, 3,
-                                    scope='Conv2d_0b_3x3')
+                                    scope='Conv2d_0b_3x3', trainable=trainable_variables)
         tower_conv2_2 = slim.conv2d(tower_conv2_1, 256, 3, stride=2,
-                                    padding='VALID', scope='Conv2d_1a_3x3')
-    with tf.variable_scope('Branch_3'):
+                                    padding='VALID', scope='Conv2d_1a_3x3', trainable=trainable_variables)
+    with tf.variable_scope('Branch_3', reuse=reuse):
         tower_pool = slim.max_pool2d(net, 3, stride=2, padding='VALID',
                                      scope='MaxPool_1a_3x3')
     net = tf.concat([tower_conv_1, tower_conv1_1,
@@ -153,7 +153,9 @@ def inception_resnet_v1(inputs, is_training=True,
                         dropout_keep_prob=0.8,
                         bottleneck_layer_size=128,
                         reuse=None,
-                        scope='InceptionResnetV1'):
+                        scope='InceptionResnetV1',
+                        is_training_mode=True,
+                        trainable_variables=True):
     """
     Creates the Inception Resnet V1 model.
@@ -176,20 +178,20 @@ def inception_resnet_v1(inputs, is_training=True,
     with tf.variable_scope(scope, 'InceptionResnetV1', [inputs], reuse=reuse):
         with slim.arg_scope([slim.batch_norm, slim.dropout],
-                            is_training=is_training):
+                            is_training=is_training_mode):
             with slim.arg_scope([slim.conv2d, slim.max_pool2d, slim.avg_pool2d],
                                 stride=1, padding='SAME'):
                 # 149 x 149 x 32
                 net = slim.conv2d(inputs, 32, 3, stride=2, padding='VALID',
-                                  scope='Conv2d_1a_3x3')
+                                  scope='Conv2d_1a_3x3', trainable=trainable_variables)
                 end_points['Conv2d_1a_3x3'] = net
                 # 147 x 147 x 32
                 net = slim.conv2d(net, 32, 3, padding='VALID',
-                                  scope='Conv2d_2a_3x3')
+                                  scope='Conv2d_2a_3x3', trainable=trainable_variables)
                 end_points['Conv2d_2a_3x3'] = net
                 # 147 x 147 x 64
-                net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3')
+                net = slim.conv2d(net, 64, 3, scope='Conv2d_2b_3x3', trainable=trainable_variables)
                 end_points['Conv2d_2b_3x3'] = net
                 # 73 x 73 x 64
                 net = slim.max_pool2d(net, 3, stride=2, padding='VALID',
@@ -197,40 +199,40 @@ def inception_resnet_v1(inputs, is_training=True,
                 end_points['MaxPool_3a_3x3'] = net
                 # 73 x 73 x 80
                 net = slim.conv2d(net, 80, 1, padding='VALID',
-                                  scope='Conv2d_3b_1x1')
+                                  scope='Conv2d_3b_1x1', trainable=trainable_variables)
                 end_points['Conv2d_3b_1x1'] = net
                 # 71 x 71 x 192
                 net = slim.conv2d(net, 192, 3, padding='VALID',
-                                  scope='Conv2d_4a_3x3')
+                                  scope='Conv2d_4a_3x3', trainable=trainable_variables)
                 end_points['Conv2d_4a_3x3'] = net
                 # 35 x 35 x 256
                 net = slim.conv2d(net, 256, 3, stride=2, padding='VALID',
-                                  scope='Conv2d_4b_3x3')
+                                  scope='Conv2d_4b_3x3', trainable=trainable_variables)
                 end_points['Conv2d_4b_3x3'] = net
 
                 # 5 x Inception-resnet-A
-                net = slim.repeat(net, 5, block35, scale=0.17)
+                net = slim.repeat(net, 5, block35, scale=0.17, trainable_variables=trainable_variables)
                 end_points['Mixed_5a'] = net
 
                 # Reduction-A
                 with tf.variable_scope('Mixed_6a'):
-                    net = reduction_a(net, 192, 192, 256, 384)
+                    net = reduction_a(net, 192, 192, 256, 384, trainable_variables=trainable_variables)
                 end_points['Mixed_6a'] = net
 
                 # 10 x Inception-Resnet-B
-                net = slim.repeat(net, 10, block17, scale=0.10)
+                net = slim.repeat(net, 10, block17, scale=0.10, trainable_variables=trainable_variables)
                 end_points['Mixed_6b'] = net
 
                 # Reduction-B
                 with tf.variable_scope('Mixed_7a'):
-                    net = reduction_b(net)
+                    net = reduction_b(net, trainable_variables=trainable_variables)
                 end_points['Mixed_7a'] = net
 
                 # 5 x Inception-Resnet-C
-                net = slim.repeat(net, 5, block8, scale=0.20)
+                net = slim.repeat(net, 5, block8, scale=0.20, trainable_variables=trainable_variables)
                 end_points['Mixed_8a'] = net
 
-                net = block8(net, activation_fn=None)
+                net = block8(net, activation_fn=None, trainable_variables=trainable_variables)
                 end_points['Mixed_8b'] = net
 
                 with tf.variable_scope('Logits'):
@@ -240,12 +242,12 @@ def inception_resnet_v1(inputs, is_training=True,
                                           scope='AvgPool_1a_8x8')
                     net = slim.flatten(net)
 
-                    net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
+                    net = slim.dropout(net, dropout_keep_prob, is_training=is_training_mode,
                                        scope='Dropout')
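Threading `trainable_variables` through every block is what enables the transfer-learning setup in the estimators: the Inception-ResNet v1 backbone can be built entirely frozen and then initialized from an earlier checkpoint. A sketch where the input shape, checkpoint path, and scope mapping are placeholders (the real values come from `extra_checkpoint["checkpoint_path"]` and `extra_checkpoint["scopes"]`):

    import tensorflow as tf

    images = tf.placeholder(tf.float32, shape=(None, 160, 160, 3))

    # Frozen backbone: inference-mode batch norm/dropout, no gradients
    # anywhere; the network only serves as a feature extractor.
    prelogits, end_points = inception_resnet_v1(images,
                                                is_training_mode=False,
                                                trainable_variables=False)

    # Hypothetical checkpoint path and scope map.
    tf.contrib.framework.init_from_checkpoint('/path/to/previous/model',
                                              {'InceptionResnetV1/': 'InceptionResnetV1/'})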