Commit fffc4590 authored by Tiago de Freitas Pereira

Shutting down batch norm variables when trainable is False for InceptionResnetv1

parent e0ea47db
Merge request !60: Style transfer
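Background for the diff below: in TF-Slim, `trainable=False` on `slim.conv2d` only freezes the convolution kernel and bias. The beta/gamma variables that `slim.batch_norm` creates when attached as a `normalizer_fn` are governed by `slim.batch_norm`'s own `trainable` and `is_training` arguments, so they kept updating even for layers excluded from `trainable_variables`. The commit therefore wraps each layer in an extra `arg_scope` over `slim.batch_norm`. A minimal sketch of the pattern, assuming TF 1.x with `tf.contrib.slim`; the helper name and layer parameters are illustrative, not part of this commit:

# A minimal sketch (not part of the commit): freezing a conv layer
# together with its batch norm, assuming TF 1.x with tf.contrib.slim.
import tensorflow as tf
import tensorflow.contrib.slim as slim

def conv_with_frozen_bn(net, num_outputs, mode, trainable, name, reuse=None):
    # slim.conv2d creates the batch norm via normalizer_fn; its
    # trainable= argument does not reach those beta/gamma variables.
    with slim.arg_scope([slim.conv2d], normalizer_fn=slim.batch_norm):
        # The extra arg_scope below is the pattern this commit applies:
        # slim.batch_norm must be switched off through its own arguments.
        with slim.arg_scope(
                [slim.batch_norm],
                is_training=(mode == tf.estimator.ModeKeys.TRAIN),
                trainable=trainable):
            net = slim.conv2d(
                net, num_outputs, 3, scope=name,
                trainable=trainable, reuse=reuse)
    return net

# Example: a frozen layer at inference time.
images = tf.placeholder(tf.float32, [None, 149, 149, 32])
out = conv_with_frozen_bn(images, 32, tf.estimator.ModeKeys.PREDICT,
                          trainable=False, name='Conv2d_1a_3x3')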
@@ -303,7 +303,7 @@ def inception_resnet_v1_batch_norm(inputs,
         # force in-place updates of mean and variance estimates
         'updates_collections': None,
         # Moving averages ends up in the trainable variables collection
-        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
+        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES if mode==tf.estimator.ModeKeys.TRAIN else None],
     }
 
     with slim.arg_scope(
@@ -363,7 +363,7 @@ def inception_resnet_v1(inputs,
     with tf.variable_scope(scope, 'InceptionResnetV1', [inputs], reuse=reuse):
         with slim.arg_scope(
-                [slim.batch_norm, slim.dropout],
+                [slim.dropout],
                 is_training=(mode == tf.estimator.ModeKeys.TRAIN)):
 
             with slim.arg_scope(
@@ -373,37 +373,53 @@ def inception_resnet_v1(inputs,
                 # 149 x 149 x 32
                 name = "Conv2d_1a_3x3"
-                trainable = is_trainable(name, trainable_variables)
-                net = slim.conv2d(
-                    inputs,
-                    32,
-                    3,
-                    stride=2,
-                    padding='VALID',
-                    scope=name,
-                    trainable=trainable,
-                    reuse=reuse)
-                end_points[name] = net
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable = trainable):
+                    net = slim.conv2d(
+                        inputs,
+                        32,
+                        3,
+                        stride=2,
+                        padding='VALID',
+                        scope=name,
+                        trainable=trainable,
+                        reuse=reuse)
+                end_points[name] = net
 
                 # 147 x 147 x 32
                 name = "Conv2d_2a_3x3"
-                trainable = is_trainable(name, trainable_variables)
-                net = slim.conv2d(
-                    net,
-                    32,
-                    3,
-                    padding='VALID',
-                    scope=name,
-                    trainable=trainable,
-                    reuse=reuse)
-                end_points[name] = net
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable = trainable):
+                    net = slim.conv2d(
+                        net,
+                        32,
+                        3,
+                        padding='VALID',
+                        scope=name,
+                        trainable=trainable,
+                        reuse=reuse)
+                end_points[name] = net
 
                 # 147 x 147 x 64
                 name = "Conv2d_2b_3x3"
-                trainable = is_trainable(name, trainable_variables)
-                net = slim.conv2d(
-                    net, 64, 3, scope=name, trainable=trainable, reuse=reuse)
-                end_points[name] = net
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable = trainable):
+                    net = slim.conv2d(
+                        net, 64, 3, scope=name, trainable=trainable, reuse=reuse)
+                end_points[name] = net
 
                 # 73 x 73 x 64
                 net = slim.max_pool2d(
                     net, 3, stride=2, padding='VALID', scope='MaxPool_3a_3x3')
@@ -411,110 +427,148 @@ def inception_resnet_v1(inputs,
                 # 73 x 73 x 80
                 name = "Conv2d_3b_1x1"
-                trainable = is_trainable(name, trainable_variables)
-                net = slim.conv2d(
-                    net,
-                    80,
-                    1,
-                    padding='VALID',
-                    scope=name,
-                    trainable=trainable,
-                    reuse=reuse)
-                end_points[name] = net
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable = trainable):
+                    net = slim.conv2d(
+                        net,
+                        80,
+                        1,
+                        padding='VALID',
+                        scope=name,
+                        trainable=trainable,
+                        reuse=reuse)
+                end_points[name] = net
 
                 # 71 x 71 x 192
                 name = "Conv2d_4a_3x3"
-                trainable = is_trainable(name, trainable_variables)
-                net = slim.conv2d(
-                    net,
-                    192,
-                    3,
-                    padding='VALID',
-                    scope=name,
-                    trainable=trainable,
-                    reuse=reuse)
-                end_points[name] = net
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable = trainable):
+                    net = slim.conv2d(
+                        net,
+                        192,
+                        3,
+                        padding='VALID',
+                        scope=name,
+                        trainable=trainable,
+                        reuse=reuse)
+                end_points[name] = net
 
                 # 35 x 35 x 256
                 name = "Conv2d_4b_3x3"
-                trainable = is_trainable(name, trainable_variables)
-                net = slim.conv2d(
-                    net,
-                    256,
-                    3,
-                    stride=2,
-                    padding='VALID',
-                    scope=name,
-                    trainable=trainable,
-                    reuse=reuse)
-                end_points[name] = net
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable = trainable):
+                    net = slim.conv2d(
+                        net,
+                        256,
+                        3,
+                        stride=2,
+                        padding='VALID',
+                        scope=name,
+                        trainable=trainable,
+                        reuse=reuse)
+                end_points[name] = net
 
                 # 5 x Inception-resnet-A
                 name = "block35"
-                trainable = is_trainable(name, trainable_variables)
-                net = slim.repeat(
-                    net,
-                    5,
-                    block35,
-                    scale=0.17,
-                    trainable_variables=trainable,
-                    reuse=reuse)
-                end_points[name] = net
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable = trainable):
+                    net = slim.repeat(
+                        net,
+                        5,
+                        block35,
+                        scale=0.17,
+                        trainable_variables=trainable,
+                        reuse=reuse)
+                end_points[name] = net
 
                 # Reduction-A
                 name = "Mixed_6a"
-                trainable = is_trainable(name, trainable_variables)
-                with tf.variable_scope(name):
-                    net = reduction_a(
-                        net,
-                        192,
-                        192,
-                        256,
-                        384,
-                        trainable_variables=trainable,
-                        reuse=reuse)
-                end_points[name] = net
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable = trainable):
+                    with tf.variable_scope(name):
+                        net = reduction_a(
+                            net,
+                            192,
+                            192,
+                            256,
+                            384,
+                            trainable_variables=trainable,
+                            reuse=reuse)
+                end_points[name] = net
 
                 # 10 x Inception-Resnet-B
                 name = "block17"
-                trainable = is_trainable(name, trainable_variables)
-                net = slim.repeat(
-                    net,
-                    10,
-                    block17,
-                    scale=0.10,
-                    trainable_variables=trainable,
-                    reuse=reuse)
-                end_points[name] = net
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable = trainable):
+                    net = slim.repeat(
+                        net,
+                        10,
+                        block17,
+                        scale=0.10,
+                        trainable_variables=trainable,
+                        reuse=reuse)
+                end_points[name] = net
 
                 # Reduction-B
                 name = "Mixed_7a"
-                trainable = is_trainable(name, trainable_variables)
-                with tf.variable_scope(name):
-                    net = reduction_b(
-                        net, trainable_variables=trainable, reuse=reuse)
-                end_points[name] = net
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable = trainable):
+                    with tf.variable_scope(name):
+                        net = reduction_b(
+                            net, trainable_variables=trainable, reuse=reuse)
+                end_points[name] = net
 
                 # 5 x Inception-Resnet-C
                 name = "block8"
-                trainable = is_trainable(name, trainable_variables)
-                net = slim.repeat(
-                    net,
-                    5,
-                    block8,
-                    scale=0.20,
-                    trainable_variables=trainable,
-                    reuse=reuse)
-                end_points[name] = net
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable = trainable):
+                    net = slim.repeat(
+                        net,
+                        5,
+                        block8,
+                        scale=0.20,
+                        trainable_variables=trainable,
+                        reuse=reuse)
+                end_points[name] = net
 
                 name = "Mixed_8b"
-                trainable = is_trainable(name, trainable_variables)
-                net = block8(
-                    net,
-                    activation_fn=None,
-                    trainable_variables=trainable,
-                    reuse=reuse)
-                end_points[name] = net
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable = trainable):
+                    net = block8(
+                        net,
+                        activation_fn=None,
+                        trainable_variables=trainable,
+                        reuse=reuse)
+                end_points[name] = net
 
                 with tf.variable_scope('Logits'):
                     end_points['PrePool'] = net
@@ -535,13 +589,18 @@ def inception_resnet_v1(inputs,
                     end_points['PreLogitsFlatten'] = net
 
                 name = "Bottleneck"
-                trainable = is_trainable(name, trainable_variables)
-                net = slim.fully_connected(
-                    net,
-                    bottleneck_layer_size,
-                    activation_fn=None,
-                    scope=name,
-                    reuse=reuse,
-                    trainable=trainable)
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable = trainable):
+                    net = slim.fully_connected(
+                        net,
+                        bottleneck_layer_size,
+                        activation_fn=None,
+                        scope=name,
+                        reuse=reuse,
+                        trainable=trainable)
                 end_points[name] = net
 
     return net, end_points
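A quick way to sanity-check the effect, sketched under two assumptions that are not verified here: that `inception_resnet_v1` is importable from `bob.learn.tensorflow.network`, and that an empty `trainable_variables` list marks every named block as non-trainable. After this commit, building the graph outside TRAIN mode should leave no BatchNorm variables in the trainable collection:

import tensorflow as tf
from bob.learn.tensorflow.network import inception_resnet_v1  # assumed path

images = tf.placeholder(tf.float32, [None, 160, 160, 3])
net, end_points = inception_resnet_v1(
    images,
    mode=tf.estimator.ModeKeys.PREDICT,  # not TRAIN: batch norm shut down
    trainable_variables=[])              # assumed to freeze every block

# No beta/gamma (or moving statistics) should be trainable any more.
bn_vars = [v.name for v in tf.trainable_variables() if 'BatchNorm' in v.name]
assert not bn_vars, bn_vars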