bob / bob.learn.tensorflow — commit fffc4590
Authored Jul 13, 2018 by Tiago de Freitas Pereira

Shut down batch norm variables when trainable is False for InceptionResnetV1
parent e0ea47db
Pipeline #21839 passed in 21 minutes and 52 seconds
Changes: 1    Pipelines: 1
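For context: in TF-Slim, a layer built with trainable=False is not actually frozen where batch normalization is concerned. Batch norm's moving_mean/moving_variance keep updating whenever is_training=True, and its beta/gamma remain trainable by default. A minimal repro of that behaviour, as a sketch in illustrative TF 1.x code (the layer name is hypothetical, not from this repository):

    import tensorflow as tf
    slim = tf.contrib.slim

    x = tf.placeholder(tf.float32, [None, 8, 8, 3])
    # The conv kernel is frozen, but batch norm is not: with is_training=True
    # its moving averages still update, and its beta stays trainable.
    net = slim.conv2d(x, 16, 3, scope='frozen_conv', trainable=False,
                      normalizer_fn=slim.batch_norm,
                      normalizer_params={'is_training': True})
    print([v.name for v in tf.trainable_variables()])
    # -> 'frozen_conv/BatchNorm/beta' appears even though the layer is "frozen"

This commit closes that gap by wrapping every layer in an arg_scope that forces batch norm to follow both the estimator mode and the layer's trainable flag.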
bob/learn/tensorflow/network/InceptionResnetV1.py
...
...
@@ -303,7 +303,7 @@ def inception_resnet_v1_batch_norm(inputs,
         # force in-place updates of mean and variance estimates
         'updates_collections': None,
         # Moving averages ends up in the trainable variables collection
-        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES],
+        'variables_collections': [tf.GraphKeys.TRAINABLE_VARIABLES if mode == tf.estimator.ModeKeys.TRAIN else None],
     }
     with slim.arg_scope(
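This first hunk makes the facenet-style trick of exposing the moving averages in TRAINABLE_VARIABLES conditional on training mode. A sketch of the resulting parameter dict; the decay/epsilon/updates_collections values are assumptions carried over from the surrounding file, which the hunk does not show:

    import tensorflow as tf

    def make_batch_norm_params(mode):
        # Only 'variables_collections' is visible in the hunk above;
        # the other entries are assumed context.
        return {
            'decay': 0.995,
            'epsilon': 0.001,
            # force in-place updates of mean and variance estimates
            'updates_collections': None,
            # Expose moving averages as "trainable" only while training:
            'variables_collections': [
                tf.GraphKeys.TRAINABLE_VARIABLES
                if mode == tf.estimator.ModeKeys.TRAIN else None
            ],
        }

Outside TRAIN mode the moving averages therefore stay out of tf.trainable_variables(), so an optimizer built on that collection can no longer touch them.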
...
...
@@ -363,7 +363,7 @@ def inception_resnet_v1(inputs,
     with tf.variable_scope(scope, 'InceptionResnetV1', [inputs], reuse=reuse):
-        with slim.arg_scope([slim.batch_norm, slim.dropout],
+        with slim.arg_scope([slim.dropout],
                             is_training=(mode == tf.estimator.ModeKeys.TRAIN)):
             with slim.arg_scope(
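The second hunk removes slim.batch_norm from the function-wide arg_scope, leaving only slim.dropout there; each layer below then opens its own batch-norm scope. Inner arg_scope settings override outer ones, which is what makes the per-layer freeze possible. An illustrative nested-scope check (TF 1.x; the scope name is hypothetical):

    import tensorflow as tf
    slim = tf.contrib.slim

    x = tf.placeholder(tf.float32, [None, 8, 8, 3])
    with slim.arg_scope([slim.dropout], is_training=False):
        # The per-layer scope decides batch-norm behaviour independently:
        with slim.arg_scope([slim.batch_norm], is_training=False,
                            trainable=False):
            net = slim.conv2d(x, 16, 3, scope='demo_conv',
                              normalizer_fn=slim.batch_norm)

    # The conv kernel is still trainable, but no BatchNorm variable is:
    print([v.name for v in tf.trainable_variables()])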
...
...
@@ -373,37 +373,53 @@ def inception_resnet_v1(inputs,
                 # 149 x 149 x 32
                 name = "Conv2d_1a_3x3"
-                trainable = is_trainable(name, trainable_variables)
-                net = slim.conv2d(inputs, 32, 3, stride=2, padding='VALID',
-                                  scope=name, trainable=trainable, reuse=reuse)
-                end_points[name] = net
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable=trainable):
+                    net = slim.conv2d(inputs, 32, 3, stride=2, padding='VALID',
+                                      scope=name, trainable=trainable, reuse=reuse)
+                end_points[name] = net

                 # 147 x 147 x 32
                 name = "Conv2d_2a_3x3"
-                trainable = is_trainable(name, trainable_variables)
-                net = slim.conv2d(net, 32, 3, padding='VALID',
-                                  scope=name, trainable=trainable, reuse=reuse)
-                end_points[name] = net
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable=trainable):
+                    net = slim.conv2d(net, 32, 3, padding='VALID',
+                                      scope=name, trainable=trainable, reuse=reuse)
+                end_points[name] = net

                 # 147 x 147 x 64
                 name = "Conv2d_2b_3x3"
-                trainable = is_trainable(name, trainable_variables)
-                net = slim.conv2d(net, 64, 3,
-                                  scope=name, trainable=trainable, reuse=reuse)
-                end_points[name] = net
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable=trainable):
+                    net = slim.conv2d(net, 64, 3,
+                                      scope=name, trainable=trainable, reuse=reuse)
+                end_points[name] = net

                 # 73 x 73 x 64
                 net = slim.max_pool2d(net, 3, stride=2, padding='VALID',
                                       scope='MaxPool_3a_3x3')
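From here on, every layer repeats the same three-step pattern: compute trainable via is_trainable(..., mode=mode), open a slim.arg_scope over slim.batch_norm that mirrors it, and build the layer inside. A hypothetical helper (not part of the commit) that captures the pattern once:

    import tensorflow as tf
    slim = tf.contrib.slim

    def frozen_bn_scope(mode, trainable):
        """Batch-norm scope used around each layer below: statistics update
        only in TRAIN mode, and BN variables follow the layer's flag."""
        return slim.arg_scope(
            [slim.batch_norm],
            is_training=(mode == tf.estimator.ModeKeys.TRAIN),
            trainable=trainable)

Each wrapped block in the hunks below is then equivalent to "with frozen_bn_scope(mode, trainable): ...".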
...
...
@@ -411,110 +427,148 @@ def inception_resnet_v1(inputs,
                 # 73 x 73 x 80
                 name = "Conv2d_3b_1x1"
-                trainable = is_trainable(name, trainable_variables)
-                net = slim.conv2d(net, 80, 1, padding='VALID',
-                                  scope=name, trainable=trainable, reuse=reuse)
-                end_points[name] = net
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable=trainable):
+                    net = slim.conv2d(net, 80, 1, padding='VALID',
+                                      scope=name, trainable=trainable, reuse=reuse)
+                end_points[name] = net

                 # 71 x 71 x 192
                 name = "Conv2d_4a_3x3"
-                trainable = is_trainable(name, trainable_variables)
-                net = slim.conv2d(net, 192, 3, padding='VALID',
-                                  scope=name, trainable=trainable, reuse=reuse)
-                end_points[name] = net
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable=trainable):
+                    net = slim.conv2d(net, 192, 3, padding='VALID',
+                                      scope=name, trainable=trainable, reuse=reuse)
+                end_points[name] = net

                 # 35 x 35 x 256
                 name = "Conv2d_4b_3x3"
-                trainable = is_trainable(name, trainable_variables)
-                net = slim.conv2d(net, 256, 3, stride=2, padding='VALID',
-                                  scope=name, trainable=trainable, reuse=reuse)
-                end_points[name] = net
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable=trainable):
+                    net = slim.conv2d(net, 256, 3, stride=2, padding='VALID',
+                                      scope=name, trainable=trainable, reuse=reuse)
+                end_points[name] = net

                 # 5 x Inception-resnet-A
                 name = "block35"
-                trainable = is_trainable(name, trainable_variables)
-                net = slim.repeat(net, 5, block35, scale=0.17,
-                                  trainable_variables=trainable, reuse=reuse)
-                end_points[name] = net
-
-                # Reduction-A
-                name = "Mixed_6a"
-                trainable = is_trainable(name, trainable_variables)
-                with tf.variable_scope(name):
-                    net = reduction_a(net, 192, 192, 256, 384,
-                                      trainable_variables=trainable, reuse=reuse)
-                end_points[name] = net
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable=trainable):
+                    net = slim.repeat(net, 5, block35, scale=0.17,
+                                      trainable_variables=trainable, reuse=reuse)
+                end_points[name] = net
+
+                # Reduction-A
+                name = "Mixed_6a"
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable=trainable):
+                    with tf.variable_scope(name):
+                        net = reduction_a(net, 192, 192, 256, 384,
+                                          trainable_variables=trainable,
+                                          reuse=reuse)
+                end_points[name] = net

                 # 10 x Inception-Resnet-B
                 name = "block17"
-                trainable = is_trainable(name, trainable_variables)
-                net = slim.repeat(net, 10, block17, scale=0.10,
-                                  trainable_variables=trainable, reuse=reuse)
-                end_points[name] = net
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable=trainable):
+                    net = slim.repeat(net, 10, block17, scale=0.10,
+                                      trainable_variables=trainable, reuse=reuse)
+                end_points[name] = net

                 # Reduction-B
                 name = "Mixed_7a"
-                trainable = is_trainable(name, trainable_variables)
-                with tf.variable_scope(name):
-                    net = reduction_b(net, trainable_variables=trainable,
-                                      reuse=reuse)
-                end_points[name] = net
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable=trainable):
+                    with tf.variable_scope(name):
+                        net = reduction_b(net, trainable_variables=trainable,
+                                          reuse=reuse)
+                end_points[name] = net

                 # 5 x Inception-Resnet-C
                 name = "block8"
-                trainable = is_trainable(name, trainable_variables)
-                net = slim.repeat(net, 5, block8, scale=0.20,
-                                  trainable_variables=trainable, reuse=reuse)
-                end_points[name] = net
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable=trainable):
+                    net = slim.repeat(net, 5, block8, scale=0.20,
+                                      trainable_variables=trainable, reuse=reuse)
+                end_points[name] = net

                 name = "Mixed_8b"
-                trainable = is_trainable(name, trainable_variables)
-                net = block8(net, activation_fn=None,
-                             trainable_variables=trainable, reuse=reuse)
-                end_points[name] = net
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable=trainable):
+                    net = block8(net, activation_fn=None,
+                                 trainable_variables=trainable, reuse=reuse)
+                end_points[name] = net

                 with tf.variable_scope('Logits'):
                     end_points['PrePool'] = net
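Every call site now passes mode=mode to is_trainable, whose body is outside this diff. A plausible reading of its semantics, stated as an assumption rather than the repository's actual code:

    import tensorflow as tf

    def is_trainable(name, trainable_variables, mode=tf.estimator.ModeKeys.TRAIN):
        # Assumption: nothing is trainable outside of TRAIN mode.
        if mode != tf.estimator.ModeKeys.TRAIN:
            return False
        # Assumption: None means "train every sub-scope".
        if trainable_variables is None:
            return True
        # Otherwise only explicitly listed sub-scopes train.
        return name in trainable_variables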
...
...
@@ -535,13 +589,18 @@ def inception_resnet_v1(inputs,
                     end_points['PreLogitsFlatten'] = net

                 name = "Bottleneck"
-                trainable = is_trainable(name, trainable_variables)
-                net = slim.fully_connected(net, bottleneck_layer_size,
-                                           activation_fn=None, scope=name,
-                                           reuse=reuse, trainable=trainable)
+                trainable = is_trainable(name, trainable_variables, mode=mode)
+                with slim.arg_scope(
+                        [slim.batch_norm],
+                        is_training=(mode == tf.estimator.ModeKeys.TRAIN),
+                        trainable=trainable):
+                    net = slim.fully_connected(net, bottleneck_layer_size,
+                                               activation_fn=None, scope=name,
+                                               reuse=reuse, trainable=trainable)
                 end_points[name] = net

     return net, end_points
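A quick way to check the commit's intent, assuming TF 1.x and that the network is importable from bob.learn.tensorflow.network (the call below is a sketch, not a test from the repository):

    import tensorflow as tf
    from bob.learn.tensorflow.network import inception_resnet_v1_batch_norm

    images = tf.placeholder(tf.float32, [None, 160, 160, 3])
    net, end_points = inception_resnet_v1_batch_norm(
        images, mode=tf.estimator.ModeKeys.PREDICT)

    # Outside TRAIN mode no BatchNorm variable should remain trainable:
    leftovers = [v.name for v in tf.trainable_variables()
                 if 'BatchNorm' in v.name]
    assert not leftovers, leftovers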