bob / bob.learn.tensorflow · Merge requests · !18

This is an archived project. Repository and other project resources are read-only.
Monitored training
Merged · Amir MOHAMMADI requested to merge monitored_training into master · 7 years ago

Overview 5 · Commits 9 · Pipelines 7 · Changes 6
Merge request reports
Viewing commit a49cabae
6 files · +334 −129
Commit a49cabae · Add an mnist example for estimator scripts. Fixed some bugs · authored by Amir MOHAMMADI, 7 years ago
bob/learn/tensorflow/examples/mnist/mnist_config.py · new file (0 → 100644) · +204 −0
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Convolutional Neural Network Estimator for MNIST, built with tf.layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from bob.learn.tensorflow.utils.reproducible import session_conf
import tensorflow as tf

model_dir = '/tmp/mnist_model'
train_tfrecords = ['/tmp/mnist_data/train.tfrecords']
eval_tfrecords = ['/tmp/mnist_data/test.tfrecords']

# by default create reproducible nets:
run_config = tf.estimator.RunConfig()
run_config = run_config.replace(session_config=session_conf)
run_config = run_config.replace(keep_checkpoint_max=10**3)
run_config = run_config.replace(save_checkpoints_secs=60)


def input_fn(mode, batch_size=1):
    """A simple input_fn using the contrib.data input pipeline."""

    def example_parser(serialized_example):
        """Parses a single tf.Example into image and label tensors."""
        features = tf.parse_single_example(
            serialized_example,
            features={
                'image_raw': tf.FixedLenFeature([], tf.string),
                'label': tf.FixedLenFeature([], tf.int64),
            })
        image = tf.decode_raw(features['image_raw'], tf.uint8)
        image.set_shape([28 * 28])

        # Normalize the values of the image from the range
        # [0, 255] to [-0.5, 0.5]
        image = tf.cast(image, tf.float32) / 255 - 0.5
        label = tf.cast(features['label'], tf.int32)
        return image, tf.one_hot(label, 10)

    if mode == tf.estimator.ModeKeys.TRAIN:
        tfrecords_files = train_tfrecords
    else:
        assert mode == tf.estimator.ModeKeys.EVAL, 'invalid mode'
        tfrecords_files = eval_tfrecords

    for tfrecords_file in tfrecords_files:
        assert tf.gfile.Exists(tfrecords_file), (
            'Run github.com:tensorflow/models/official/mnist/'
            'convert_to_records.py first to convert the MNIST data to '
            'TFRecord file format.')

    dataset = tf.contrib.data.TFRecordDataset(tfrecords_files)

    # For training, repeat the dataset forever
    if mode == tf.estimator.ModeKeys.TRAIN:
        dataset = dataset.repeat()

    # Map example_parser over dataset, and batch results by up to batch_size
    dataset = dataset.map(
        example_parser, num_threads=1, output_buffer_size=batch_size)
    dataset = dataset.batch(batch_size)
    images, labels = dataset.make_one_shot_iterator().get_next()

    return images, labels


def train_input_fn():
    return input_fn(tf.estimator.ModeKeys.TRAIN)


def eval_input_fn():
    return input_fn(tf.estimator.ModeKeys.EVAL)


def mnist_model(inputs, mode):
    """Takes the MNIST inputs and mode and outputs a tensor of logits."""
    # Input Layer
    # Reshape X to 4-D tensor: [batch_size, width, height, channels]
    # MNIST images are 28x28 pixels, and have one color channel
    inputs = tf.reshape(inputs, [-1, 28, 28, 1])
    data_format = 'channels_last'

    if tf.test.is_built_with_cuda():
        # When running on GPU, transpose the data from channels_last (NHWC) to
        # channels_first (NCHW) to improve performance. See
        # https://www.tensorflow.org/performance/performance_guide#data_formats
        data_format = 'channels_first'
        inputs = tf.transpose(inputs, [0, 3, 1, 2])

    # Convolutional Layer #1
    # Computes 32 features using a 5x5 filter with ReLU activation.
    # Padding is added to preserve width and height.
    # Input Tensor Shape: [batch_size, 28, 28, 1]
    # Output Tensor Shape: [batch_size, 28, 28, 32]
    conv1 = tf.layers.conv2d(
        inputs=inputs,
        filters=32,
        kernel_size=[5, 5],
        padding='same',
        activation=tf.nn.relu,
        data_format=data_format)

    # Pooling Layer #1
    # First max pooling layer with a 2x2 filter and stride of 2
    # Input Tensor Shape: [batch_size, 28, 28, 32]
    # Output Tensor Shape: [batch_size, 14, 14, 32]
    pool1 = tf.layers.max_pooling2d(
        inputs=conv1, pool_size=[2, 2], strides=2, data_format=data_format)

    # Convolutional Layer #2
    # Computes 64 features using a 5x5 filter.
    # Padding is added to preserve width and height.
    # Input Tensor Shape: [batch_size, 14, 14, 32]
    # Output Tensor Shape: [batch_size, 14, 14, 64]
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding='same',
        activation=tf.nn.relu,
        data_format=data_format)

    # Pooling Layer #2
    # Second max pooling layer with a 2x2 filter and stride of 2
    # Input Tensor Shape: [batch_size, 14, 14, 64]
    # Output Tensor Shape: [batch_size, 7, 7, 64]
    pool2 = tf.layers.max_pooling2d(
        inputs=conv2, pool_size=[2, 2], strides=2, data_format=data_format)

    # Flatten tensor into a batch of vectors
    # Input Tensor Shape: [batch_size, 7, 7, 64]
    # Output Tensor Shape: [batch_size, 7 * 7 * 64]
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])

    # Dense Layer
    # Densely connected layer with 1024 neurons
    # Input Tensor Shape: [batch_size, 7 * 7 * 64]
    # Output Tensor Shape: [batch_size, 1024]
    dense = tf.layers.dense(
        inputs=pool2_flat, units=1024, activation=tf.nn.relu)

    # Add dropout operation; 0.6 probability that element will be kept
    dropout = tf.layers.dropout(
        inputs=dense, rate=0.4,
        training=(mode == tf.estimator.ModeKeys.TRAIN))

    # Logits layer
    # Input Tensor Shape: [batch_size, 1024]
    # Output Tensor Shape: [batch_size, 10]
    logits = tf.layers.dense(inputs=dropout, units=10)

    return logits


def model_fn(features, labels, mode):
    """Model function for MNIST."""
    logits = mnist_model(features, mode)

    predictions = {
        'classes': tf.argmax(input=logits, axis=1),
        'probabilities': tf.nn.softmax(logits, name='softmax_tensor')
    }

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)

    # Configure the training op
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
        train_op = optimizer.minimize(
            loss, tf.train.get_or_create_global_step())
    else:
        train_op = None

    accuracy = tf.metrics.accuracy(
        tf.argmax(labels, axis=1), predictions['classes'])
    metrics = {'accuracy': accuracy}

    with tf.name_scope('train_metrics'):
        # Create a tensor named train_accuracy for logging purposes
        tf.summary.scalar('train_accuracy', accuracy[1])
        tf.summary.scalar('train_loss', loss)

    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=metrics)
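
A note on data preparation: the asserts in input_fn point the user at convert_to_records.py from tensorflow/models to produce the TFRecord files. For orientation only, here is a minimal sketch of a writer that emits records in the exact layout example_parser reads back ('image_raw' as raw uint8 bytes, 'label' as an int64); the output path and the dummy record below are placeholders, not part of this merge request:

import numpy as np
import tensorflow as tf


def write_mnist_example(writer, image, label):
    # image: flat np.uint8 array of 28 * 28 pixels, matching the
    # tf.decode_raw(..., tf.uint8) and set_shape([28 * 28]) in example_parser
    example = tf.train.Example(features=tf.train.Features(feature={
        'image_raw': tf.train.Feature(
            bytes_list=tf.train.BytesList(value=[image.tobytes()])),
        'label': tf.train.Feature(
            int64_list=tf.train.Int64List(value=[int(label)])),
    }))
    writer.write(example.SerializeToString())


# Placeholder record; real data would come from the MNIST archives.
with tf.python_io.TFRecordWriter('/tmp/mnist_data/train.tfrecords') as writer:
    write_mnist_example(writer, np.zeros(28 * 28, dtype=np.uint8), 0)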
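The config file defines model_fn, model_dir, run_config, train_input_fn and eval_input_fn but never builds an estimator itself; presumably the estimator scripts mentioned in the commit message wire these together. A minimal sketch of such a driver, assuming the module is importable as mnist_config and using placeholder step counts; Estimator.train runs inside TensorFlow's monitored-session machinery, which is where the checkpoint settings in run_config take effect:

import tensorflow as tf

# Assumed import path; the actual estimator scripts in bob.learn.tensorflow
# may load this config file differently.
from mnist_config import (model_fn, model_dir, run_config,
                          train_input_fn, eval_input_fn)

# Wire the pieces of the config into a tf.estimator.Estimator.
estimator = tf.estimator.Estimator(
    model_fn=model_fn, model_dir=model_dir, config=run_config)

# train() runs under a monitored session, so save_checkpoints_secs and
# keep_checkpoint_max from run_config apply here.
estimator.train(input_fn=train_input_fn, steps=2000)  # placeholder step count
print(estimator.evaluate(input_fn=eval_input_fn, steps=100))  # placeholder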