This is an archived project. Repository and other project resources are read-only.
Commit a28815cd, authored 4 years ago by Amir MOHAMMADI
remove the examples folder

Parent: 8b241e4d
1 merge request: !85 Porting to TF2
Changes: showing 2 changed files with 0 additions and 295 deletions

- bob/learn/tensorflow/examples/mnist/mnist_config.py (+0, −233)
- bob/learn/tensorflow/examples/mnist/tfrecords.py (+0, −62)
bob/learn/tensorflow/examples/mnist/mnist_config.py (deleted, 100644 → 0, +0 −233)
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Convolutional Neural Network Estimator for MNIST, built with tf.layers.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

# create reproducible nets:
from bob.learn.tensorflow.utils.reproducible import run_config
import tensorflow as tf
from bob.db.mnist import Database

model_dir = '/tmp/mnist_model'
train_tfrecords = ['/tmp/mnist_data/train.tfrecords']
eval_tfrecords = ['/tmp/mnist_data/test.tfrecords']

run_config = run_config.replace(keep_checkpoint_max=10**3)
run_config = run_config.replace(save_checkpoints_secs=60)


def input_fn(mode, batch_size=1):
    """A simple input_fn using the contrib.data input pipeline."""

    def example_parser(serialized_example):
        """Parses a single tf.Example into image and label tensors."""
        features = tf.parse_single_example(
            serialized_example,
            features={
                'data': tf.FixedLenFeature([], tf.string),
                'label': tf.FixedLenFeature([], tf.int64),
                'key': tf.FixedLenFeature([], tf.string),
            })
        image = tf.decode_raw(features['data'], tf.uint8)
        image.set_shape([28 * 28])
        # Normalize the values of the image from the range
        # [0, 255] to [-0.5, 0.5]
        image = tf.cast(image, tf.float32) / 255 - 0.5
        label = tf.cast(features['label'], tf.int32)
        key = tf.cast(features['key'], tf.string)
        return image, tf.one_hot(label, 10), key

    if mode == tf.estimator.ModeKeys.TRAIN:
        tfrecords_files = train_tfrecords
    elif mode == tf.estimator.ModeKeys.EVAL:
        tfrecords_files = eval_tfrecords
    else:
        assert mode == tf.estimator.ModeKeys.PREDICT, 'invalid mode'
        tfrecords_files = eval_tfrecords

    for tfrecords_file in tfrecords_files:
        assert tf.gfile.Exists(tfrecords_file), (
            'Run github.com:tensorflow/models/official/mnist/'
            'convert_to_records.py first to convert the MNIST data to '
            'TFRecord file format.')

    dataset = tf.data.TFRecordDataset(tfrecords_files)

    # For training, repeat the dataset forever
    if mode == tf.estimator.ModeKeys.TRAIN:
        dataset = dataset.repeat()

    # Map example_parser over dataset, and batch results by up to batch_size
    dataset = dataset.map(
        example_parser, num_parallel_calls=1).prefetch(batch_size)
    dataset = dataset.batch(batch_size)
    images, labels, keys = dataset.make_one_shot_iterator().get_next()

    return {'images': images, 'keys': keys}, labels


def train_input_fn():
    return input_fn(tf.estimator.ModeKeys.TRAIN)


def eval_input_fn():
    return input_fn(tf.estimator.ModeKeys.EVAL)


def predict_input_fn():
    return input_fn(tf.estimator.ModeKeys.PREDICT)


def mnist_model(inputs, mode):
    """Takes the MNIST inputs and mode and outputs a tensor of logits."""
    # Input Layer
    # Reshape X to 4-D tensor: [batch_size, width, height, channels]
    # MNIST images are 28x28 pixels, and have one color channel
    inputs = tf.reshape(inputs, [-1, 28, 28, 1])
    data_format = 'channels_last'

    if tf.test.is_built_with_cuda():
        # When running on GPU, transpose the data from channels_last (NHWC) to
        # channels_first (NCHW) to improve performance. See
        # https://www.tensorflow.org/performance/performance_guide#data_formats
        data_format = 'channels_first'
        inputs = tf.transpose(inputs, [0, 3, 1, 2])

    # Convolutional Layer #1
    # Computes 32 features using a 5x5 filter with ReLU activation.
    # Padding is added to preserve width and height.
    # Input Tensor Shape: [batch_size, 28, 28, 1]
    # Output Tensor Shape: [batch_size, 28, 28, 32]
    conv1 = tf.layers.conv2d(
        inputs=inputs,
        filters=32,
        kernel_size=[5, 5],
        padding='same',
        activation=tf.nn.relu,
        data_format=data_format)

    # Pooling Layer #1
    # First max pooling layer with a 2x2 filter and stride of 2
    # Input Tensor Shape: [batch_size, 28, 28, 32]
    # Output Tensor Shape: [batch_size, 14, 14, 32]
    pool1 = tf.layers.max_pooling2d(
        inputs=conv1, pool_size=[2, 2], strides=2, data_format=data_format)

    # Convolutional Layer #2
    # Computes 64 features using a 5x5 filter.
    # Padding is added to preserve width and height.
    # Input Tensor Shape: [batch_size, 14, 14, 32]
    # Output Tensor Shape: [batch_size, 14, 14, 64]
    conv2 = tf.layers.conv2d(
        inputs=pool1,
        filters=64,
        kernel_size=[5, 5],
        padding='same',
        activation=tf.nn.relu,
        data_format=data_format)

    # Pooling Layer #2
    # Second max pooling layer with a 2x2 filter and stride of 2
    # Input Tensor Shape: [batch_size, 14, 14, 64]
    # Output Tensor Shape: [batch_size, 7, 7, 64]
    pool2 = tf.layers.max_pooling2d(
        inputs=conv2, pool_size=[2, 2], strides=2, data_format=data_format)

    # Flatten tensor into a batch of vectors
    # Input Tensor Shape: [batch_size, 7, 7, 64]
    # Output Tensor Shape: [batch_size, 7 * 7 * 64]
    pool2_flat = tf.reshape(pool2, [-1, 7 * 7 * 64])

    # Dense Layer
    # Densely connected layer with 1024 neurons
    # Input Tensor Shape: [batch_size, 7 * 7 * 64]
    # Output Tensor Shape: [batch_size, 1024]
    dense = tf.layers.dense(
        inputs=pool2_flat, units=1024, activation=tf.nn.relu)

    # Add dropout operation; 0.6 probability that element will be kept
    dropout = tf.layers.dropout(
        inputs=dense, rate=0.4,
        training=(mode == tf.estimator.ModeKeys.TRAIN))

    # Logits layer
    # Input Tensor Shape: [batch_size, 1024]
    # Output Tensor Shape: [batch_size, 10]
    logits = tf.layers.dense(inputs=dropout, units=10)

    return logits


def model_fn(features, labels=None, mode=tf.estimator.ModeKeys.TRAIN):
    """Model function for MNIST."""
    keys = features['keys']
    features = features['images']
    logits = mnist_model(features, mode)

    predictions = {
        'classes': tf.argmax(input=logits, axis=1),
        'probabilities': tf.nn.softmax(logits, name='softmax_tensor'),
        'keys': keys,
    }

    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)

    # Configure the training op
    if mode == tf.estimator.ModeKeys.TRAIN:
        optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
        train_op = optimizer.minimize(
            loss, tf.train.get_or_create_global_step())
    else:
        train_op = None

    accuracy = tf.metrics.accuracy(
        tf.argmax(labels, axis=1), predictions['classes'])
    metrics = {'accuracy': accuracy}

    with tf.name_scope('train_metrics'):
        # Create a tensor named train_accuracy for logging purposes
        tf.summary.scalar('train_accuracy', accuracy[1])
        tf.summary.scalar('train_loss', loss)

    return tf.estimator.EstimatorSpec(
        mode=mode,
        predictions=predictions,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=metrics)


estimator = tf.estimator.Estimator(
    model_fn=model_fn, model_dir=model_dir, params=None, config=run_config)

output = train_tfrecords[0]
db = Database()
data, labels = db.data(groups='train')

# output = eval_tfrecords[0]
# db = Database()
# data, labels = db.data(groups='test')

samples = zip(data, labels, (str(i) for i in range(len(data))))


def reader(sample):
    return sample
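Note that the deleted config stops after defining the estimator, the input functions, and the `samples` iterator; nothing in the file itself launches training. For context, a minimal driver sketch using only the stock TF 1.x `tf.estimator` API could look like the following (an illustration, not a script that shipped with this commit; `max_steps=20000` is an arbitrary value chosen here):

# Hypothetical driver for the config above (not part of this commit).
# Trains the MNIST estimator and evaluates it periodically.
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn, max_steps=20000)
eval_spec = tf.estimator.EvalSpec(input_fn=eval_input_fn)
tf.estimator.train_and_evaluate(estimator, train_spec, eval_spec)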
bob/learn/tensorflow/examples/mnist/tfrecords.py (deleted, 100644 → 0, +0 −62)
# Required objects:

# you need a database object that inherits from
# bob.bio.base.database.BioDatabase (PAD dbs work too)
database = Database()

# the directory pointing to where the processed data is:
data_dir = '/idiap/temp/user/database_name/sub_directory/preprocessed'

# the directory to save the tfrecords in:
output_dir = '/idiap/temp/user/database_name/sub_directory'


# A function that converts a BioFile or a PadFile to a label:
# Example for PAD
def file_to_label(f):
    return f.attack_type is None


# Example for Bio (You may want to run this script for groups=['world'] only
# in biometric recognition experiments.)
CLIENT_IDS = (str(f.client_id) for f in database.all_files(groups=groups))
CLIENT_IDS = list(set(CLIENT_IDS))
CLIENT_IDS = dict(zip(CLIENT_IDS, range(len(CLIENT_IDS))))


def file_to_label(f):
    return CLIENT_IDS[str(f.client_id)]


# Optional objects:

# The groups that you want to create tfrecords for. It should be a list of
# 'world' ('train' in bob.pad.base), 'dev', and 'eval' values. [default:
# 'world']
groups = ['world']

# you need a reader function that reads the preprocessed files. [default:
# bob.bio.base.utils.load]
reader = Preprocessor().read_data
reader = Extractor().read_feature
# or
from bob.bio.base.utils import load as reader


# or a reader that casts images to uint8:
def reader(path):
    data = bob.bio.base.utils.load(path)
    return data.astype("uint8")


# extension of the preprocessed files. [default: '.hdf5']
data_extension = '.hdf5'

# Shuffle the files before writing them into the tfrecords. [default: False]
shuffle = True

# Whether each file contains one sample or more. [default: True] If
# this is False, the loaded samples from a file are iterated over and each
# of them is saved as an independent feature.
one_file_one_sample = True
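These config objects are consumed by the package's tfrecords-creation script. As a rough sketch of what such a script does with them (an illustration under assumptions, not the project's actual code: `make_path` and `path` are the usual bob.bio.base file-object API, and the output filename here is made up), each sample is serialized into the same `data`/`label`/`key` feature layout that `example_parser` in mnist_config.py above expects:

import os
import tensorflow as tf


def _bytes_feature(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))


def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))


# Hypothetical output name; the real script derives its own naming scheme.
tfrecords_path = os.path.join(output_dir, 'world.tfrecords')
with tf.python_io.TFRecordWriter(tfrecords_path) as writer:
    for f in database.all_files(groups=groups):
        # Load the preprocessed data and serialize it with its label and key.
        data = reader(f.make_path(data_dir, data_extension))  # assumed API
        example = tf.train.Example(features=tf.train.Features(feature={
            'data': _bytes_feature(data.tostring()),
            'label': _int64_feature(int(file_to_label(f))),
            'key': _bytes_feature(str(f.path).encode('utf-8')),
        }))
        writer.write(example.SerializeToString())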