bob / bob.learn.tensorflow · Commits

Commit 7da034f3, authored Aug 12, 2016 by Tiago de Freitas Pereira

    Still developing

Parent: 7bd53d0e
Changes: 11 files
bob/learn/tensorflow/DataShuffler.py

@@ -14,7 +14,7 @@ def scale_mean_norm(data, scale=0.00390625):

 class DataShuffler(object):
-    def __init__(self, data, labels, perc_train=0.9, scale=True, train_batch_size=1, validation_batch_size=1):
+    def __init__(self, data, labels, perc_train=0.9, scale=True, train_batch_size=1, validation_batch_size=100):
         """
         Some base functions for neural networks
         ...

@@ -36,11 +36,14 @@ class DataShuffler(object):
         self.channels = self.data.shape[3]
         self.start_shuffler()

-    def get_placeholders(self, name=""):
-        data = tf.placeholder(tf.float32, shape=(self.train_batch_size, self.width,
+    def get_placeholders(self, name="", train_dataset=True):
+        batch = self.train_batch_size if train_dataset else self.validation_batch_size
+
+        data = tf.placeholder(tf.float32, shape=(batch, self.width,
                                                  self.height, self.channels), name=name)

-        labels = tf.placeholder(tf.int64, shape=self.train_batch_size)
+        labels = tf.placeholder(tf.int64, shape=batch)

         return data, labels

@@ -71,7 +74,12 @@ class DataShuffler(object):
             self.train_data, self.mean = scale_mean_norm(self.train_data)
             self.validation_data = (self.validation_data - self.mean) * self.scale_value

-    def get_batch(self, n_samples, train_dataset=True):
+    def get_batch(self, train_dataset=True):
+
+        if train_dataset:
+            n_samples = self.train_batch_size
+        else:
+            n_samples = self.validation_batch_size

         if train_dataset:
             data = self.train_data
...
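The reworked API moves batch sizing into the shuffler itself. A minimal usage sketch (the array shapes are illustrative; the 4D layout follows the `self.data.shape[3]` channel access above):

    import numpy

    # (n_samples, width, height, channels), matching the 4D layout assumed above
    images = numpy.random.rand(1000, 28, 28, 1).astype("float32")
    labels = numpy.random.randint(0, 10, size=1000)

    shuffler = DataShuffler(images, labels, train_batch_size=16, validation_batch_size=100)

    # Placeholders are now sized from the stored batch sizes:
    train_data_node, train_labels_node = shuffler.get_placeholders(name="train")
    val_data_node, val_labels_node = shuffler.get_placeholders(name="validation", train_dataset=False)

    # get_batch() no longer takes n_samples; it reads the batch size internally:
    train_batch, train_batch_labels = shuffler.get_batch()
    val_batch, val_batch_labels = shuffler.get_batch(train_dataset=False)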
bob/learn/tensorflow/layers/Conv2D.py

@@ -41,23 +41,33 @@ class Conv2D(Layer):
         self.W = None
         self.b = None

-    def create_variables(self, input):
-        self.input = input
+    def create_variables(self, input_layer):
+        self.input_layer = input_layer

         # TODO: Do an assert here
+        if len(input_layer.get_shape().as_list()) != 4:
+            raise ValueError("The input as a convolutional layer must have 4 dimensions, "
+                             "but {0} were provided".format(len(input_layer.get_shape().as_list())))
+        n_channels = input_layer.get_shape().as_list()[3]

         if self.W is None:
-            self.W = create_weight_variables([self.kernel_size, self.kernel_size, 1, self.filters],
+            self.W = create_weight_variables([self.kernel_size, self.kernel_size, n_channels, self.filters],
                                              seed=self.seed, name=str(self.name), use_gpu=self.use_gpu)

             if self.activation is not None:
                 self.b = create_bias_variables([self.filters], name=str(self.name) + "bias", use_gpu=self.use_gpu)

     def get_graph(self):
         with tf.name_scope(str(self.name)):
-            conv2d = tf.nn.conv2d(self.input, self.W, strides=[1, 1, 1, 1], padding='SAME')
+            conv2d = tf.nn.conv2d(self.input_layer, self.W, strides=[1, 1, 1, 1], padding='SAME')

             if self.activation is not None:
                 with tf.name_scope(str(self.name) + 'activation'):
                     non_linear_conv2d = tf.nn.tanh(tf.nn.bias_add(conv2d, self.b))
-                    self.output = non_linear_conv2d
+                    output = non_linear_conv2d
             else:
-                self.output = conv2d
+                output = conv2d

-        return self.output
+        return output
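The switch from a hard-coded kernel depth of 1 to `n_channels` is what makes convolutional layers stackable: the third dimension of the kernel must equal the depth of the incoming tensor. A sketch of the shapes involved (sizes are illustrative):

    import tensorflow as tf

    images = tf.placeholder(tf.float32, shape=(16, 28, 28, 1))

    conv1 = Conv2D(name="conv1", kernel_size=5, filters=32)
    conv1.create_variables(images)   # kernel shape [5, 5, 1, 32]
    out1 = conv1.get_graph()         # output shape (16, 28, 28, 32)

    conv2 = Conv2D(name="conv2", kernel_size=5, filters=64)
    conv2.create_variables(out1)     # kernel shape [5, 5, 32, 64]; was [5, 5, 1, 64] before this commit
    out2 = conv2.get_graph()         # output shape (16, 28, 28, 64)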
bob/learn/tensorflow/layers/FullyConnected.py

@@ -35,11 +35,12 @@ class FullyConnected(Layer):
         self.output_dim = output_dim
         self.W = None
         self.b = None
         self.shape = None

-    def create_variables(self, input):
-        self.input = input
+    def create_variables(self, input_layer):
+        self.input_layer = input_layer

         if self.W is None:
-            input_dim = reduce(mul, self.input.get_shape().as_list())
+            input_dim = reduce(mul, self.input_layer.get_shape().as_list())
             self.W = create_weight_variables([input_dim, self.output_dim],
                                              seed=self.seed, name=str(self.name), use_gpu=self.use_gpu)
...

@@ -49,17 +50,15 @@ class FullyConnected(Layer):
     def get_graph(self):
         with tf.name_scope('fc'):
-            if len(self.input.get_shape()) == 4:
-                shape = self.input.get_shape().as_list()
-                fc = tf.reshape(self.input, [shape[0], shape[1] * shape[2] * shape[3]])
+            if len(self.input_layer.get_shape()) == 4:
+                shape = self.input_layer.get_shape().as_list()
+                fc = tf.reshape(self.input_layer, [shape[0], shape[1] * shape[2] * shape[3]])

             if self.activation is not None:
                 with tf.name_scope('activation'):
                     non_linear_fc = tf.nn.tanh(tf.matmul(fc, self.W) + self.b)
-                    self.output = non_linear_fc
+                    output = non_linear_fc
             else:
-                self.output = fc
-
-        return self.output
+                output = fc

+        return output
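For a 4D input, `get_graph` flattens everything except the batch dimension before the matmul. A small sketch of that reshape (sizes are illustrative):

    import tensorflow as tf

    conv_out = tf.placeholder(tf.float32, shape=(16, 7, 7, 64))
    shape = conv_out.get_shape().as_list()    # [16, 7, 7, 64]
    flat = tf.reshape(conv_out, [shape[0], shape[1] * shape[2] * shape[3]])
    print(flat.get_shape())                   # (16, 3136), i.e. 7 * 7 * 64 columns per sample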
bob/learn/tensorflow/layers/InputLayer.py  (new file, 0 → 100644)

+#!/usr/bin/env python
+# vim: set fileencoding=utf-8 :
+# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
+# @date: Wed 11 May 2016 17:38 CEST
+
+import tensorflow as tf
+from bob.learn.tensorflow.util import *
+from .Layer import Layer
+
+
+class InputLayer(Layer):
+
+    def __init__(self, name, input_data, use_gpu=False):
+        """
+        Constructor
+        """
+        super(InputLayer, self).__init__(name, use_gpu=False)
+        self.original_layer = input_data
+        self.__shape = input_data.get_shape()
+
+    def create_variables(self, input_layer):
+        return
+
+    def get_graph(self):
+        return self.original_layer
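The new class is a pass-through that lets a raw tensor sit at the head of a layer sequence: `create_variables` is a no-op and `get_graph` simply hands the wrapped tensor back. A sketch:

    import tensorflow as tf

    data = tf.placeholder(tf.float32, shape=(16, 28, 28, 1), name="input")
    input_layer = InputLayer(name="input", input_data=data)
    input_layer.create_variables(None)        # nothing to create
    assert input_layer.get_graph() is data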
bob/learn/tensorflow/layers/Layer.py

@@ -25,18 +25,11 @@ class Layer(object):
         self.use_gpu = use_gpu
         self.seed = seed
-        self.input = None
-        self.activation = None
-        self.output = None
+        self.input_layer = None
+        self.activation = activation

-    def create_variables(self, input):
+    def create_variables(self, input_layer):
         NotImplementedError("Please implement this function in derived classes")

     def get_graph(self):
         NotImplementedError("Please implement this function in derived classes")
-
-    def get_shape(self):
-        if self.output is None:
-            NotImplementedError("This class was not implemented properly")
-        else:
-            return self.output.get_shape()
bob/learn/tensorflow/layers/MaxPooling.py

@@ -16,12 +16,12 @@ class MaxPooling(Layer):
         """
         super(MaxPooling, self).__init__(name, use_gpu=False)

-    def create_variables(self, input):
-        self.input = input
+    def create_variables(self, input_layer):
+        self.input_layer = input_layer
         return

     def get_graph(self):
         with tf.name_scope(str(self.name)):
-            self.output = tf.nn.max_pool(self.input, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')
+            output = tf.nn.max_pool(self.input_layer, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

-        return self.output
+        return output
bob/learn/tensorflow/layers/__init__.py

@@ -7,5 +7,6 @@ from .Layer import Layer
 from .Conv2D import Conv2D
 from .FullyConnected import FullyConnected
 from .MaxPooling import MaxPooling
+from .InputLayer import InputLayer
bob/learn/tensorflow/network/Lenet.py

@@ -43,9 +43,9 @@ class Lenet(SequenceNetwork):
         """
         super(Lenet, self).__init__()

-        self.add(Conv2D(name="conv1", kernel_size=conv1_kernel_size, filters=conv1_output))
+        self.add(Conv2D(name="conv1", kernel_size=conv1_kernel_size, filters=conv1_output, activation=True))
         self.add(MaxPooling(name="pooling1"))
-        self.add(Conv2D(name="conv2", kernel_size=conv2_kernel_size, filters=conv2_output))
+        self.add(Conv2D(name="conv2", kernel_size=conv2_kernel_size, filters=conv2_output, activation=True))
         self.add(MaxPooling(name="pooling2"))
         self.add(FullyConnected(name="fc1", output_dim=fc1_output, activation=True))
-        self.add(FullyConnected(name="fc1", output_dim=n_classes, activation=False))
+        self.add(FullyConnected(name="fc2", output_dim=n_classes, activation=None))
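Besides wiring `activation=True` into the convolutional layers, this fixes two slips in the logits layer: it was registered under the duplicate name "fc1", and it used `activation=False`, which is not `None` and would therefore still trip the `is not None` checks in the layer classes. A construction sketch (the constructor keywords are assumed from the body above; the values are illustrative):

    import tensorflow as tf

    lenet = Lenet(conv1_kernel_size=5, conv1_output=16,
                  conv2_kernel_size=5, conv2_output=32,
                  fc1_output=400, n_classes=10)

    data_node = tf.placeholder(tf.float32, shape=(16, 28, 28, 1))
    logits = lenet.compute_graph(data_node)   # conv1 -> pooling1 -> conv2 -> pooling2 -> fc1 -> fc2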
bob/learn/tensorflow/network/SequenceNetwork.py

@@ -13,7 +13,7 @@ import abc
 import six
 from collections import OrderedDict

-from bob.learn.tensorflow.layers import *
+from bob.learn.tensorflow.layers import Layer


 class SequenceNetwork(six.with_metaclass(abc.ABCMeta, object)):
...

@@ -38,18 +38,10 @@ class SequenceNetwork(six.with_metaclass(abc.ABCMeta, object)):
     def compute_graph(self, input_data):
         input_offset = input_data
         for k in self.sequence_net.keys():
-            print k
-            import ipdb; ipdb.set_trace();
             current_layer = self.sequence_net[k]
             current_layer.create_variables(input_offset)
-            input_offset = current_layer.get_graph
+            input_offset = current_layer.get_graph()

         return input_offset
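The one-character fix in `compute_graph` matters: without the parentheses, `input_offset` was bound to the method object itself, so the next layer's `create_variables` received a callable instead of a tensor. In plain Python:

    class Example(object):
        def get_graph(self):
            return "tensor"

    layer = Example()
    print(layer.get_graph)     # <bound method Example.get_graph of ...> -- what the old code propagated
    print(layer.get_graph())   # 'tensor' -- what the next layer actually needs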
bob/learn/tensorflow/trainers/Trainer.py

@@ -8,6 +8,8 @@ logger = logging.getLogger("bob.learn.tensorflow")
 from ..DataShuffler import DataShuffler
 import tensorflow as tf
 from ..network import SequenceNetwork
+import numpy
+from bob.learn.tensorflow.layers import InputLayer


 class Trainer(object):
...

@@ -49,26 +51,37 @@ class Trainer(object):
         """
         train_placeholder_data, train_placeholder_labels = data_shuffler.get_placeholders(name="train")
-        validation_placeholder_data, validation_placeholder_labels = data_shuffler.get_placeholders(name="validation")
+        validation_placeholder_data, validation_placeholder_labels = data_shuffler.get_placeholders(name="validation",
+                                                                                                    train_dataset=False)

         # Creating the architecture for train and validation
         if not isinstance(self.architecture, SequenceNetwork):
             raise ValueError("The variable `architecture` must be an instance of "
                              "`bob.learn.tensorflow.network.SequenceNetwork`")

+        #input_layer = InputLayer(name="input", input_data=train_placeholder_data)
+
+        import ipdb; ipdb.set_trace();
         train_graph = self.architecture.compute_graph(train_placeholder_data)
-        loss_instance = tf.reduce_mean(self.loss(train_graph))
+        validation_graph = self.architecture.compute_graph(validation_placeholder_data)
+
+        loss_train = tf.reduce_mean(self.loss(train_graph, train_placeholder_labels))
+        loss_validation = tf.reduce_mean(self.loss(validation_graph, validation_placeholder_labels))

         batch = tf.Variable(0)
         learning_rate = tf.train.exponential_decay(
             self.base_lr,  # Learning rate
-            batch * self.train_batch_size,
+            batch * data_shuffler.train_batch_size,
             data_shuffler.train_data.shape[0],
             self.weight_decay  # Decay step
         )

-        optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(self.loss_instance,
+        optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss_train,
                                                                               global_step=batch)

         train_prediction = tf.nn.softmax(train_graph)
+        validation_prediction = tf.nn.softmax(validation_graph)

         print("Initializing !!")
         # Training
...

@@ -76,22 +89,26 @@ class Trainer(object):
             tf.initialize_all_variables().run()
             for step in range(self.iterations):

-                train_data, train_labels = data_shuffler.get_batch(self.train_batch_size)
+                train_data, train_labels = data_shuffler.get_batch()

                 feed_dict = {train_placeholder_data: train_data,
                              train_placeholder_labels: train_labels}

-                _, l, lr, predictions = session.run([self.optimizer, self.loss_instance, self.learning_rate,
-                                                     train_prediction], feed_dict=feed_dict)
+                _, l, lr, _ = session.run([optimizer, loss_train, learning_rate,
+                                           train_prediction], feed_dict=feed_dict)

                 if step % self.snapshot == 0:
-                    validation_data, validation_labels = data_shuffler.get_batch(
-                        data_shuffler.validation_data.shape[0], train_dataset=False)
+                    validation_data, validation_labels = data_shuffler.get_batch(train_dataset=False)

                     feed_dict = {validation_placeholder_data: validation_data,
                                  validation_placeholder_labels: validation_labels}

-                    l, predictions = session.run([self.loss_instance, train_prediction], feed_dict=feed_dict)
-                    print("Step {0}. Loss = {1}, Lr={2}".format(step, l, predictions))
+                    import ipdb; ipdb.set_trace();
+                    l, predictions = session.run([loss_validation, validation_prediction], feed_dict=feed_dict)
+                    accuracy = 100. * numpy.sum(numpy.argmax(predictions, 1) == validation_labels) / predictions.shape[0]
+                    print "Step {0}. Loss = {1}, Acc Validation={2}".format(step, l, accuracy)

                     #accuracy = util.evaluate_softmax(validation_data, validation_labels, session, validation_prediction,
                     #                                 validation_data_node)
...
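Taken together, the graph-construction values (`optimizer`, `loss_train`, `learning_rate`) are now locals threaded through `session.run` rather than `self.` attributes. A driving sketch; the `Trainer` constructor keywords and the `train` entry point are assumptions inferred from the attributes referenced above (`self.architecture`, `self.loss`, `self.iterations`, `self.snapshot`):

    import tensorflow as tf

    lenet = Lenet()
    trainer = Trainer(architecture=lenet,
                      loss=tf.nn.sparse_softmax_cross_entropy_with_logits,  # any loss(logits, labels) callable
                      iterations=5000,
                      snapshot=100)
    trainer.train(data_shuffler)   # hypothetical entry point wrapping the loop above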
bob/learn/tensorflow/trainers/__init__.py

@@ -5,4 +5,18 @@ __path__ = extend_path(__path__, __name__)
 from ..DataShuffler import DataShuffler
 from .Trainer import Trainer

+import numpy
+
+
+def evaluate_softmax(data, labels, session, graph, data_node):
+    """
+    Evaluate the network assuming that the output layer is a softmax
+    """
+    predictions = numpy.argmax(session.run(graph, feed_dict={data_node: data[:]}), 1)
+
+    return 100. * numpy.sum(predictions == labels) / predictions.shape[0]
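The helper returns accuracy as a percentage. A calling sketch, reusing names from the Trainer diff above (`session` is an active tf.Session, `validation_prediction` a softmax node, `validation_placeholder_data` its input placeholder):

    acc = evaluate_softmax(validation_data, validation_labels, session,
                           validation_prediction, validation_placeholder_data)
    print("Acc Validation={0}".format(acc))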