bob / bob.learn.tensorflow · Commits

Commit f3f96b38
Authored Sep 12, 2016 by Tiago de Freitas Pereira

Tests with dropout

parent cebdb323

Changes 15
bob/learn/tensorflow/initialization/Constant.py
...
@@ -31,7 +31,7 @@ class Constant(Initialization):
         initializer = tf.constant(self.constant_value, shape=shape)
         if self.use_gpu:
-            with tf.device("/gpu"):
+            with tf.device("/gpu:0"):
                 return tf.get_variable(name, initializer=initializer, dtype=tf.float32)
         else:
             with tf.device("/cpu"):
...
bob/learn/tensorflow/initialization/Gaussian.py
...
@@ -37,7 +37,7 @@ class Gaussian(Initialization):
                                          seed=self.seed)
         if self.use_gpu:
-            with tf.device("/gpu"):
+            with tf.device("/gpu:0"):
                 return tf.get_variable(name, initializer=initializer, dtype=tf.float32)
         else:
             with tf.device("/cpu"):
...
bob/learn/tensorflow/initialization/SimplerXavier.py
...
@@ -39,7 +39,7 @@ class SimplerXavier(Initialization):
         initializer = tf.truncated_normal(shape, stddev=stddev, seed=self.seed)
         if self.use_gpu:
-            with tf.device("/gpu"):
+            with tf.device("/gpu:0"):
                 return tf.get_variable(name, initializer=initializer, dtype=tf.float32)
         else:
             with tf.device("/cpu"):
...
bob/learn/tensorflow/initialization/Xavier.py
...
@@ -35,12 +35,12 @@ class Xavier(Initialization):
         in_out = shape[0] + shape[1]
         import math
-        stddev = math.sqrt(2.0 / in_out)  # XAVIER INITIALIZER (GAUSSIAN)
+        stddev = math.sqrt(3.0 / in_out)  # XAVIER INITIALIZER (GAUSSIAN)

         initializer = tf.truncated_normal(shape, stddev=stddev, seed=self.seed)
         if self.use_gpu:
-            with tf.device("/gpu"):
+            with tf.device("/gpu:0"):
                 return tf.get_variable(name, initializer=initializer, dtype=tf.float32)
         else:
             with tf.device("/cpu"):
...
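For context, not part of the commit: the change above widens the Gaussian used for the weights, moving the standard deviation from sqrt(2.0/in_out) to sqrt(3.0/in_out), where in_out = fan_in + fan_out. A quick numeric check with hypothetical fan sizes:

import math

in_out = 25 + 16                # hypothetical fan_in + fan_out
print(math.sqrt(2.0 / in_out))  # old stddev: ~0.221
print(math.sqrt(3.0 / in_out))  # new stddev: ~0.271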
bob/learn/tensorflow/layers/Dropout.py
0 → 100644
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Wed 11 May 2016 17:38 CEST

import tensorflow as tf

from .Layer import Layer
from operator import mul


class Dropout(Layer):
    """
    Dropout
    """

    def __init__(self, name,
                 keep_prob=0.99,
                 seed=10.):
        """
        Constructor

        **Parameters**

        name: Layer name
        keep_prob: Probability that each element is kept (passed to tf.nn.dropout)
        seed: Seed for the random number generation
        """
        super(Dropout, self).__init__(name=name)
        self.keep_prob = keep_prob
        self.seed = seed

    def create_variables(self, input_layer):
        self.input_layer = input_layer
        return

    def get_graph(self):
        with tf.name_scope(str(self.name)):
            output = tf.nn.dropout(self.input_layer, self.keep_prob, name=self.name)
        return output
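A minimal usage sketch for the new layer, assuming the TensorFlow 0.x API the package targets and a hypothetical 400-dimensional input:

import tensorflow as tf
from bob.learn.tensorflow.layers import Dropout

x = tf.placeholder(tf.float32, shape=(None, 400))  # hypothetical fc1-sized input
dropout = Dropout(name="dropout", keep_prob=0.5)
dropout.create_variables(x)  # the layer has no variables; it only records its input
y = dropout.get_graph()      # builds tf.nn.dropout(x, 0.5) under the layer's name scope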
bob/learn/tensorflow/layers/MaxPooling.py
...
@@ -10,7 +10,7 @@ from .Layer import Layer
 class MaxPooling(Layer):

-    def __init__(self, name, use_gpu=False):
+    def __init__(self, name):
         """
         Constructor
         """
...
bob/learn/tensorflow/layers/__init__.py
...
@@ -7,6 +7,7 @@ from .Layer import Layer
 from .Conv2D import Conv2D
 from .FullyConnected import FullyConnected
 from .MaxPooling import MaxPooling
+from .Dropout import Dropout
 from .InputLayer import InputLayer

 # gets sphinx autodoc done right - don't remove it
...
bob/learn/tensorflow/network/Lenet.py
...
@@ -29,7 +29,7 @@ class Lenet(SequenceNetwork):
                  default_feature_layer="fc2",
                  seed=10,
-                 use_gpu=False):
+                 use_gpu=False):
         """
         Create all the necessary variables for this CNN
...
@@ -45,28 +45,29 @@ class Lenet(SequenceNetwork):
         seed = 10
         """
-        super(Lenet, self).__init__(default_feature_layer=default_feature_layer)
+        super(Lenet, self).__init__(default_feature_layer=default_feature_layer, use_gpu=use_gpu)

         self.add(Conv2D(name="conv1", kernel_size=conv1_kernel_size,
                         filters=conv1_output,
                         activation=tf.nn.tanh,
-                        weights_initialization=Xavier(seed=seed),
-                        bias_initialization=Constant()
+                        weights_initialization=Xavier(seed=seed, use_gpu=self.use_gpu),
+                        bias_initialization=Constant(use_gpu=self.use_gpu)
                         ))
         self.add(MaxPooling(name="pooling1"))

         self.add(Conv2D(name="conv2", kernel_size=conv2_kernel_size,
                         filters=conv2_output,
                         activation=tf.nn.tanh,
-                        weights_initialization=Xavier(seed=seed),
-                        bias_initialization=Constant()
+                        weights_initialization=Xavier(seed=seed, use_gpu=self.use_gpu),
+                        bias_initialization=Constant(use_gpu=self.use_gpu)
                         ))
         self.add(MaxPooling(name="pooling2"))

         self.add(FullyConnected(name="fc1", output_dim=fc1_output,
                                 activation=tf.nn.tanh,
-                                weights_initialization=Xavier(seed=seed),
-                                bias_initialization=Constant()
+                                weights_initialization=Xavier(seed=seed, use_gpu=self.use_gpu),
+                                bias_initialization=Constant(use_gpu=self.use_gpu)
                                 ))

         self.add(FullyConnected(name="fc2", output_dim=n_classes,
                                 activation=None,
-                                weights_initialization=Xavier(seed=seed),
-                                bias_initialization=Constant()))
+                                weights_initialization=Xavier(seed=seed, use_gpu=self.use_gpu),
+                                bias_initialization=Constant(use_gpu=self.use_gpu)))
bob/learn/tensorflow/network/LenetDropout.py
0 → 100644
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Wed 11 May 2016 09:39:36 CEST

"""
Class that creates the lenet architecture
"""

import tensorflow as tf

from .SequenceNetwork import SequenceNetwork
from ..layers import Conv2D, FullyConnected, MaxPooling, Dropout
import bob.learn.tensorflow
from bob.learn.tensorflow.initialization import Xavier
from bob.learn.tensorflow.initialization import Constant


class LenetDropout(SequenceNetwork):
    def __init__(self,
                 conv1_kernel_size=5,
                 conv1_output=16,
                 conv2_kernel_size=5,
                 conv2_output=32,
                 fc1_output=400,
                 n_classes=10,
                 default_feature_layer="fc2",
                 seed=10,
                 use_gpu=False):
        """
        Create all the necessary variables for this CNN

        **Parameters**

        conv1_kernel_size=5,
        conv1_output=16,
        conv2_kernel_size=5,
        conv2_output=32,
        fc1_output=400,
        n_classes=10,
        seed=10
        """
        super(LenetDropout, self).__init__(default_feature_layer=default_feature_layer, use_gpu=use_gpu)

        self.add(Conv2D(name="conv1", kernel_size=conv1_kernel_size,
                        filters=conv1_output,
                        activation=tf.nn.tanh,
                        weights_initialization=Xavier(seed=seed, use_gpu=self.use_gpu),
                        bias_initialization=Constant(use_gpu=self.use_gpu)
                        ))
        self.add(MaxPooling(name="pooling1"))

        self.add(Conv2D(name="conv2", kernel_size=conv2_kernel_size,
                        filters=conv2_output,
                        activation=tf.nn.tanh,
                        weights_initialization=Xavier(seed=seed, use_gpu=self.use_gpu),
                        bias_initialization=Constant(use_gpu=self.use_gpu)
                        ))
        self.add(MaxPooling(name="pooling2"))

        self.add(Dropout(name="dropout"))

        self.add(FullyConnected(name="fc1", output_dim=fc1_output,
                                activation=tf.nn.tanh,
                                weights_initialization=Xavier(seed=seed, use_gpu=self.use_gpu),
                                bias_initialization=Constant(use_gpu=self.use_gpu)
                                ))

        self.add(FullyConnected(name="fc2", output_dim=n_classes,
                                activation=None,
                                weights_initialization=Xavier(seed=seed, use_gpu=self.use_gpu),
                                bias_initialization=Constant(use_gpu=self.use_gpu)))
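A construction sketch for the new network, echoing the commented-out call in train_mnist_siamese.py further below (the n_classes value here is hypothetical):

from bob.learn.tensorflow.network import LenetDropout

net = LenetDropout(default_feature_layer="fc2", n_classes=10,
                   conv1_output=4, conv2_output=8, use_gpu=False)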
bob/learn/tensorflow/network/MLP.py
0 → 100644
#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Wed 11 May 2016 09:39:36 CEST

"""
Class that creates an MLP architecture
"""

import tensorflow as tf

from .SequenceNetwork import SequenceNetwork
from ..layers import Conv2D, FullyConnected, MaxPooling
from bob.learn.tensorflow.initialization import Xavier
from bob.learn.tensorflow.initialization import Constant


class MLP(SequenceNetwork):
    def __init__(self,
                 output_shape,
                 hidden_layers=[10],
                 hidden_activation=tf.nn.tanh,
                 output_activation=None,
                 weights_initialization=Xavier(),
                 bias_initialization=Constant(),
                 use_gpu=False):
        """
        Create all the necessary variables for this MLP

        **Parameters**

        output_shape: Shape of the output
        hidden_layers: List whose length is the number of hidden layers and whose elements give the number of neurons in each
        hidden_activation: Activation function of the hidden layers. If you set it to `None`, the activation will be linear.
        output_activation: Activation of the output layer. If you set it to `None`, the activation will be linear.
        """
        super(MLP, self).__init__(use_gpu=use_gpu)

        if (not (isinstance(hidden_layers, list) or isinstance(hidden_layers, tuple))) or len(hidden_layers) == 0:
            raise ValueError("Invalid input for hidden_layers: {0}".format(hidden_layers))

        for i in range(len(hidden_layers)):
            l = hidden_layers[i]
            self.add(FullyConnected(name="fc{0}".format(i),
                                    output_dim=l,
                                    activation=hidden_activation,
                                    weights_initialization=weights_initialization,
                                    bias_initialization=bias_initialization))

        self.add(FullyConnected(name="fc_output",
                                output_dim=output_shape,
                                activation=output_activation,
                                weights_initialization=weights_initialization,
                                bias_initialization=bias_initialization))
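A construction sketch for the new MLP class (layer sizes are hypothetical):

import tensorflow as tf
from bob.learn.tensorflow.network import MLP

# two tanh hidden layers of 50 and 20 units, linear 10-way output
mlp = MLP(10, hidden_layers=[50, 20], hidden_activation=tf.nn.tanh)

# hidden_layers must be a non-empty list or tuple:
# MLP(10, hidden_layers=[]) raises ValueError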
bob/learn/tensorflow/network/SequenceNetwork.py
...
@@ -13,7 +13,7 @@ import six
 import os
 from collections import OrderedDict

-from bob.learn.tensorflow.layers import Layer, MaxPooling
+from bob.learn.tensorflow.layers import Layer, MaxPooling, Dropout


 class SequenceNetwork(six.with_metaclass(abc.ABCMeta, object)):
...
@@ -22,7 +22,8 @@ class SequenceNetwork(six.with_metaclass(abc.ABCMeta, object)):
     """

-    def __init__(self, default_feature_layer=None):
+    def __init__(self, default_feature_layer=None,
+                 use_gpu=False):
         """
         Base constructor
...
@@ -34,6 +35,7 @@ class SequenceNetwork(six.with_metaclass(abc.ABCMeta, object)):
         self.default_feature_layer = default_feature_layer
         self.input_divide = 1.
         self.input_subtract = 0.
+        self.use_gpu = use_gpu
         #self.saver = None

     def add(self, layer):
...
@@ -45,7 +47,7 @@ class SequenceNetwork(six.with_metaclass(abc.ABCMeta, object)):
             raise ValueError("Input `layer` must be an instance of `bob.learn.tensorflow.layers.Layer`")
         self.sequence_net[layer.name] = layer

-    def compute_graph(self, input_data, feature_layer=None):
+    def compute_graph(self, input_data, feature_layer=None, training=True):
         """
         Given the current network, return the Tensorflow graph
...
@@ -57,11 +59,13 @@ class SequenceNetwork(six.with_metaclass(abc.ABCMeta, object)):
         input_offset = input_data
         for k in self.sequence_net.keys():
             current_layer = self.sequence_net[k]
-            current_layer.create_variables(input_offset)
-            input_offset = current_layer.get_graph()

-            if feature_layer is not None and k == feature_layer:
-                return input_offset
+            if training or not isinstance(current_layer, Dropout):
+                current_layer.create_variables(input_offset)
+                input_offset = current_layer.get_graph()

+                if feature_layer is not None and k == feature_layer:
+                    return input_offset

         return input_offset
...
@@ -80,14 +84,14 @@ class SequenceNetwork(six.with_metaclass(abc.ABCMeta, object)):
         if feature_layer is None:
             feature_layer = self.default_feature_layer

-        return session.run([self.compute_graph(feature_placeholder, feature_layer)], feed_dict=feed_dict)[0]
+        return session.run([self.compute_graph(feature_placeholder, feature_layer, training=False)], feed_dict=feed_dict)[0]

     def dump_variables(self):
         variables = {}
         for k in self.sequence_net:
             # TODO: IT IS NOT SMART TESTING ALONG THIS PAGE
-            if not isinstance(self.sequence_net[k], MaxPooling):
+            if not isinstance(self.sequence_net[k], MaxPooling) and not isinstance(self.sequence_net[k], Dropout):
                 variables[self.sequence_net[k].W.name] = self.sequence_net[k].W
                 variables[self.sequence_net[k].b.name] = self.sequence_net[k].b
...
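This hunk is the core of the commit: compute_graph gains a training switch, and Dropout layers are built only when it is True and skipped entirely otherwise. Since tf.nn.dropout rescales the kept activations by 1/keep_prob during training (inverted dropout), skipping the layer at evaluation keeps the expected activations consistent. A sketch of the call pattern, with a hypothetical network and input shape:

import tensorflow as tf
from bob.learn.tensorflow.network import LenetDropout

net = LenetDropout(n_classes=10)  # hypothetical instance
data = tf.placeholder(tf.float32, shape=(None, 28, 28, 1))

train_graph = net.compute_graph(data)  # training=True by default: Dropout active
# at evaluation time the same method is called with training=False, so the
# Dropout layers are skipped (variable reuse is left to the surrounding trainer):
# eval_graph = net.compute_graph(data, training=False)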
bob/learn/tensorflow/network/__init__.py
...
@@ -4,6 +4,7 @@ __path__ = extend_path(__path__, __name__)
 from .SequenceNetwork import SequenceNetwork
 from .Lenet import Lenet
+from .LenetDropout import LenetDropout
 from .MLP import MLP

 # gets sphinx autodoc done right - don't remove it
...
bob/learn/tensorflow/script/train_mnist.py
...
@@ -64,7 +64,7 @@ def main():
     train_objects = db.objects(protocol="male", groups="world")
     train_labels = [o.client_id for o in train_objects]
     train_file_names = [o.make_path(
-        directory="/remote/lustre/2/temp/tpereira/FACEREC_EXPERIMENTS/mobio_male/lda/preprocessed",
+        directory="/idiap/user/tpereira/face/baselines/eigenface/preprocessed",
         extension=".hdf5")
                         for o in train_objects]
...
@@ -77,7 +77,7 @@ def main():
     validation_objects = db.objects(protocol="male", groups="dev")
     validation_labels = [o.client_id for o in validation_objects]
     validation_file_names = [o.make_path(
-        directory="/remote/lustre/2/temp/tpereira/FACEREC_EXPERIMENTS/mobio_male/lda/preprocessed",
+        directory="/idiap/user/tpereira/face/baselines/eigenface/preprocessed",
         extension=".hdf5")
                              for o in validation_objects]
...
bob/learn/tensorflow/script/train_mnist_siamese.py
...
@@ -23,7 +23,7 @@ import tensorflow as tf
 from .. import util
 SEED = 10
 from bob.learn.tensorflow.data import MemoryDataShuffler, TextDataShuffler
-from bob.learn.tensorflow.network import Lenet, MLP
+from bob.learn.tensorflow.network import Lenet, MLP, LenetDropout
 from bob.learn.tensorflow.trainers import SiameseTrainer
 from bob.learn.tensorflow.loss import ContrastiveLoss
 import numpy
...
@@ -39,7 +39,7 @@ def main():
     perc_train = 0.9

     # Loading data
-    mnist = True
+    mnist = False

     if mnist:
         train_data, train_labels, validation_data, validation_labels = \
...
@@ -66,7 +66,7 @@ def main():
     train_objects = db.objects(protocol="male", groups="world")
     train_labels = [o.client_id for o in train_objects]
     train_file_names = [o.make_path(
-        directory="/remote/lustre/2/temp/tpereira/FACEREC_EXPERIMENTS/mobio_male/lda/preprocessed",
+        directory="/idiap/user/tpereira/face/baselines/eigenface/preprocessed",
         extension=".hdf5")
                         for o in train_objects]
...
@@ -78,7 +78,7 @@ def main():
     validation_objects = db.objects(protocol="male", groups="dev")
     validation_labels = [o.client_id for o in validation_objects]
     validation_file_names = [o.make_path(
-        directory="/remote/lustre/2/temp/tpereira/FACEREC_EXPERIMENTS/mobio_male/lda/preprocessed",
+        directory="/idiap/user/tpereira/face/baselines/eigenface/preprocessed",
         extension=".hdf5")
                              for o in validation_objects]
...
@@ -92,7 +92,8 @@ def main():
     cnn = True
     if cnn:
-        lenet = Lenet(default_feature_layer="fc2", n_classes=n_classes)
+        lenet = Lenet(default_feature_layer="fc2", n_classes=n_classes, conv1_output=4, conv2_output=8, use_gpu=USE_GPU)
+        #lenet = LenetDropout(default_feature_layer="fc2", n_classes=n_classes, conv1_output=4, conv2_output=8, use_gpu=USE_GPU)

         loss = ContrastiveLoss()
         trainer = SiameseTrainer(architecture=lenet,
...
bob/learn/tensorflow/trainers/SiameseTrainer.py
...
@@ -45,7 +45,6 @@ class SiameseTrainer(Trainer):
                                              snapshot=snapshot)

-
     def train(self, train_data_shuffler, validation_data_shuffler=None):
         """
         Do the loop forward --> backward --|
...
@@ -116,8 +115,11 @@ class SiameseTrainer(Trainer):
                                           train_right_graph)

         # Preparing the optimizer
-        optimizer = self.optimizer.minimize(loss_train, global_step=tf.Variable(0))
+        step = tf.Variable(0)
+        self.optimizer._learning_rate = learning_rate
+        optimizer = self.optimizer.minimize(loss_train, global_step=step)
+        #optimizer = tf.train.MomentumOptimizer(learning_rate, momentum=0.99, use_locking=False,
+        #                                       name='Momentum').minimize(loss_train, global_step=step)

         print("Initializing !!")
         # Training
...
@@ -146,6 +148,7 @@ class SiameseTrainer(Trainer):
         for step in range(self.iterations):

             _, l, lr, summary = session.run([optimizer, loss_train, learning_rate, merged])
+            #_, l, lr = session.run([optimizer, loss_train, learning_rate])
             train_writer.add_summary(summary, step)

             if validation_data_shuffler is not None and step % self.snapshot == 0:
...
@@ -160,4 +163,3 @@ class SiameseTrainer(Trainer):
         thread_pool.request_stop()
         thread_pool.join(threads)
-
...
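The optimizer change keeps the global-step counter in a named variable instead of an anonymous tf.Variable(0) created inline, so other parts of the graph, such as a learning-rate schedule, can read the same counter. A standalone sketch of the pattern with a toy loss (not the trainer's actual code):

import tensorflow as tf

w = tf.Variable(1.0)
loss = tf.square(w - 3.0)  # toy loss, for illustration only

step = tf.Variable(0, trainable=False)  # shared counter
learning_rate = tf.train.exponential_decay(0.1, step, 100, 0.95)  # reads the same counter
train_op = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss, global_step=step)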