bob.learn.tensorflow, commit 1a517ecd
Authored Nov 11, 2016 by Tiago de Freitas Pereira
Parent: db9747e1

    Documenting

18 changed files
bob/learn/tensorflow/datashuffler/Memory.py

@@ -60,7 +60,7 @@ class Memory(Base):
         indexes = numpy.array(range(self.data.shape[0]))
         numpy.random.shuffle(indexes)

-        selected_data = self.data[indexes[0:self.batch_size], :, :, :]
+        selected_data = self.data[indexes[0:self.batch_size], ...]
         selected_labels = self.labels[indexes[0:self.batch_size]]

         # Applying the data augmentation
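An aside on the change above: for a 4-D array, indexing with `...` (Ellipsis) is equivalent to spelling out every remaining axis, but it no longer hard-codes the array's rank. A minimal sketch (the shapes here are illustrative, not from the library):

    import numpy

    a = numpy.zeros((8, 28, 28, 1))
    idx = numpy.array([3, 1, 5])

    # Ellipsis expands to "all remaining axes", so both selections pick
    # the same three images with every other axis kept whole.
    assert numpy.array_equal(a[idx, :, :, :], a[idx, ...])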
bob/learn/tensorflow/datashuffler/SiameseDisk.py

@@ -75,13 +75,10 @@ class SiameseDisk(Siamese, Disk):
         genuine = True
         for i in range(self.shape[0]):
             file_name, file_name_p = self.get_genuine_or_not(self.data, self.labels, genuine=genuine)
-            sample_l[i, ...] = self.load_from_file(str(file_name))
-            sample_r[i, ...] = self.load_from_file(str(file_name_p))
+            sample_l[i, ...] = self.normalize_sample(self.load_from_file(str(file_name)))
+            sample_r[i, ...] = self.normalize_sample(self.load_from_file(str(file_name_p)))

             labels_siamese[i] = not genuine
             genuine = not genuine

-        sample_l = self.normalize_sample(sample_l)
-        sample_r = self.normalize_sample(sample_r)
-
         return sample_l, sample_r, labels_siamese
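The edit above moves `normalize_sample` inside the loop, so each image is normalized as it is loaded rather than normalizing the stacked pair arrays after the loop; TripletDisk.py below gets the same treatment. A rough sketch of the difference in ordering, assuming a hypothetical per-image `normalize` (the real `normalize_sample` may behave differently):

    import numpy

    def normalize(x):
        # stand-in for normalize_sample; the actual behaviour may differ
        return (x - x.mean()) / (x.std() + 1e-8)

    batch = numpy.random.rand(4, 28, 28, 1)

    # Old ordering: one call on the stacked batch (statistics mix samples).
    whole_batch = normalize(batch)

    # New ordering: one call per sample (statistics stay per-image).
    per_sample = numpy.stack([normalize(s) for s in batch])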
bob/learn/tensorflow/datashuffler/TripletDisk.py

@@ -78,12 +78,8 @@ class TripletDisk(Triplet, Disk):
         for i in range(self.shape[0]):
             file_name_a, file_name_p, file_name_n = self.get_one_triplet(self.data, self.labels)
-            sample_a[i, ...] = self.load_from_file(str(file_name_a))
-            sample_p[i, ...] = self.load_from_file(str(file_name_p))
-            sample_n[i, ...] = self.load_from_file(str(file_name_n))
-
-        sample_a = self.normalize_sample(sample_a)
-        sample_p = self.normalize_sample(sample_p)
-        sample_n = self.normalize_sample(sample_n)
+            sample_a[i, ...] = self.normalize_sample(self.load_from_file(str(file_name_a)))
+            sample_p[i, ...] = self.normalize_sample(self.load_from_file(str(file_name_p)))
+            sample_n[i, ...] = self.normalize_sample(self.load_from_file(str(file_name_n)))

         return [sample_a, sample_p, sample_n]
bob/learn/tensorflow/network/SequenceNetwork.py

@@ -322,8 +322,8 @@ class SequenceNetwork(six.with_metaclass(abc.ABCMeta, object)):
         session = Session.instance().session

         self.sequence_net = pickle.loads(open(path + "_sequence_net.pickle").read())
-        #saver = tf.train.import_meta_graph(path + ".meta", clear_devices=clear_devices)
-        saver = tf.train.import_meta_graph(path + ".meta")
+        saver = tf.train.import_meta_graph(path + ".meta", clear_devices=clear_devices)
+        #saver = tf.train.import_meta_graph(path + ".meta")
         saver.restore(session, path)
         self.inference_graph = tf.get_collection("inference_graph")[0]
         self.inference_placeholder = tf.get_collection("inference_placeholder")[0]
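For context on the swap above: `clear_devices=True` asks `tf.train.import_meta_graph` to drop the device placements recorded in the saved meta-graph, so a model pinned to `/gpu:0` at save time can still be restored on a CPU-only machine. A minimal restore sketch against the TensorFlow 0.x-era API this code targets (the path is made up):

    import tensorflow as tf

    with tf.Session() as session:
        # Strip any "/gpu:N" assignments baked in at save time.
        saver = tf.train.import_meta_graph("./model.ckp.meta", clear_devices=True)
        saver.restore(session, "./model.ckp")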
bob/learn/tensorflow/network/VGG16_mod.py

@@ -71,10 +71,13 @@ class VGG16_mod(SequenceNetwork):
                  default_feature_layer="fc8",
                  seed=10,
+                 do_dropout=True,
                  use_gpu=False):

         super(VGG16_mod, self).__init__(default_feature_layer=default_feature_layer,
                                         use_gpu=use_gpu)

         # First convolutional block
         self.conv1_1_kernel_size = conv1_1_kernel_size

@@ -223,6 +226,9 @@ class VGG16_mod(SequenceNetwork):
                             ))
         self.add(AveragePooling(name="pooling5", strides=[1, 2, 2, 1]))

+        if do_dropout:
+            self.add(Dropout(name="dropout", keep_prob=0.4))
+
         self.add(FullyConnected(name="fc8", output_dim=n_classes,
                                 activation=None,
                                 weights_initialization=Xavier(seed=seed, use_gpu=self.use_gpu),
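A note on the `keep_prob=0.4` added above: in this generation of TensorFlow a dropout op keeps each activation with probability `keep_prob` and rescales the survivors by `1/keep_prob`, so the expected magnitude is unchanged during training. A one-liner illustrating the underlying op (a standalone sketch, not the library's `Dropout` layer):

    import tensorflow as tf

    x = tf.ones([4, 4])
    # Each element survives with probability 0.4 and is scaled by 1/0.4.
    y = tf.nn.dropout(x, keep_prob=0.4)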
bob/learn/tensorflow/test/test_cnn.py

@@ -8,12 +8,11 @@ from bob.learn.tensorflow.datashuffler import Memory, SiameseMemory, TripletMemo
 from bob.learn.tensorflow.network import Chopra
 from bob.learn.tensorflow.loss import BaseLoss, ContrastiveLoss, TripletLoss
 from bob.learn.tensorflow.trainers import Trainer, SiameseTrainer, TripletTrainer, constant
 from .test_cnn_scratch import validate_network
 # from ..analyzers import ExperimentAnalizer, SoftmaxAnalizer
-from bob.learn.tensorflow.util import load_mnist
+from bob.learn.tensorflow.utils import load_mnist
 import tensorflow as tf
 import bob.io.base
 import os
 import shutil
 from scipy.spatial.distance import cosine
 import bob.measure

@@ -28,7 +27,7 @@ iterations = 50
 seed = 10

-def dummy_experiment(data_s, architecture, session):
+def dummy_experiment(data_s, architecture):
     """
     Create a dummy experiment and return the EER
     """

@@ -38,12 +37,12 @@ def dummy_experiment(data_s, architecture, session):
     # Extracting features for enrollment
     enroll_data, enroll_labels = data_shuffler.get_batch()
-    enroll_features = architecture(enroll_data, session=session)
+    enroll_features = architecture(enroll_data)
     del enroll_data

     # Extracting features for probing
     probe_data, probe_labels = data_shuffler.get_batch()
-    probe_features = architecture(probe_data, session=session)
+    probe_features = architecture(probe_data)
     del probe_data

     # Creating models

@@ -102,26 +101,14 @@ def test_cnn_trainer():
                       prefetch=False,
                       temp_dir=directory)
     trainer.train(train_data_shuffler)
-    del trainer  # Just to clean tf.variables
-
-    with tf.Session() as session:
-        # Testing
-        chopra = Chopra(seed=seed, fc1_output=10)
-        chopra.load(session, os.path.join(directory, "model.ckp"))
-
-        validation_data_shuffler = Memory(validation_data, validation_labels,
-                                          input_shape=[28, 28, 1],
-                                          batch_size=validation_batch_size)
-        [data, labels] = validation_data_shuffler.get_batch()
-        predictions = chopra(data, session=session)
-        accuracy = 100. * numpy.sum(numpy.argmax(predictions, 1) == labels) / predictions.shape[0]
-
-        # At least 80% of accuracy
-        assert accuracy > 80.
-        shutil.rmtree(directory)
-        del chopra
+    accuracy = validate_network(validation_data, validation_labels, architecture)
+
+    # At least 80% of accuracy
+    assert accuracy > 80.
+    shutil.rmtree(directory)
+    del trainer
+    del architecture


 def test_siamesecnn_trainer():

@@ -155,19 +142,15 @@ def test_siamesecnn_trainer():
                              temp_dir=directory)
     trainer.train(train_data_shuffler)
-    del trainer  # Just to clean tf.variables
-
-    with tf.Session() as session:
-        # Testing
-        chopra = Chopra(seed=seed, fc1_output=10)
-        chopra.load(session, os.path.join(directory, "model.ckp"))
-        eer = dummy_experiment(validation_data_shuffler, chopra, session)
-
-        # At least 80% of accuracy
-        assert eer < 0.25
-        shutil.rmtree(directory)
-        del chopra
+    eer = dummy_experiment(validation_data_shuffler, architecture)
+
+    # At least 80% of accuracy
+    assert eer < 0.25
+    shutil.rmtree(directory)
+
+    del architecture
+    del trainer  # Just to clean tf.variables


 def test_tripletcnn_trainer():

@@ -201,17 +184,13 @@ def test_tripletcnn_trainer():
                              temp_dir=directory)
     trainer.train(train_data_shuffler)
-    del trainer  # Just to clean tf.variables
-
-    with tf.Session() as session:
-        # Testing
-        chopra = Chopra(seed=seed, fc1_output=10)
-        chopra.load(session, os.path.join(directory, "model.ckp"))
-        eer = dummy_experiment(validation_data_shuffler, chopra, session)
-
-        # At least 80% of accuracy
-        assert eer < 0.25
-        shutil.rmtree(directory)
-        del chopra
+    # Testing
+    eer = dummy_experiment(validation_data_shuffler, architecture)
+
+    # At least 80% of accuracy
+    assert eer < 0.25
+    shutil.rmtree(directory)
+
+    del architecture
+    del trainer  # Just to clean tf.variables
bob/learn/tensorflow/test/test_cnn_load.py (new file, mode 100644)

#!/usr/bin/env python
# vim: set fileencoding=utf-8 :
# @author: Tiago de Freitas Pereira <tiago.pereira@idiap.ch>
# @date: Thu 13 Oct 2016 13:35 CEST

"""
Some unit tests that create networks on the fly
"""

import numpy
import pkg_resources
from bob.learn.tensorflow.utils import load_mnist
from bob.learn.tensorflow.network import SequenceNetwork
from bob.learn.tensorflow.datashuffler import Memory


def validate_network(validation_data, validation_labels, network):
    # Testing
    validation_data_shuffler = Memory(validation_data, validation_labels,
                                      input_shape=[28, 28, 1],
                                      batch_size=400)

    [data, labels] = validation_data_shuffler.get_batch()
    predictions = network.predict(data)
    accuracy = 100. * numpy.sum(predictions == labels) / predictions.shape[0]

    return accuracy

"""
def test_load_test_cnn():
    _, _, validation_data, validation_labels = load_mnist()

    # Creating datashufflers
    validation_data = numpy.reshape(validation_data, (validation_data.shape[0], 28, 28, 1))
    network = SequenceNetwork()
    network.load(pkg_resources.resource_filename(__name__, 'data/cnn_mnist/model.ckp'))
    accuracy = validate_network(validation_data, validation_labels, network)
    assert accuracy > 80
    del network
"""
bob/learn/tensorflow/test/test_cnn_pretrained_model.py

@@ -9,7 +9,10 @@ import os
 from bob.learn.tensorflow.datashuffler import Memory, ImageAugmentation
 from bob.learn.tensorflow.loss import BaseLoss
 from bob.learn.tensorflow.trainers import Trainer, constant
-from bob.learn.tensorflow.util import load_mnist
+from bob.learn.tensorflow.utils import load_mnist
+from bob.learn.tensorflow.network import SequenceNetwork
+from bob.learn.tensorflow.layers import Conv2D, FullyConnected
 import tensorflow as tf
 import shutil

@@ -22,10 +25,36 @@ validation_batch_size = 400
 iterations = 50
 seed = 10

-from test_cnn_scratch import scratch_network, validate_network
+
+def scratch_network():
+    # Creating a random network
+    scratch = SequenceNetwork(default_feature_layer="fc1")
+    scratch.add(Conv2D(name="conv1", kernel_size=3, filters=10,
+                       activation=tf.nn.tanh,
+                       batch_norm=False))
+    scratch.add(FullyConnected(name="fc1", output_dim=10,
+                               activation=None,
+                               batch_norm=False))
+
+    return scratch
+
+
+def validate_network(validation_data, validation_labels, network):
+    # Testing
+    validation_data_shuffler = Memory(validation_data, validation_labels,
+                                      input_shape=[28, 28, 1],
+                                      batch_size=validation_batch_size)
+
+    [data, labels] = validation_data_shuffler.get_batch()
+    predictions = network.predict(data)
+    accuracy = 100. * numpy.sum(predictions == labels) / predictions.shape[0]
+
+    return accuracy


-def test_cnn_trainer_scratch():
+def test_cnn_pretrained():
     train_data, train_labels, validation_data, validation_labels = load_mnist()
     train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))

@@ -55,8 +84,7 @@ def test_cnn_trainer_scratch():
                       learning_rate=constant(0.05, name="lr"),
                       temp_dir=directory)
     trainer.train(train_data_shuffler)

-    accuracy = validate_network(validation_data, validation_labels, directory)
+    accuracy = validate_network(validation_data, validation_labels, scratch)
     assert accuracy > 85
     del scratch

@@ -77,7 +105,12 @@ def test_cnn_trainer_scratch():
     trainer.train(train_data_shuffler)

-    accuracy = validate_network(validation_data, validation_labels, directory2)
-    assert accuracy > 85
+    accuracy = validate_network(validation_data, validation_labels, scratch)
+    assert accuracy > 90

     shutil.rmtree(directory)
     shutil.rmtree(directory2)

     del scratch
+    del loss
+    del trainer
bob/learn/tensorflow/test/test_cnn_scratch.py

@@ -7,12 +7,11 @@ import numpy
 import bob.io.base
 import os
 from bob.learn.tensorflow.datashuffler import Memory, ImageAugmentation
 from bob.learn.tensorflow.initialization import Xavier, Constant
 from bob.learn.tensorflow.network import SequenceNetwork
 from bob.learn.tensorflow.loss import BaseLoss
 from bob.learn.tensorflow.trainers import Trainer
 from bob.learn.tensorflow.utils import load_mnist
-from bob.learn.tensorflow.layers import Conv2D, FullyConnected, MaxPooling
+from bob.learn.tensorflow.layers import Conv2D, FullyConnected
 import tensorflow as tf
 import shutil

@@ -33,13 +32,9 @@ def scratch_network():
     scratch.add(Conv2D(name="conv1", kernel_size=3, filters=10,
                        activation=tf.nn.tanh,
-                       weights_initialization=Xavier(seed=seed, use_gpu=False),
-                       bias_initialization=Constant(use_gpu=False),
                        batch_norm=False))
     scratch.add(FullyConnected(name="fc1", output_dim=10,
                                activation=None,
-                               weights_initialization=Xavier(seed=seed, use_gpu=False),
-                               bias_initialization=Constant(use_gpu=False),
                                batch_norm=False))
bob/learn/tensorflow/test/test_datashuffler.py

@@ -6,7 +6,7 @@
 import numpy
 from bob.learn.tensorflow.datashuffler import Memory, SiameseMemory, TripletMemory, Disk, SiameseDisk, TripletDisk
 import pkg_resources
-from ..util import load_mnist
+from bob.learn.tensorflow.utils import load_mnist
 import os

 """
bob/learn/tensorflow/test/test_datashuffler_augmentation.py

@@ -6,7 +6,7 @@
 import numpy
 from bob.learn.tensorflow.datashuffler import Memory, SiameseMemory, TripletMemory, Disk, SiameseDisk, TripletDisk, ImageAugmentation
 import pkg_resources
-from ..util import load_mnist
+from bob.learn.tensorflow.utils import load_mnist
 import os

 """
bob/learn/tensorflow/test/test_dnn.py

@@ -8,13 +8,9 @@ from bob.learn.tensorflow.datashuffler import Memory
 from bob.learn.tensorflow.network import MLP
 from bob.learn.tensorflow.loss import BaseLoss
 from bob.learn.tensorflow.trainers import Trainer, constant
 # from ..analyzers import ExperimentAnalizer, SoftmaxAnalizer
-from bob.learn.tensorflow.util import load_mnist
+from bob.learn.tensorflow.utils import load_mnist
 import tensorflow as tf
 import bob.io.base
 import os
 import shutil
 import bob.measure

 """
 Some unit tests for the datashuffler

@@ -26,14 +22,25 @@ iterations = 200
 seed = 10


+def validate_network(validation_data, validation_labels, network):
+    # Testing
+    validation_data_shuffler = Memory(validation_data, validation_labels,
+                                      input_shape=[784],
+                                      batch_size=validation_batch_size)
+
+    [data, labels] = validation_data_shuffler.get_batch()
+    predictions = network.predict(data)
+    accuracy = 100. * numpy.sum(predictions == labels) / predictions.shape[0]
+
+    return accuracy
+
+
 def test_dnn_trainer():
     train_data, train_labels, validation_data, validation_labels = load_mnist()
     train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
     validation_data = numpy.reshape(validation_data, (validation_data.shape[0], 28, 28, 1))

     # Creating datashufflers
     train_data_shuffler = Memory(train_data, train_labels,
-                                 input_shape=[28, 28, 1],
+                                 input_shape=[784],
                                  batch_size=batch_size)

     directory = "./temp/dnn"

@@ -53,21 +60,12 @@ def test_dnn_trainer():
                       learning_rate=constant(0.05, name="dnn_lr"),
                       temp_dir=directory)
     trainer.train(train_data_shuffler)
-    del trainer  # Just to clean the variables
-
-    with tf.Session() as session:
-        # Testing
-        mlp = MLP(10, hidden_layers=[15, 20])
-        mlp.load(session, os.path.join(directory, "model.ckp"))
-
-        validation_data_shuffler = Memory(validation_data, validation_labels,
-                                          input_shape=[28, 28, 1],
-                                          batch_size=validation_batch_size)
-        [data, labels] = validation_data_shuffler.get_batch()
-        predictions = mlp(data, session=session)
-        accuracy = 100. * numpy.sum(numpy.argmax(predictions, 1) == labels) / predictions.shape[0]
-
-        # At least 50% of accuracy for the DNN
-        assert accuracy > 50.
-        shutil.rmtree(directory)
-        session.close()
+    accuracy = validate_network(validation_data, validation_labels, architecture)

+    # At least 50% of accuracy for the DNN
+    assert accuracy > 50.
+    shutil.rmtree(directory)
+
+    del architecture
+    del trainer  # Just to clean the variables
bob/learn/tensorflow/trainers/SiameseTrainer.py

@@ -49,9 +49,6 @@ class SiameseTrainer(Trainer):
                  temp_dir="cnn",

                  # Learning rate
-                 #base_learning_rate=0.001,
-                 #weight_decay=0.9,
-                 #decay_steps=1000,
                  learning_rate=constant(),

                  ###### training options ##########

@@ -76,9 +73,6 @@ class SiameseTrainer(Trainer):
                  temp_dir=temp_dir,

                  # Learning rate
-                 #base_learning_rate=base_learning_rate,
-                 #weight_decay=weight_decay,
-                 #decay_steps=decay_steps,
                  learning_rate=learning_rate,

                  ###### training options ##########

@@ -207,7 +201,7 @@ class SiameseTrainer(Trainer):
         return feed_dict

-    def fit(self, session, step):
+    def fit(self, step):
         """
         Run one iteration (`forward` and `backward`)

@@ -217,19 +211,19 @@ class SiameseTrainer(Trainer):
         """
         if self.prefetch:
-            _, l, bt_class, wt_class, lr, summary = session.run([self.optimizer,
-                                                                 self.training_graph, self.between_class_graph_train,
-                                                                 self.within_class_graph_train, self.learning_rate,
-                                                                 self.summaries_train])
+            _, l, bt_class, wt_class, lr, summary = self.session.run([self.optimizer,
+                                                                      self.training_graph, self.between_class_graph_train,
+                                                                      self.within_class_graph_train, self.learning_rate,
+                                                                      self.summaries_train])
         else:
             feed_dict = self.get_feed_dict(self.train_data_shuffler)
-            _, l, bt_class, wt_class, lr, summary = session.run([self.optimizer,
-                                                                 self.training_graph, self.between_class_graph_train,
-                                                                 self.within_class_graph_train, self.learning_rate,
-                                                                 self.summaries_train], feed_dict=feed_dict)
+            _, l, bt_class, wt_class, lr, summary = self.session.run([self.optimizer,
+                                                                      self.training_graph, self.between_class_graph_train,
+                                                                      self.within_class_graph_train, self.learning_rate,
+                                                                      self.summaries_train], feed_dict=feed_dict)

         logger.info("Loss training set step={0} = {1}".format(step, l))
         self.train_summary_writter.add_summary(summary, step)

-    def compute_validation(self, session, data_shuffler, step):
+    def compute_validation(self, data_shuffler, step):
         """
         Computes the loss in the validation set

@@ -245,9 +239,9 @@ class SiameseTrainer(Trainer):
             self.validation_graph = self.compute_graph(data_shuffler, name="validation", training=False)

         feed_dict = self.get_feed_dict(data_shuffler)
-        l, bt_class, wt_class = session.run([self.validation_graph,
-                                             self.between_class_graph_validation,
-                                             self.within_class_graph_validation],
-                                            feed_dict=feed_dict)
+        l, bt_class, wt_class = self.session.run([self.validation_graph,
+                                                  self.between_class_graph_validation,
+                                                  self.within_class_graph_validation],
+                                                 feed_dict=feed_dict)

         summaries = []
         summaries.append(summary_pb2.Summary.Value(tag="loss", simple_value=float(l)))

@@ -268,7 +262,7 @@ class SiameseTrainer(Trainer):
         tf.scalar_summary('lr', self.learning_rate, name="train")
         return tf.merge_all_summaries()

-    def load_and_enqueue(self, session):
+    def load_and_enqueue(self):
         """
         Injecting data in the place holder queue

@@ -285,4 +279,4 @@ class SiameseTrainer(Trainer):
                          placeholder_right_data: batch_right,
                          placeholder_label: labels}

-            session.run(self.enqueue_op, feed_dict=feed_dict)
+            self.session.run(self.enqueue_op, feed_dict=feed_dict)
bob/learn/tensorflow/trainers/Trainer.py

@@ -306,7 +306,7 @@ class Trainer(object):
         """
-        saver = self.architecture.load(self.session, self.model_from_file)
+        saver = self.architecture.load(self.model_from_file)

         # Loading training graph
         self.training_graph = tf.get_collection("training_graph")[0]

@@ -357,21 +357,16 @@ class Trainer(object):
         logger.info("Initializing !!")

         config = tf.ConfigProto(log_device_placement=True,
                                 gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.333))
         config.gpu_options.allow_growth = True

         # Pickle the architecture to save
         self.architecture.pickle_net(train_data_shuffler.deployment_shape)

         #with tf.Session(config=config) as session:
         Session.create()
         self.session = Session.instance().session

         # Loading a pretrained model
         if self.model_from_file != "":
             logger.info("Loading pretrained model from {0}".format(self.model_from_file))
-            saver = self.bootstrap_graphs_fromfile(self.session, train_data_shuffler, validation_data_shuffler)
+            saver = self.bootstrap_graphs_fromfile(train_data_shuffler, validation_data_shuffler)
         else:
             # Bootstraping all the graphs
             self.bootstrap_graphs(train_data_shuffler, validation_data_shuffler)

@@ -408,7 +403,7 @@ class Trainer(object):
         for step in range(self.iterations):
             start = time.time()
-            self.fit(self.session, step)
+            self.fit(step)
             end = time.time()
             summary = summary_pb2.Summary.Value(tag="elapsed_time", simple_value=float(end-start))
             self.train_summary_writter.add_summary(summary_pb2.Summary(value=[summary]), step)
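The common thread in the trainer changes: the `session` argument disappears from `fit`, `compute_validation`, `load_and_enqueue` and friends because the trainer now creates one session up front (`Session.create()`) and every method reads `self.session`. A minimal sketch of what such a `Session` singleton might look like; this is an assumption about the helper's shape, not the library's actual implementation:

    import tensorflow as tf

    class Session(object):
        """Process-wide holder for a single tf.Session (hypothetical sketch)."""
        _instance = None

        def __init__(self):
            self.session = tf.Session()

        @classmethod
        def create(cls):
            # Replace any previous session with a fresh one.
            cls._instance = cls()
            return cls._instance

        @classmethod
        def instance(cls):
            if cls._instance is None:
                cls.create()
            return cls._instance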
bob/learn/tensorflow/trainers/TripletTrainer.py

@@ -49,9 +49,6 @@ class TripletTrainer(Trainer):
                  temp_dir="cnn",

                  # Learning rate
-                 #base_learning_rate=0.001,
-                 #weight_decay=0.9,
-                 #decay_steps=1000,
                  learning_rate=constant(),

                  ###### training options ##########

@@ -76,9 +73,6 @@ class TripletTrainer(Trainer):
                  temp_dir=temp_dir,

                  # Learning rate
-                 #base_learning_rate=base_learning_rate,
-                 #weight_decay=weight_decay,
-                 #decay_steps=decay_steps,
                  learning_rate=learning_rate,

                  ###### training options ##########

@@ -213,7 +207,7 @@ class TripletTrainer(Trainer):
         return feed_dict

-    def fit(self, session, step):
+    def fit(self, step):
         """
         Run one iteration (`forward` and `backward`)

@@ -223,21 +217,21 @@ class TripletTrainer(Trainer):
         """