Skip to content
GitLab
Menu
Projects
Groups
Snippets
Help
Help
Support
Community forum
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
Menu
Open sidebar
bob
bob.learn.tensorflow
Commits
6ea79ebd
Commit
6ea79ebd
authored
Sep 07, 2017
by
Tiago de Freitas Pereira
Browse files
Integration with TFRecords and created tests
parent
ce4e4458
Pipeline
#12007
failed with stages
in 11 minutes and 9 seconds
Changes
15
Pipelines
1
Hide whitespace changes
Inline
Side-by-side
bob/learn/__init__.py
View file @
6ea79ebd
# see https://docs.python.org/3/library/pkgutil.html
from
pkgutil
import
extend_path
__path__
=
extend_path
(
__path__
,
__name__
)
\ No newline at end of file
__path__
=
extend_path
(
__path__
,
__name__
)
bob/learn/tensorflow/datashuffler/TFRecord.py
View file @
6ea79ebd
...
...
@@ -34,7 +34,7 @@ class TFRecord(object):
self
.
input_shape
=
tuple
(
input_shape
)
# Prefetch variables
self
.
prefetch
=
Fals
e
self
.
prefetch
=
Tru
e
self
.
prefetch_capacity
=
prefetch_capacity
self
.
prefetch_threads
=
prefetch_threads
...
...
bob/learn/tensorflow/loss/BaseLoss.py
View file @
6ea79ebd
...
...
@@ -22,3 +22,16 @@ class BaseLoss(object):
def
__call__
(
self
,
graph
,
label
):
return
self
.
operation
(
self
.
loss
(
logits
=
graph
,
labels
=
label
),
name
=
self
.
name
)
class
MeanSoftMaxLoss
(
object
):
"""
Mean softmax loss
"""
def
__init__
(
self
,
name
=
"loss"
):
self
.
name
=
name
def
__call__
(
self
,
graph
,
label
):
return
tf
.
reduce_mean
(
tf
.
nn
.
sparse_softmax_cross_entropy_with_logits
(
logits
=
graph
,
labels
=
label
),
name
=
self
.
name
)
bob/learn/tensorflow/loss/__init__.py
View file @
6ea79ebd
from
.BaseLoss
import
BaseLoss
from
.BaseLoss
import
BaseLoss
,
MeanSoftMaxLoss
from
.ContrastiveLoss
import
ContrastiveLoss
from
.TripletLoss
import
TripletLoss
from
.TripletAverageLoss
import
TripletAverageLoss
...
...
@@ -27,6 +27,7 @@ __appropriate__(
TripletFisherLoss
,
TripletAverageLoss
,
NegLogLoss
,
MeanSoftMaxLoss
)
__all__
=
[
_
for
_
in
dir
()
if
not
_
.
startswith
(
'_'
)]
...
...
bob/learn/tensorflow/script/compute_statistics.py
View file @
6ea79ebd
...
...
@@ -25,6 +25,7 @@ def process_images(base_path, extension, shape):
files
=
os
.
listdir
(
base_path
)
sum_data
=
numpy
.
zeros
(
shape
=
shape
)
print
(
"Processing {0}"
.
format
(
base_path
))
count
=
0
for
f
in
files
:
path
=
os
.
path
.
join
(
base_path
,
f
)
...
...
@@ -46,7 +47,7 @@ def main():
BASE_PATH
=
args
[
'<base_path>'
]
EXTENSION
=
args
[
'--extension'
]
SHAPE
=
[
1
,
224
,
224
]
SHAPE
=
[
3
,
224
,
224
]
count
,
sum_data
=
process_images
(
BASE_PATH
,
EXTENSION
,
SHAPE
)
...
...
bob/learn/tensorflow/test/test_cnn.py
View file @
6ea79ebd
...
...
@@ -22,9 +22,9 @@ import bob.measure
Some unit tests for the datashuffler
"""
batch_size
=
32
batch_size
=
16
validation_batch_size
=
400
iterations
=
3
00
iterations
=
2
00
seed
=
10
numpy
.
random
.
seed
(
seed
)
...
...
@@ -75,6 +75,7 @@ def dummy_experiment(data_s, embedding):
def
test_cnn_trainer
():
tf
.
reset_default_graph
()
# Loading data
train_data
,
train_labels
,
validation_data
,
validation_labels
=
load_mnist
()
...
...
@@ -117,14 +118,17 @@ def test_cnn_trainer():
# Using embedding to compute the accuracy
accuracy
=
validate_network
(
embedding
,
validation_data
,
validation_labels
)
# At least
8
0% of accuracy
assert
accuracy
>
5
0.
# At least
2
0% of accuracy
assert
accuracy
>
2
0.
shutil
.
rmtree
(
directory
)
del
trainer
del
graph
tf
.
reset_default_graph
()
assert
len
(
tf
.
global_variables
())
==
0
def
test_lightcnn_trainer
():
tf
.
reset_default_graph
()
# generating fake data
train_data
=
numpy
.
random
.
normal
(
0
,
0.2
,
size
=
(
100
,
128
,
128
,
1
))
...
...
@@ -158,7 +162,7 @@ def test_lightcnn_trainer():
# One graph trainer
trainer
=
Trainer
(
train_data_shuffler
,
iterations
=
5
,
iterations
=
4
,
analizer
=
None
,
temp_dir
=
directory
)
...
...
@@ -176,11 +180,13 @@ def test_lightcnn_trainer():
shutil
.
rmtree
(
directory
)
del
trainer
del
graph
tf
.
reset_default_graph
()
assert
len
(
tf
.
global_variables
())
==
0
def
test_siamesecnn_trainer
():
tf
.
reset_default_graph
()
train_data
,
train_labels
,
validation_data
,
validation_labels
=
load_mnist
()
train_data
=
numpy
.
reshape
(
train_data
,
(
train_data
.
shape
[
0
],
28
,
28
,
1
))
validation_data
=
numpy
.
reshape
(
validation_data
,
(
validation_data
.
shape
[
0
],
28
,
28
,
1
))
...
...
@@ -224,9 +230,13 @@ def test_siamesecnn_trainer():
del
architecture
del
trainer
# Just to clean tf.variables
tf
.
reset_default_graph
()
assert
len
(
tf
.
global_variables
())
==
0
def
test_tripletcnn_trainer
():
tf
.
reset_default_graph
()
train_data
,
train_labels
,
validation_data
,
validation_labels
=
load_mnist
()
train_data
=
numpy
.
reshape
(
train_data
,
(
train_data
.
shape
[
0
],
28
,
28
,
1
))
validation_data
=
numpy
.
reshape
(
validation_data
,
(
validation_data
.
shape
[
0
],
28
,
28
,
1
))
...
...
@@ -265,7 +275,7 @@ def test_tripletcnn_trainer():
loss
=
loss
,
learning_rate
=
constant
(
0.01
,
name
=
"regular_lr"
),
optimizer
=
tf
.
train
.
GradientDescentOptimizer
(
0.01
),)
trainer
.
train
(
train_data_shuffler
)
trainer
.
train
()
embedding
=
Embedding
(
train_data_shuffler
(
"data"
,
from_queue
=
False
)[
'anchor'
],
graph
[
'anchor'
])
eer
=
dummy_experiment
(
validation_data_shuffler
,
embedding
)
...
...
@@ -274,4 +284,6 @@ def test_tripletcnn_trainer():
del
architecture
del
trainer
# Just to clean tf.variables
tf
.
reset_default_graph
()
assert
len
(
tf
.
global_variables
())
==
0
bob/learn/tensorflow/test/test_cnn_prefetch.py
View file @
6ea79ebd
...
...
@@ -25,12 +25,12 @@ Some unit tests for the datashuffler
batch_size
=
32
validation_batch_size
=
400
iterations
=
3
00
iterations
=
1
00
seed
=
10
def
test_cnn_trainer
():
tf
.
reset_default_graph
()
# Loading data
train_data
,
train_labels
,
validation_data
,
validation_labels
=
load_mnist
()
...
...
@@ -83,9 +83,14 @@ def test_cnn_trainer():
del
graph
del
embedding
tf
.
reset_default_graph
()
assert
len
(
tf
.
global_variables
())
==
0
def
test_siamesecnn_trainer
():
"""
tf.reset_default_graph()
train_data, train_labels, validation_data, validation_labels = load_mnist()
train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
validation_data = numpy.reshape(validation_data, (validation_data.shape[0], 28, 28, 1))
...
...
@@ -128,15 +133,20 @@ def test_siamesecnn_trainer():
embedding = Embedding(validation_data_shuffler("data", from_queue=False),
architecture(validation_data_shuffler("data", from_queue=False), reuse=True))
eer = dummy_experiment(validation_data_shuffler, embedding)
assert
eer
<
0.
1
5
assert eer < 0.
2
5
shutil.rmtree(directory)
del architecture
del trainer # Just to clean tf.variables
tf.reset_default_graph()
assert len(tf.global_variables())==0
"""
assert
==
True
def
test_tripletcnn_trainer
():
"""
tf.reset_default_graph()
train_data, train_labels, validation_data, validation_labels = load_mnist()
train_data = numpy.reshape(train_data, (train_data.shape[0], 28, 28, 1))
validation_data = numpy.reshape(validation_data, (validation_data.shape[0], 28, 28, 1))
...
...
@@ -176,15 +186,18 @@ def test_tripletcnn_trainer():
loss=loss,
learning_rate=constant(0.01, name="regular_lr"),
optimizer=tf.train.GradientDescentOptimizer(0.01),)
trainer
.
train
(
train_data_shuffler
)
trainer.train()
embedding = Embedding(validation_data_shuffler("data", from_queue=False),
architecture(validation_data_shuffler("data", from_queue=False), reuse=True))
eer = dummy_experiment(validation_data_shuffler, embedding)
assert
eer
<
0.
1
5
assert eer < 0.
2
5
shutil.rmtree(directory)
del architecture
del trainer # Just to clean tf.variables
tf.reset_default_graph()
assert len(tf.global_variables())==0
"""
assert
==
True
bob/learn/tensorflow/test/test_cnn_pretrained_model.py
View file @
6ea79ebd
...
...
@@ -24,7 +24,7 @@ Some unit tests that create networks on the fly and load variables
batch_size
=
16
validation_batch_size
=
400
iterations
=
25
0
iterations
=
10
0
seed
=
10
...
...
@@ -50,6 +50,8 @@ def scratch_network(input_pl, reuse=False):
def
test_cnn_pretrained
():
tf
.
reset_default_graph
()
# Preparing input data
train_data
,
train_labels
,
validation_data
,
validation_labels
=
load_mnist
()
train_data
=
numpy
.
reshape
(
train_data
,
(
train_data
.
shape
[
0
],
28
,
28
,
1
))
...
...
@@ -84,7 +86,7 @@ def test_cnn_pretrained():
trainer
.
train
()
accuracy
=
validate_network
(
embedding
,
validation_data
,
validation_labels
)
assert
accuracy
>
7
0
assert
accuracy
>
2
0
tf
.
reset_default_graph
()
del
graph
...
...
@@ -96,7 +98,7 @@ def test_cnn_pretrained():
# One graph trainer
trainer
=
Trainer
(
train_data_shuffler
,
iterations
=
iterations
*
3
,
iterations
=
iterations
,
analizer
=
None
,
temp_dir
=
directory
)
...
...
@@ -104,14 +106,18 @@ def test_cnn_pretrained():
trainer
.
train
()
embedding
=
Embedding
(
trainer
.
data_ph
,
trainer
.
graph
)
accuracy
=
validate_network
(
embedding
,
validation_data
,
validation_labels
)
assert
accuracy
>
7
0
assert
accuracy
>
5
0
shutil
.
rmtree
(
directory
)
del
loss
del
trainer
tf
.
reset_default_graph
()
assert
len
(
tf
.
global_variables
())
==
0
def
test_triplet_cnn_pretrained
():
tf
.
reset_default_graph
()
train_data
,
train_labels
,
validation_data
,
validation_labels
=
load_mnist
()
train_data
=
numpy
.
reshape
(
train_data
,
(
train_data
.
shape
[
0
],
28
,
28
,
1
))
...
...
@@ -155,7 +161,7 @@ def test_triplet_cnn_pretrained():
eer
=
dummy_experiment
(
validation_data_shuffler
,
embedding
)
# The result is not so good
assert
eer
<
0.
2
5
assert
eer
<
0.
3
5
del
graph
del
loss
...
...
@@ -174,13 +180,17 @@ def test_triplet_cnn_pretrained():
eer
=
dummy_experiment
(
validation_data_shuffler
,
embedding
)
# Now it is better
assert
eer
<
0.
2
0
assert
eer
<
0.
3
0
shutil
.
rmtree
(
directory
)
del
trainer
tf
.
reset_default_graph
()
assert
len
(
tf
.
global_variables
())
==
0
def
test_siamese_cnn_pretrained
():
tf
.
reset_default_graph
()
train_data
,
train_labels
,
validation_data
,
validation_labels
=
load_mnist
()
train_data
=
numpy
.
reshape
(
train_data
,
(
train_data
.
shape
[
0
],
28
,
28
,
1
))
...
...
@@ -230,7 +240,7 @@ def test_siamese_cnn_pretrained():
del
trainer
trainer
=
SiameseTrainer
(
train_data_shuffler
,
iterations
=
iterations
*
2
,
iterations
=
iterations
,
analizer
=
None
,
temp_dir
=
directory
)
...
...
@@ -240,8 +250,10 @@ def test_siamese_cnn_pretrained():
#embedding = Embedding(train_data_shuffler("data", from_queue=False)['left'], trainer.graph['left'])
embedding
=
Embedding
(
trainer
.
data_ph
[
'left'
],
trainer
.
graph
[
'left'
])
eer
=
dummy_experiment
(
validation_data_shuffler
,
embedding
)
assert
eer
<
0.1
0
assert
eer
<
0.1
4
shutil
.
rmtree
(
directory
)
del
trainer
tf
.
reset_default_graph
()
assert
len
(
tf
.
global_variables
())
==
0
bob/learn/tensorflow/test/test_cnn_scratch.py
View file @
6ea79ebd
...
...
@@ -6,7 +6,7 @@
import
numpy
from
bob.learn.tensorflow.datashuffler
import
Memory
,
ImageAugmentation
,
ScaleFactor
,
Linear
,
TFRecord
from
bob.learn.tensorflow.network
import
Embedding
from
bob.learn.tensorflow.loss
import
BaseLoss
from
bob.learn.tensorflow.loss
import
BaseLoss
,
MeanSoftMaxLoss
from
bob.learn.tensorflow.trainers
import
Trainer
,
constant
from
bob.learn.tensorflow.utils
import
load_mnist
import
tensorflow
as
tf
...
...
@@ -19,7 +19,7 @@ Some unit tests that create networks on the fly
batch_size
=
16
validation_batch_size
=
400
iterations
=
3
00
iterations
=
2
00
seed
=
10
directory
=
"./temp/cnn_scratch"
...
...
@@ -78,7 +78,7 @@ def test_cnn_trainer_scratch():
embedding
=
Embedding
(
train_data_shuffler
(
"data"
,
from_queue
=
False
),
graph
)
# Loss for the softmax
loss
=
BaseLoss
(
tf
.
nn
.
sparse_s
oft
m
ax
_cross_entropy_with_logits
,
tf
.
reduce_mean
)
loss
=
MeanS
oft
M
ax
Loss
(
)
# One graph trainer
trainer
=
Trainer
(
train_data_shuffler
,
...
...
@@ -94,16 +94,19 @@ def test_cnn_trainer_scratch():
trainer
.
train
()
accuracy
=
validate_network
(
embedding
,
validation_data
,
validation_labels
)
assert
accuracy
>
7
0
assert
accuracy
>
2
0
shutil
.
rmtree
(
directory
)
del
trainer
tf
.
reset_default_graph
()
assert
len
(
tf
.
global_variables
())
==
0
def
test_cnn_trainer_scratch_tfrecord
():
tf
.
reset_default_graph
()
train_data
,
train_labels
,
validation_data
,
validation_labels
=
load_mnist
()
train_data
=
train_data
.
astype
(
"float32"
)
*
0.00390625
validation_data
=
validation_data
.
astype
(
"float32"
)
*
0.00390625
def
_bytes_feature
(
value
):
return
tf
.
train
.
Feature
(
bytes_list
=
tf
.
train
.
BytesList
(
value
=
[
value
]))
...
...
@@ -111,16 +114,16 @@ def test_cnn_trainer_scratch_tfrecord():
def
_int64_feature
(
value
):
return
tf
.
train
.
Feature
(
int64_list
=
tf
.
train
.
Int64List
(
value
=
[
value
]))
def
create_tf_record
(
tfrecords_filename
):
def
create_tf_record
(
tfrecords_filename
,
data
,
labels
):
writer
=
tf
.
python_io
.
TFRecordWriter
(
tfrecords_filename
)
for
i
in
range
(
train_data
.
shape
[
0
]):
img
=
train_data
[
i
]
#for i in range(train_data.shape[0]):
for
i
in
range
(
6000
):
img
=
data
[
i
]
img_raw
=
img
.
tostring
()
feature
=
{
'train/image'
:
_bytes_feature
(
img_raw
),
'train/label'
:
_int64_feature
(
train_
labels
[
i
])
'train/label'
:
_int64_feature
(
labels
[
i
])
}
example
=
tf
.
train
.
Example
(
features
=
tf
.
train
.
Features
(
feature
=
feature
))
...
...
@@ -130,34 +133,50 @@ def test_cnn_trainer_scratch_tfrecord():
tf
.
reset_default_graph
()
# Creating the tf record
tfrecords_filename
=
"mnist_train.tfrecords"
create_tf_record
(
tfrecords_filename
)
filename_queue
=
tf
.
train
.
string_input_producer
([
tfrecords_filename
],
num_epochs
=
1
,
name
=
"input"
)
tfrecords_filename
=
"mnist_train.tfrecords"
create_tf_record
(
tfrecords_filename
,
train_data
,
train_labels
)
filename_queue
=
tf
.
train
.
string_input_producer
([
tfrecords_filename
],
num_epochs
=
15
,
name
=
"input"
)
tfrecords_filename_val
=
"mnist_validation.tfrecords"
create_tf_record
(
tfrecords_filename_val
,
validation_data
,
validation_labels
)
filename_queue_val
=
tf
.
train
.
string_input_producer
([
tfrecords_filename_val
],
num_epochs
=
15
,
name
=
"input_validation"
)
# Creating the CNN using the TFRecord as input
train_data_shuffler
=
TFRecord
(
filename_queue
=
filename_queue
,
batch_size
=
batch_size
)
validation_data_shuffler
=
TFRecord
(
filename_queue
=
filename_queue_val
,
batch_size
=
2000
)
graph
=
scratch_network
(
train_data_shuffler
)
validation_graph
=
scratch_network
(
validation_data_shuffler
,
reuse
=
True
)
# Setting the placeholders
# Loss for the softmax
loss
=
BaseLoss
(
tf
.
nn
.
sparse_s
oft
m
ax
_cross_entropy_with_logits
,
tf
.
reduce_mean
)
loss
=
MeanS
oft
M
ax
Loss
(
)
# One graph trainer
trainer
=
Trainer
(
train_data_shuffler
,
iterations
=
iterations
,
validation_data_shuffler
=
validation_data_shuffler
,
iterations
=
iterations
,
#It is supper fast
analizer
=
None
,
temp_dir
=
directory
)
learning_rate
=
constant
(
0.01
,
name
=
"regular_lr"
)
trainer
.
create_network_from_scratch
(
graph
=
graph
,
validation_graph
=
validation_graph
,
loss
=
loss
,
learning_rate
=
learning_rate
,
optimizer
=
tf
.
train
.
GradientDescentOptimizer
(
learning_rate
),
)
trainer
.
train
()
os
.
remove
(
tfrecords_filename
)
os
.
remove
(
tfrecords_filename_val
)
assert
True
tf
.
reset_default_graph
()
del
trainer
assert
len
(
tf
.
global_variables
())
==
0
bob/learn/tensorflow/test/test_dnn.py
View file @
6ea79ebd
...
...
@@ -37,6 +37,8 @@ def validate_network(embedding, validation_data, validation_labels):
def
test_dnn_trainer
():
tf
.
reset_default_graph
()
train_data
,
train_labels
,
validation_data
,
validation_labels
=
load_mnist
()
# Creating datashufflers
...
...
@@ -79,3 +81,5 @@ def test_dnn_trainer():
del
architecture
del
trainer
# Just to clean the variables
tf
.
reset_default_graph
()
assert
len
(
tf
.
global_variables
())
==
0
bob/learn/tensorflow/test/test_inception.py
View file @
6ea79ebd
...
...
@@ -22,6 +22,8 @@ seed = 10
def
test_inception_trainer
():
tf
.
reset_default_graph
()
directory
=
"./temp/inception"
# Loading data
...
...
@@ -52,9 +54,13 @@ def test_inception_trainer():
)
trainer
.
train
()
shutil
.
rmtree
(
directory
)
tf
.
reset_default_graph
()
assert
len
(
tf
.
global_variables
())
==
0
def
test_inception_triplet_trainer
():
tf
.
reset_default_graph
()
directory
=
"./temp/inception"
# Loading data
...
...
@@ -89,3 +95,5 @@ def test_inception_triplet_trainer():
)
trainer
.
train
()
shutil
.
rmtree
(
directory
)
tf
.
reset_default_graph
()
assert
len
(
tf
.
global_variables
())
==
0
bob/learn/tensorflow/test/test_layers.py
View file @
6ea79ebd
...
...
@@ -11,25 +11,38 @@ from nose.tools import assert_raises_regexp
slim
=
tf
.
contrib
.
slim
def
test_simple
():
tf
.
reset_default_graph
()
x
=
np
.
zeros
([
64
,
10
,
36
])
graph
=
maxout
(
x
,
num_units
=
3
)
assert
graph
.
get_shape
().
as_list
()
==
[
64
,
10
,
3
]
tf
.
reset_default_graph
()
assert
len
(
tf
.
global_variables
())
==
0
def
test_fully_connected
():
tf
.
reset_default_graph
()
x
=
np
.
zeros
([
64
,
50
])
graph
=
slim
.
fully_connected
(
x
,
50
,
activation_fn
=
None
)
graph
=
maxout
(
graph
,
num_units
=
10
)
assert
graph
.
get_shape
().
as_list
()
==
[
64
,
10
]
tf
.
reset_default_graph
()
assert
len
(
tf
.
global_variables
())
==
0
def
test_nchw
():
tf
.
reset_default_graph
()
x
=
np
.
random
.
uniform
(
size
=
(
10
,
100
,
100
,
3
)).
astype
(
np
.
float32
)
graph
=
slim
.
conv2d
(
x
,
10
,
[
3
,
3
])
graph
=
maxout
(
graph
,
num_units
=
1
)
assert
graph
.
get_shape
().
as_list
()
==
[
10
,
100
,
100
,
1
]
tf
.
reset_default_graph
()
assert
len
(
tf
.
global_variables
())
==
0
def
test_invalid_shape
():
tf
.
reset_default_graph
()
x
=
np
.
random
.
uniform
(
size
=
(
10
,
100
,
100
,
3
)).
astype
(
np
.
float32
)
graph
=
slim
.
conv2d
(
x
,
3
,
[
3
,
3
])
with
assert_raises_regexp
(
ValueError
,
'number of features'
):
graph
=
maxout
(
graph
,
num_units
=
2
)
tf
.
reset_default_graph
()
assert
len
(
tf
.
global_variables
())
==
0
bob/learn/tensorflow/trainers/SiameseTrainer.py
View file @
6ea79ebd
...
...
@@ -49,11 +49,13 @@ class SiameseTrainer(Trainer):
def
__init__
(
self
,
train_data_shuffler
,
validation_data_shuffler
=
None
,
###### training options ##########
iterations
=
5000
,
snapshot
=
500
,
validation_snapshot
=
100
,
keep_checkpoint_every_n_hours
=
2
,
## Analizer
analizer
=
SoftmaxAnalizer
(),
...
...
@@ -65,19 +67,23 @@ class SiameseTrainer(Trainer):
):
self
.
train_data_shuffler
=
train_data_shuffler
self
.
temp_dir
=
temp_dir
self
.
iterations
=
iterations
self
.
snapshot
=
snapshot
self
.
validation_snapshot
=
validation_snapshot
self
.
keep_checkpoint_every_n_hours
=
keep_checkpoint_every_n_hours
# Training variables used in the fit
self
.
summaries_train
=
None
self
.
train_summary_writter
=
None
self
.
thread_pool
=
None
# Validation data
self
.
validation_summary_writter
=
None
self
.
summaries_validation
=
None
self
.
validation_data_shuffler
=
validation_data_shuffler
# Analizer
self
.
analizer
=
analizer
...
...
@@ -86,14 +92,25 @@ class SiameseTrainer(Trainer):
self
.
session
=
None
self
.
graph
=
None
self
.
validation_graph
=
None
self
.
loss
=
None
self
.
predictor
=
None
self
.
validation_predictor
=
None
self
.
optimizer_class
=
None
self
.
learning_rate
=
None
# Training variables used in the fit
self
.
optimizer
=
None
self
.
data_ph
=
None
self
.
label_ph
=
None
self
.
validation_data_ph
=
None
self
.
validation_label_ph
=
None
self
.
saver
=
None
bob
.
core
.
log
.
set_verbosity_level
(
logger
,
verbosity_level
)
...
...
@@ -102,8 +119,6 @@ class SiameseTrainer(Trainer):